Diffstat
-rw-r--r-- .mailmap | 1
-rw-r--r-- CREDITS | 8
-rw-r--r-- Documentation/ABI/testing/sysfs-class-led-trigger-pattern | 4
-rw-r--r-- Documentation/admin-guide/kernel-parameters.txt | 2
-rw-r--r-- Documentation/admin-guide/pm/cpufreq.rst | 2
-rw-r--r-- Documentation/admin-guide/security-bugs.rst | 21
-rw-r--r-- Documentation/core-api/xarray.rst | 52
-rw-r--r-- Documentation/cpu-freq/cpufreq-stats.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/arm/shmobile.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt | 9
-rw-r--r-- Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt | 65
-rw-r--r-- Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/display/himax,hx8357d.txt | 26
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,g101evn010 | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt | 42
-rw-r--r-- Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt | 81
-rw-r--r-- Documentation/devicetree/bindings/display/renesas,du.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/display/truly,nt35597.txt | 59
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-omap.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/net/can/holt_hi311x.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/net/can/rcar_can.txt | 28
-rw-r--r-- Documentation/devicetree/bindings/net/dsa/dsa.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.txt | 1
-rw-r--r-- Documentation/gpu/amdgpu-dc.rst | 68
-rw-r--r-- Documentation/gpu/drivers.rst | 1
-rw-r--r-- Documentation/gpu/drm-kms-helpers.rst | 31
-rw-r--r-- Documentation/gpu/drm-kms.rst | 19
-rw-r--r-- Documentation/gpu/drm-mm.rst | 7
-rw-r--r-- Documentation/gpu/drm-uapi.rst | 3
-rw-r--r-- Documentation/gpu/todo.rst | 58
-rw-r--r-- Documentation/gpu/vkms.rst | 101
-rw-r--r-- Documentation/i2c/busses/i2c-nvidia-gpu | 18
-rw-r--r-- Documentation/input/event-codes.rst | 11
-rw-r--r-- Documentation/media/uapi/v4l/dev-meta.rst | 2
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-fmt.rst | 5
-rw-r--r-- Documentation/networking/rxrpc.txt | 17
-rw-r--r-- Documentation/vm/unevictable-lru.rst | 6
-rw-r--r-- Documentation/x86/x86_64/mm.txt | 34
-rw-r--r-- Documentation/x86/zero-page.txt | 2
-rw-r--r-- MAINTAINERS | 97
-rw-r--r-- Makefile | 4
-rw-r--r-- arch/alpha/include/asm/termios.h | 8
-rw-r--r-- arch/alpha/include/uapi/asm/ioctls.h | 5
-rw-r--r-- arch/alpha/include/uapi/asm/termbits.h | 17
-rw-r--r-- arch/arm/boot/dts/imx53-ppd.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx6sll.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6sx-sdb.dtsi | 7
-rw-r--r-- arch/arm/boot/dts/vf610m4-colibri.dts | 4
-rw-r--r-- arch/arm/configs/multi_v7_defconfig | 1
-rw-r--r-- arch/arm/include/asm/cputype.h | 1
-rw-r--r-- arch/arm/include/asm/pgtable-2level.h | 2
-rw-r--r-- arch/arm/include/asm/proc-fns.h | 61
-rw-r--r-- arch/arm/kernel/bugs.c | 4
-rw-r--r-- arch/arm/kernel/head-common.S | 6
-rw-r--r-- arch/arm/kernel/setup.c | 40
-rw-r--r-- arch/arm/kernel/smp.c | 31
-rw-r--r-- arch/arm/mach-omap2/display.c | 111
-rw-r--r-- arch/arm/mm/proc-v7-bugs.c | 17
-rw-r--r-- arch/arm/mm/proc-v7.S | 2
-rw-r--r-- arch/arm/vfp/vfpmodule.c | 2
-rw-r--r-- arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi | 3
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a7795.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77980-condor.dts | 47
-rw-r--r-- arch/arm64/include/asm/processor.h | 8
-rw-r--r-- arch/arm64/include/asm/sysreg.h | 4
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 2
-rw-r--r-- arch/arm64/kernel/setup.c | 1
-rw-r--r-- arch/arm64/mm/init.c | 2
-rw-r--r-- arch/arm64/mm/mmu.c | 2
-rw-r--r-- arch/m68k/include/asm/pgtable_mm.h | 4
-rw-r--r-- arch/microblaze/include/asm/pgtable.h | 2
-rw-r--r-- arch/mips/cavium-octeon/executive/cvmx-helper.c | 2
-rw-r--r-- arch/mips/configs/cavium_octeon_defconfig | 1
-rw-r--r-- arch/mips/kernel/setup.c | 1
-rw-r--r-- arch/mips/kernel/traps.c | 3
-rw-r--r-- arch/mips/loongson64/loongson-3/numa.c | 12
-rw-r--r-- arch/mips/mm/dma-noncoherent.c | 2
-rw-r--r-- arch/mips/sgi-ip27/ip27-memory.c | 11
-rw-r--r-- arch/nds32/include/asm/pgtable.h | 2
-rw-r--r-- arch/parisc/include/asm/pgtable.h | 2
-rw-r--r-- arch/parisc/include/asm/spinlock.h | 4
-rw-r--r-- arch/parisc/kernel/syscall.S | 12
-rw-r--r-- arch/powerpc/include/asm/io.h | 20
-rw-r--r-- arch/powerpc/include/asm/ppc-opcode.h | 2
-rw-r--r-- arch/powerpc/include/asm/ptrace.h | 1
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 2
-rw-r--r-- arch/powerpc/kvm/trace.h | 8
-rw-r--r-- arch/powerpc/kvm/trace_booke.h | 9
-rw-r--r-- arch/powerpc/kvm/trace_hv.h | 9
-rw-r--r-- arch/powerpc/kvm/trace_pr.h | 9
-rw-r--r-- arch/powerpc/mm/numa.c | 2
-rw-r--r-- arch/powerpc/mm/slb.c | 35
-rw-r--r-- arch/powerpc/platforms/powernv/npu-dma.c | 64
-rw-r--r-- arch/riscv/Makefile | 19
-rw-r--r-- arch/riscv/boot/.gitignore | 2
-rw-r--r-- arch/riscv/boot/Makefile | 33
-rw-r--r-- arch/riscv/boot/install.sh | 60
-rw-r--r-- arch/riscv/configs/defconfig | 1
-rw-r--r-- arch/riscv/include/asm/module.h | 1
-rw-r--r-- arch/riscv/include/asm/ptrace.h | 4
-rw-r--r-- arch/riscv/include/asm/uaccess.h | 4
-rw-r--r-- arch/riscv/include/asm/unistd.h | 5
-rw-r--r-- arch/riscv/include/uapi/asm/unistd.h (renamed from arch/riscv/include/uapi/asm/syscalls.h) | 26
-rw-r--r-- arch/riscv/kernel/cpu.c | 9
-rw-r--r-- arch/riscv/kernel/head.S | 10
-rw-r--r-- arch/riscv/kernel/module.c | 12
-rw-r--r-- arch/riscv/kernel/vmlinux.lds.S | 2
-rw-r--r-- arch/riscv/lib/Makefile | 2
-rw-r--r-- arch/s390/Makefile | 2
-rw-r--r-- arch/s390/boot/compressed/Makefile | 16
-rw-r--r-- arch/s390/configs/debug_defconfig | 14
-rw-r--r-- arch/s390/configs/performance_defconfig | 13
-rw-r--r-- arch/s390/defconfig | 79
-rw-r--r-- arch/s390/include/asm/mmu_context.h | 5
-rw-r--r-- arch/s390/include/asm/pgalloc.h | 6
-rw-r--r-- arch/s390/include/asm/pgtable.h | 18
-rw-r--r-- arch/s390/include/asm/processor.h | 4
-rw-r--r-- arch/s390/include/asm/thread_info.h | 2
-rw-r--r-- arch/s390/include/asm/tlb.h | 6
-rw-r--r-- arch/s390/kernel/entry.S | 6
-rw-r--r-- arch/s390/kernel/perf_cpum_cf.c | 2
-rw-r--r-- arch/s390/kernel/perf_cpum_sf.c | 33
-rw-r--r-- arch/s390/kernel/vdso32/Makefile | 6
-rw-r--r-- arch/s390/kernel/vdso64/Makefile | 6
-rw-r--r-- arch/s390/kernel/vmlinux.lds.S | 4
-rw-r--r-- arch/s390/mm/pgalloc.c | 1
-rw-r--r-- arch/s390/numa/numa.c | 1
-rw-r--r-- arch/um/drivers/ubd_kern.c | 12
-rw-r--r-- arch/x86/Kconfig | 1
-rw-r--r-- arch/x86/Makefile | 4
-rw-r--r-- arch/x86/events/intel/uncore.h | 33
-rw-r--r-- arch/x86/events/intel/uncore_snb.c | 121
-rw-r--r-- arch/x86/include/asm/mce.h | 2
-rw-r--r-- arch/x86/include/asm/mshyperv.h | 2
-rw-r--r-- arch/x86/include/asm/page_64_types.h | 12
-rw-r--r-- arch/x86/include/asm/pgtable_64_types.h | 4
-rw-r--r-- arch/x86/include/asm/qspinlock.h | 13
-rw-r--r-- arch/x86/include/asm/xen/page.h | 35
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce.c | 6
-rw-r--r-- arch/x86/kernel/cpu/mshyperv.c | 11
-rw-r--r-- arch/x86/kernel/cpu/vmware.c | 2
-rw-r--r-- arch/x86/kernel/ldt.c | 59
-rw-r--r-- arch/x86/kernel/vsmp_64.c | 84
-rw-r--r-- arch/x86/xen/mmu_pv.c | 6
-rw-r--r-- arch/x86/xen/p2m.c | 3
-rw-r--r-- arch/x86/xen/spinlock.c | 14
-rw-r--r-- arch/xtensa/include/asm/processor.h | 6
-rw-r--r-- arch/xtensa/kernel/head.S | 7
-rw-r--r-- block/bio.c | 2
-rw-r--r-- block/blk-core.c | 5
-rw-r--r-- block/blk-lib.c | 26
-rw-r--r-- block/blk-merge.c | 5
-rw-r--r-- block/blk.h | 12
-rw-r--r-- block/bounce.c | 1
-rw-r--r-- crypto/crypto_user_base.c | 18
-rw-r--r-- crypto/crypto_user_stat.c | 21
-rw-r--r-- crypto/simd.c | 5
-rw-r--r-- drivers/acpi/Kconfig | 2
-rw-r--r-- drivers/acpi/acpi_platform.c | 1
-rw-r--r-- drivers/acpi/nfit/core.c | 19
-rw-r--r-- drivers/acpi/nfit/mce.c | 8
-rw-r--r-- drivers/ata/libata-core.c | 2
-rw-r--r-- drivers/ata/sata_rcar.c | 6
-rw-r--r-- drivers/block/floppy.c | 3
-rw-r--r-- drivers/block/xen-blkfront.c | 1
-rw-r--r-- drivers/clk/clk-fixed-factor.c | 1
-rw-r--r-- drivers/clk/meson/axg.c | 13
-rw-r--r-- drivers/clk/meson/gxbb.c | 12
-rw-r--r-- drivers/clk/qcom/gcc-qcs404.c | 2
-rw-r--r-- drivers/clocksource/i8253.c | 14
-rw-r--r-- drivers/cpufreq/imx6q-cpufreq.c | 7
-rw-r--r-- drivers/cpufreq/ti-cpufreq.c | 26
-rw-r--r-- drivers/cpuidle/cpuidle-arm.c | 40
-rw-r--r-- drivers/crypto/hisilicon/sec/sec_algs.c | 31
-rw-r--r-- drivers/dma-buf/dma-fence.c | 36
-rw-r--r-- drivers/dma-buf/reservation.c | 189
-rw-r--r-- drivers/dma-buf/udmabuf.c | 1
-rw-r--r-- drivers/firmware/efi/arm-init.c | 4
-rw-r--r-- drivers/firmware/efi/arm-runtime.c | 2
-rw-r--r-- drivers/firmware/efi/efi.c | 35
-rw-r--r-- drivers/firmware/efi/libstub/arm-stub.c | 3
-rw-r--r-- drivers/firmware/efi/libstub/fdt.c | 4
-rw-r--r-- drivers/firmware/efi/memmap.c | 3
-rw-r--r-- drivers/firmware/efi/runtime-wrappers.c | 2
-rw-r--r-- drivers/gnss/serial.c | 3
-rw-r--r-- drivers/gnss/sirf.c | 3
-rw-r--r-- drivers/gpio/gpio-mockup.c | 6
-rw-r--r-- drivers/gpio/gpio-pxa.c | 4
-rw-r--r-- drivers/gpio/gpiolib.c | 5
-rw-r--r-- drivers/gpu/drm/Makefile | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 155
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 117
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 167
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 40
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 121
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 313
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 33
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 117
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h | 39
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 472
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 243
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 54
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 49
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 63
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 49
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 245
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 62
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 282
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 98
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 105
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 100
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 142
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 70
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 100
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 40
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_ih.c | 29
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 66
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cz_ih.c | 29
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 144
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 278
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 373
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 406
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 173
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 29
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 239
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 69
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 71
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 747
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dma.c | 68
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_ih.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 65
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h | 130
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 84
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 86
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 89
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 34
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cik_regs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 145
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 57
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 35
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 34
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 32
-rw-r--r-- drivers/gpu/drm/amd/display/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 975
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 123
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 115
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 79
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h | 104
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c | 93
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 32
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 172
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 95
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 840
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_bios_types.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_link.h | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 104
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | 884
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h (renamed from drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h) | 105
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 947
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 52
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 23
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | 77
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c | 91
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 341
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 39
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 42
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c | 375
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h | 43
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 257
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 96
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 203
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | 98
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 45
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 36
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_event_log.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_services.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_services_types.h | 18
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 65
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/compressor.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h (renamed from drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h) | 34
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 44
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/resource.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 208
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.h | 11
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 47
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_shared.h | 27
-rw-r--r-- drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/Makefile | 31
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 326
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 47
-rw-r--r-- drivers/gpu/drm/amd/include/amd_acpi.h | 175
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 4
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h | 32
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h | 35
-rw-r--r-- drivers/gpu/drm/amd/include/atomfirmware.h | 7
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 119
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 134
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 45
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 36
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 39
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 12
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 6
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 32
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 25
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 36
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 23
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 222
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 8
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu7_common.h | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 72
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 49
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 1
-rw-r--r-- drivers/gpu/drm/arc/arcpgu.h | 4
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_crtc.c | 3
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_drv.c | 38
-rw-r--r-- drivers/gpu/drm/arm/malidp_hw.c | 14
-rw-r--r-- drivers/gpu/drm/arm/malidp_planes.c | 28
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 21
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 3
-rw-r--r-- drivers/gpu/drm/ast/ast_ttm.c | 64
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 2
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 15
-rw-r--r-- drivers/gpu/drm/bochs/bochs.h | 4
-rw-r--r-- drivers/gpu/drm/bochs/bochs_hw.c | 30
-rw-r--r-- drivers/gpu/drm/bochs/bochs_kms.c | 20
-rw-r--r-- drivers/gpu/drm/bochs/bochs_mm.c | 65
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 12
-rw-r--r-- drivers/gpu/drm/bridge/sii902x.c | 247
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 6
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 114
-rw-r--r-- drivers/gpu/drm/bridge/tc358764.c | 2
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.h | 2
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_fbdev.c | 3
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_ttm.c | 64
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 142
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 689
-rw-r--r-- drivers/gpu/drm/drm_atomic_state_helper.c | 444
-rw-r--r-- drivers/gpu/drm/drm_atomic_uapi.c | 21
-rw-r--r-- drivers/gpu/drm/drm_bufs.c | 3
-rw-r--r-- drivers/gpu/drm/drm_client.c | 12
-rw-r--r-- drivers/gpu/drm/drm_color_mgmt.c | 14
-rw-r--r-- drivers/gpu/drm/drm_connector.c | 180
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 33
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 115
-rw-r--r-- drivers/gpu/drm/drm_damage_helper.c | 334
-rw-r--r-- drivers/gpu/drm/drm_debugfs.c | 89
-rw-r--r-- drivers/gpu/drm/drm_dp_cec.c | 2
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 92
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 12
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 23
-rw-r--r-- drivers/gpu/drm/drm_dsc.c | 228
-rw-r--r-- drivers/gpu/drm/drm_fb_cma_helper.c | 43
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 9
-rw-r--r-- drivers/gpu/drm/drm_fourcc.c | 81
-rw-r--r-- drivers/gpu/drm/drm_framebuffer.c | 13
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 109
-rw-r--r-- drivers/gpu/drm/drm_gem_cma_helper.c | 86
-rw-r--r-- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_global.c | 137
-rw-r--r-- drivers/gpu/drm/drm_info.c | 137
-rw-r--r-- drivers/gpu/drm/drm_internal.h | 5
-rw-r--r-- drivers/gpu/drm/drm_lease.c | 38
-rw-r--r-- drivers/gpu/drm/drm_memory.c | 10
-rw-r--r-- drivers/gpu/drm/drm_mode_config.c | 12
-rw-r--r-- drivers/gpu/drm/drm_mode_object.c | 3
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 4
-rw-r--r-- drivers/gpu/drm/drm_modeset_helper.c | 15
-rw-r--r-- drivers/gpu/drm/drm_modeset_lock.c | 6
-rw-r--r-- drivers/gpu/drm/drm_panel_orientation_quirks.c | 16
-rw-r--r-- drivers/gpu/drm/drm_pci.c | 5
-rw-r--r-- drivers/gpu/drm/drm_plane.c | 39
-rw-r--r-- drivers/gpu/drm/drm_plane_helper.c | 331
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 118
-rw-r--r-- drivers/gpu/drm/drm_simple_kms_helper.c | 8
-rw-r--r-- drivers/gpu/drm/drm_syncobj.c | 80
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 4
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_dump.c | 9
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_sched.c | 7
-rw-r--r-- drivers/gpu/drm/exynos/Kconfig | 5
-rw-r--r-- drivers/gpu/drm/exynos/Makefile | 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 96
-rw-r--r-- drivers/gpu/drm/exynos/exynos7_drm_decon.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 11
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dma.c | 157
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 55
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.h | 12
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 14
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_iommu.c | 111
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_iommu.h | 134
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_rotator.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_scaler.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_mixer.c | 30
-rw-r--r-- drivers/gpu/drm/exynos/regs-decon5433.h | 22
-rw-r--r-- drivers/gpu/drm/exynos/regs-mixer.h | 9
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 33
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 25
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h | 1
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 57
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 13
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c | 115
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.h | 10
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio_context.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 28
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 309
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 138
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 299
-rw-r--r-- drivers/gpu/drm/i915/i915_fixed.h | 143
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 178
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 19
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 251
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 47
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 362
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.h | 36
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 83
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_bdw.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_bdw.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_bxt.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_bxt.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_cflgt2.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_cflgt2.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_cflgt3.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_cflgt3.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_chv.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_chv.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_cnl.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_cnl.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_glk.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_glk.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_hsw.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_hsw.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_icl.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_icl.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_kblgt2.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_kblgt2.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_kblgt3.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_kblgt3.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_sklgt2.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_sklgt2.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_sklgt3.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_sklgt3.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_sklgt4.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_sklgt4.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 9
-rw-r--r-- drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 186
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 49
-rw-r--r-- drivers/gpu/drm/i915/i915_query.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 837
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 121
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 13
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler.c | 399
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler.h | 36
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence.c | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence.h | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_syncmap.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 31
-rw-r--r-- drivers/gpu/drm/i915/i915_timeline.h | 19
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.h | 13
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 10
-rw-r--r-- drivers/gpu/drm/i915/icl_dsi.c | 1337
-rw-r--r-- drivers/gpu/drm/i915/intel_atomic.c | 119
-rw-r--r-- drivers/gpu/drm/i915/intel_atomic_plane.c | 184
-rw-r--r-- drivers/gpu/drm/i915/intel_audio.c | 34
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 91
-rw-r--r-- drivers/gpu/drm/i915/intel_breadcrumbs.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_cdclk.c | 57
-rw-r--r-- drivers/gpu/drm/i915/intel_color.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_combo_phy.c | 254
-rw-r--r-- drivers/gpu/drm/i915/intel_connector.c (renamed from drivers/gpu/drm/i915/intel_modes.c) | 129
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_csr.c | 162
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 666
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 79
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 52
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 2274
-rw-r--r-- drivers/gpu/drm/i915/intel_display.h | 58
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 1108
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_mst.c | 41
-rw-r--r-- drivers/gpu/drm/i915/intel_dpio_phy.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_dpll_mgr.c | 120
-rw-r--r-- drivers/gpu/drm/i915/intel_dpll_mgr.h | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 277
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.c | 128
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.h | 35
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_vbt.c | 306
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_engine_cs.c | 77
-rw-r--r-- drivers/gpu/drm/i915/intel_fbc.c | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_guc.c | 45
-rw-r--r-- drivers/gpu/drm/i915/intel_guc.h | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_fw.c | 113
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_fwif.h | 41
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_reg.h | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_submission.c | 216
-rw-r--r-- drivers/gpu/drm/i915/intel_hdcp.c | 214
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 237
-rw-r--r-- drivers/gpu/drm/i915/intel_hotplug.c | 131
-rw-r--r-- drivers/gpu/drm/i915/intel_huc.c | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_i2c.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_lpe_audio.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 411
-rw-r--r-- drivers/gpu/drm/i915/intel_lspcon.c | 347
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 67
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 158
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.h | 15
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 13
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 948
-rw-r--r-- drivers/gpu/drm/i915/intel_psr.c | 358
-rw-r--r-- drivers/gpu/drm/i915/intel_quirks.c | 169
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 109
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 49
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 362
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 56
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 825
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_uc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_uc_fw.h | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_vbt_defs.h | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_vdsc.c | 1088
-rw-r--r-- drivers/gpu/drm/i915/intel_workarounds.c | 991
-rw-r--r-- drivers/gpu/drm/i915/intel_workarounds.h | 36
-rw-r--r-- drivers/gpu/drm/i915/selftests/huge_pages.c | 36
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 428
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 10
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_reset.c | 44
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_reset.h | 15
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.c | 199
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.h | 37
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_guc.c | 59
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 70
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_lrc.c | 566
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_workarounds.c | 247
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_engine.c | 2
-rw-r--r-- drivers/gpu/drm/i915/vlv_dsi.c | 190
-rw-r--r-- drivers/gpu/drm/imx/dw_hdmi-imx.c | 5
-rw-r--r-- drivers/gpu/drm/imx/imx-drm-core.c | 11
-rw-r--r-- drivers/gpu/drm/imx/imx-ldb.c | 10
-rw-r--r-- drivers/gpu/drm/imx/imx-tve.c | 12
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-crtc.c | 10
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-plane.c | 18
-rw-r--r-- drivers/gpu/drm/imx/parallel-display.c | 10
-rw-r--r-- drivers/gpu/drm/meson/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/meson/Makefile | 2
-rw-r--r-- drivers/gpu/drm/meson/meson_canvas.c | 7
-rw-r--r-- drivers/gpu/drm/meson/meson_canvas.h | 11
-rw-r--r-- drivers/gpu/drm/meson/meson_crtc.c | 265
-rw-r--r-- drivers/gpu/drm/meson/meson_drv.c | 91
-rw-r--r-- drivers/gpu/drm/meson/meson_drv.h | 67
-rw-r--r-- drivers/gpu/drm/meson/meson_dw_hdmi.c | 12
-rw-r--r-- drivers/gpu/drm/meson/meson_overlay.c | 588
-rw-r--r-- drivers/gpu/drm/meson/meson_overlay.h | 14
-rw-r--r-- drivers/gpu/drm/meson/meson_plane.c | 199
-rw-r--r-- drivers/gpu/drm/meson/meson_registers.h | 3
-rw-r--r-- drivers/gpu/drm/meson/meson_vclk.c | 127
-rw-r--r-- drivers/gpu/drm/meson/meson_vclk.h | 2
-rw-r--r-- drivers/gpu/drm/meson/meson_venc.c | 144
-rw-r--r-- drivers/gpu/drm/meson/meson_viu.c | 42
-rw-r--r-- drivers/gpu/drm/meson/meson_viu.h | 1
-rw-r--r-- drivers/gpu/drm/meson/meson_vpp.c | 90
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.h | 2
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_ttm.c | 64
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 5
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 1
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 1
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 1
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 3
-rw-r--r-- drivers/gpu/drm/mxsfb/mxsfb_drv.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 67
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dsi.c | 22
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dss.c | 11
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 37
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 27
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/venc.c | 7
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_crtc.c | 6
-rw-r--r-- drivers/gpu/drm/panel/Kconfig | 25
-rw-r--r-- drivers/gpu/drm/panel/Makefile | 3
-rw-r--r-- drivers/gpu/drm/panel/panel-innolux-p079zca.c | 3
-rw-r--r-- drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c | 330
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6d16d0.c | 264
-rw-r--r-- drivers/gpu/drm/panel/panel-seiko-43wvf1g.c | 7
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 184
-rw-r--r-- drivers/gpu/drm/panel/panel-truly-nt35597.c | 675
-rw-r--r-- drivers/gpu/drm/pl111/pl111_vexpress.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_cmd.c | 7
-rw-r--r-- drivers/gpu/drm/qxl/qxl_debugfs.c | 5
-rw-r--r-- drivers/gpu/drm/qxl/qxl_dev.h | 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_display.c | 37
-rw-r--r-- drivers/gpu/drm/qxl/qxl_draw.c | 14
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.h | 32
-rw-r--r-- drivers/gpu/drm/qxl/qxl_dumb.c | 3
-rw-r--r-- drivers/gpu/drm/qxl/qxl_fb.c | 6
-rw-r--r-- drivers/gpu/drm/qxl/qxl_image.c | 4
-rw-r--r-- drivers/gpu/drm/qxl/qxl_ioctl.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_kms.c | 8
-rw-r--r-- drivers/gpu/drm/qxl/qxl_object.c | 27
-rw-r--r-- drivers/gpu/drm/qxl/qxl_object.h | 4
-rw-r--r-- drivers/gpu/drm/qxl/qxl_prime.c | 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_release.c | 9
-rw-r--r-- drivers/gpu/drm/qxl/qxl_ttm.c | 73
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/r420.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_tv.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 65
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vm.c | 8
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 30
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 68
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 3
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 23
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 3
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_lvds.c | 1
-rw-r--r-- drivers/gpu/drm/rockchip/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/rockchip/Makefile | 2
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-reg.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 1076
-rw-r--r-- drivers/gpu/drm/rockchip/dw-mipi-dsi.c | 1349
-rw-r--r-- drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 128
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 3
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_psr.c | 4
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 3
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 4
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 7
-rw-r--r-- drivers/gpu/drm/scheduler/sched_entity.c | 10
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c | 168
-rw-r--r-- drivers/gpu/drm/selftests/Makefile | 6
-rw-r--r-- drivers/gpu/drm/selftests/drm_helper_selftests.h | 9
-rw-r--r-- drivers/gpu/drm/selftests/drm_modeset_selftests.h | 34
-rw-r--r-- drivers/gpu/drm/selftests/test-drm_damage_helper.c | 811
-rw-r--r-- drivers/gpu/drm/selftests/test-drm_format.c | 280
-rw-r--r-- drivers/gpu/drm/selftests/test-drm_framebuffer.c | 346
-rw-r--r-- drivers/gpu/drm/selftests/test-drm_modeset_common.c | 32
-rw-r--r-- drivers/gpu/drm/selftests/test-drm_modeset_common.h | 43
-rw-r--r-- drivers/gpu/drm/selftests/test-drm_plane_helper.c (renamed from drivers/gpu/drm/selftests/test-drm-helper.c) | 38
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_drv.c | 4
-rw-r--r-- drivers/gpu/drm/sti/sti_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/sti/sti_cursor.c | 1
-rw-r--r-- drivers/gpu/drm/sti/sti_drv.c | 6
-rw-r--r-- drivers/gpu/drm/sti/sti_gdp.c | 3
-rw-r--r-- drivers/gpu/drm/sti/sti_hqvdp.c | 1
-rw-r--r-- drivers/gpu/drm/stm/drv.c | 13
-rw-r--r-- drivers/gpu/drm/stm/ltdc.c | 45
-rw-r--r-- drivers/gpu/drm/stm/ltdc.h | 5
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_backend.c | 106
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_backend.h | 3
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_drv.c | 25
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_framebuffer.c | 12
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_framebuffer.h | 3
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_frontend.c | 113
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_frontend.h | 11
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c | 4
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_layer.c | 15
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_lvds.c | 4
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_rgb.c | 4
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_tcon.c | 28
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_tcon.h | 1
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_csc.c | 83
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 45
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h | 14
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 201
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_mixer.c | 57
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_mixer.h | 80
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_tcon_top.c | 52
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_layer.c | 49
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_layer.h | 37
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_scaler.c | 47
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_scaler.h | 28
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_layer.c | 57
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_layer.h | 25
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_scaler.c | 70
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_scaler.h | 68
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_drv.c | 11
-rw-r--r-- drivers/gpu/drm/tinydrm/Kconfig | 11
-rw-r--r-- drivers/gpu/drm/tinydrm/Makefile | 1
-rw-r--r-- drivers/gpu/drm/tinydrm/core/tinydrm-core.c | 72
-rw-r--r-- drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c | 6
-rw-r--r-- drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c | 6
-rw-r--r-- drivers/gpu/drm/tinydrm/hx8357d.c | 270
-rw-r--r-- drivers/gpu/drm/tinydrm/ili9225.c | 5
-rw-r--r-- drivers/gpu/drm/tinydrm/ili9341.c | 4
-rw-r--r-- drivers/gpu/drm/tinydrm/mi0283qt.c | 6
-rw-r--r-- drivers/gpu/drm/tinydrm/mipi-dbi.c | 14
-rw-r--r-- drivers/gpu/drm/tinydrm/repaper.c | 7
-rw-r--r-- drivers/gpu/drm/tinydrm/st7586.c | 5
-rw-r--r-- drivers/gpu/drm/tinydrm/st7735r.c | 4
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 73
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 4
-rw-r--r-- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 14
-rw-r--r-- drivers/gpu/drm/ttm/ttm_memory.c | 14
-rw-r--r-- drivers/gpu/drm/tve200/tve200_drv.c | 4
-rw-r--r-- drivers/gpu/drm/udl/udl_main.c | 7
-rw-r--r-- drivers/gpu/drm/v3d/v3d_bo.c | 1
-rw-r--r-- drivers/gpu/drm/v3d/v3d_debugfs.c | 46
-rw-r--r-- drivers/gpu/drm/v3d/v3d_drv.c | 15
-rw-r--r-- drivers/gpu/drm/v3d/v3d_drv.h | 37
-rw-r--r-- drivers/gpu/drm/v3d/v3d_fence.c | 10
-rw-r--r-- drivers/gpu/drm/v3d/v3d_gem.c | 212
-rw-r--r-- drivers/gpu/drm/v3d/v3d_irq.c | 29
-rw-r--r-- drivers/gpu/drm/v3d/v3d_regs.h | 79
-rw-r--r-- drivers/gpu/drm/v3d/v3d_sched.c | 152
-rw-r--r-- drivers/gpu/drm/v3d/v3d_trace.h | 121
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 3
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.h | 6
-rw-r--r-- drivers/gpu/drm/vc4/vc4_gem.c | 6
-rw-r--r-- drivers/gpu/drm/vc4/vc4_kms.c | 6
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 388
-rw-r--r-- drivers/gpu/drm/vc4/vc4_regs.h | 8
-rw-r--r-- drivers/gpu/drm/vgem/vgem_drv.c | 19
-rw-r--r-- drivers/gpu/drm/vgem/vgem_fence.c | 2
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_display.c | 12
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drm_bus.c | 31
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.c | 1
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.h | 43
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_fb.c | 7
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_fence.c | 39
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_gem.c | 8
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 137
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_kms.c | 56
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_object.c | 26
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_plane.c | 46
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_ttm.c | 65
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_vq.c | 129
-rw-r--r-- drivers/gpu/drm/vkms/vkms_drv.c | 22
-rw-r--r-- drivers/gpu/drm/vkms/vkms_drv.h | 3
-rw-r--r-- drivers/gpu/drm/vkms/vkms_gem.c | 26
-rw-r--r-- drivers/gpu/drm/vkms/vkms_plane.c | 9
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 16
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 17
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 593
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 150
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 360
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 562
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 54
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 2
-rw-r--r-- drivers/gpu/drm/zte/zx_drm_drv.c | 5
-rw-r--r-- drivers/gpu/drm/zte/zx_plane.c | 1
-rw-r--r-- drivers/gpu/ipu-v3/ipu-cpmem.c | 52
-rw-r--r-- drivers/gpu/ipu-v3/ipu-ic.c | 52
-rw-r--r-- drivers/gpu/ipu-v3/ipu-image-convert.c | 1019
-rw-r--r-- drivers/gpu/vga/vga_switcheroo.c | 3
-rw-r--r-- drivers/gpu/vga/vgaarb.c | 21
-rw-r--r-- drivers/hid/hid-alps.c | 18
-rw-r--r-- drivers/hid/hid-asus.c | 3
-rw-r--r-- drivers/hid/hid-ids.h | 11
-rw-r--r-- drivers/hid/hid-input.c | 47
-rw-r--r-- drivers/hid/hid-logitech-hidpp.c | 309
-rw-r--r-- drivers/hid/hid-multitouch.c | 6
-rw-r--r-- drivers/hid/hid-quirks.c | 4
-rw-r--r-- drivers/hid/hid-steam.c | 154
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-core.c | 21
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c | 8
-rw-r--r-- drivers/hid/uhid.c | 25
-rw-r--r-- drivers/hid/usbhid/hiddev.c | 18
-rw-r--r-- drivers/hv/hv_kvp.c | 26
-rw-r--r-- drivers/hwmon/hwmon.c | 8
-rw-r--r-- drivers/hwmon/ibmpowernv.c | 7
-rw-r--r-- drivers/i2c/busses/Kconfig | 11
-rw-r--r-- drivers/i2c/busses/Makefile | 1
-rw-r--r-- drivers/i2c/busses/i2c-nvidia-gpu.c | 368
-rw-r--r-- drivers/i2c/busses/i2c-qcom-geni.c | 15
-rw-r--r-- drivers/iommu/amd_iommu_init.c | 3
-rw-r--r-- drivers/iommu/intel-iommu.c | 2
-rw-r--r-- drivers/iommu/intel-svm.c | 2
-rw-r--r-- drivers/iommu/ipmmu-vmsa.c | 3
-rw-r--r-- drivers/leds/trigger/ledtrig-pattern.c | 27
-rw-r--r-- drivers/media/cec/cec-adap.c | 49
-rw-r--r-- drivers/media/i2c/adv7511.c | 2
-rw-r--r-- drivers/media/i2c/adv7604.c | 2
-rw-r--r-- drivers/media/i2c/adv7842.c | 2
-rw-r--r-- drivers/media/i2c/tc358743.c | 3
-rw-r--r-- drivers/media/i2c/tda1997x.c | 4
-rw-r--r-- drivers/media/pci/intel/ipu3/ipu3-cio2.c | 6
-rw-r--r-- drivers/media/platform/omap3isp/isp.c | 3
-rw-r--r-- drivers/media/platform/vicodec/vicodec-core.c | 2
-rw-r--r-- drivers/media/platform/vim2m.c | 2
-rw-r--r-- drivers/media/v4l2-core/v4l2-ctrls.c | 5
-rw-r--r-- drivers/media/v4l2-core/v4l2-event.c | 43
-rw-r--r-- drivers/media/v4l2-core/v4l2-mem2mem.c | 4
-rw-r--r-- drivers/misc/atmel-ssc.c | 2
-rw-r--r-- drivers/misc/sgi-gru/grukdump.c | 4
-rw-r--r-- drivers/mmc/host/sdhci-pci-core.c | 86
-rw-r--r-- drivers/mtd/devices/Kconfig | 2
-rw-r--r-- drivers/mtd/maps/sa1100-flash.c | 10
-rw-r--r-- drivers/mtd/nand/raw/atmel/nand-controller.c | 11
-rw-r--r-- drivers/mtd/nand/raw/nand_base.c | 1
-rw-r--r-- drivers/mtd/nand/raw/qcom_nandc.c | 32
-rw-r--r-- drivers/mtd/spi-nor/cadence-quadspi.c | 21
-rw-r--r-- drivers/mtd/spi-nor/spi-nor.c | 136
-rw-r--r-- drivers/net/bonding/bond_main.c | 4
-rw-r--r-- drivers/net/can/dev.c | 48
-rw-r--r-- drivers/net/can/flexcan.c | 108
-rw-r--r-- drivers/net/can/rcar/rcar_can.c | 5
-rw-r--r-- drivers/net/can/rx-offload.c | 51
-rw-r--r-- drivers/net/can/spi/hi311x.c | 2
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 4
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 10
-rw-r--r-- drivers/net/can/usb/ucan.c | 7
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 10
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.c | 2
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 23
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.h | 2
-rw-r--r-- drivers/net/ethernet/amd/sunlance.c | 4
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 6
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 10
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 18
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 35
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 61
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 8
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 3
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 18
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 21
-rw-r--r-- drivers/net/ethernet/atheros/alx/alx.h | 1
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 70
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 13
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 18
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 9
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/Makefile | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4
-rw-r--r-- drivers/net/ethernet/cortina/gemini.c | 2
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 10
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 74
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 86
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.c | 12
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 11
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 17
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 12
-rw-r--r-- drivers/net/ethernet/lantiq_xrx200.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/alloc.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mr.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 69
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 44
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 11
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 12
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 7
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 51
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_rdma.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp.h | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 22
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c | 69
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 8
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/descs_com.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 2
-rw-r--r-- drivers/net/fddi/defza.c | 7
-rw-r--r-- drivers/net/fddi/defza.h | 3
-rw-r--r-- drivers/net/phy/broadcom.c | 18
-rw-r--r-- drivers/net/phy/mdio-gpio.c | 10
-rw-r--r-- drivers/net/phy/mscc.c | 14
-rw-r--r-- drivers/net/phy/realtek.c | 2
-rw-r--r-- drivers/net/team/team.c | 2
-rw-r--r-- drivers/net/tun.c | 7
-rw-r--r-- drivers/net/usb/smsc95xx.c | 9
-rw-r--r-- drivers/net/virtio_net.c | 13
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 3
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 3
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/acpi.h | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 38
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/Kconfig | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mac80211.c | 8
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02.h | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c | 4
-rw-r--r-- drivers/net/wireless/ti/wlcore/sdio.c | 17
-rw-r--r-- drivers/nvme/host/core.c | 4
-rw-r--r-- drivers/nvme/host/fc.c | 73
-rw-r--r-- drivers/nvme/host/multipath.c | 1
-rw-r--r-- drivers/nvme/target/core.c | 2
-rw-r--r-- drivers/nvme/target/rdma.c | 19
-rw-r--r-- drivers/nvmem/core.c | 10
-rw-r--r-- drivers/of/device.c | 4
-rw-r--r-- drivers/of/of_numa.c | 9
-rw-r--r-- drivers/opp/ti-opp-supply.c | 5
-rw-r--r-- drivers/pci/pci-acpi.c | 5
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson-gxbb.c | 2
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson-gxl.c | 2
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson.c | 2
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson8.c | 2
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson8b.c | 2
-rw-r--r-- drivers/rtc/hctosys.c | 4
-rw-r--r-- drivers/rtc/rtc-cmos.c | 16
-rw-r--r-- drivers/rtc/rtc-pcf2127.c | 3
-rw-r--r-- drivers/s390/net/ism_drv.c | 2
-rw-r--r-- drivers/s390/net/qeth_core.h | 27
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 172
-rw-r--r-- drivers/s390/net/qeth_core_mpc.h | 4
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 39
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 207
-rw-r--r-- drivers/scsi/Kconfig | 1
-rw-r--r-- drivers/scsi/NCR5380.c | 2
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 2
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 2
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 2
-rw-r--r-- drivers/scsi/myrb.c | 3
-rw-r--r-- drivers/scsi/myrs.c | 13
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 12
-rw-r--r-- drivers/scsi/scsi_lib.c | 8
-rw-r--r-- drivers/scsi/ufs/ufs-hisi.c | 9
-rw-r--r-- drivers/scsi/ufs/ufs_quirks.h | 6
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 9
-rw-r--r-- drivers/slimbus/qcom-ngd-ctrl.c | 3
-rw-r--r-- drivers/slimbus/slimbus.h | 6
-rw-r--r-- drivers/staging/media/davinci_vpfe/dm365_ipipeif.c | 1
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus.c | 2
-rw-r--r-- drivers/staging/vboxvideo/vbox_drv.c | 1
-rw-r--r-- drivers/staging/vboxvideo/vbox_drv.h | 2
-rw-r--r-- drivers/staging/vboxvideo/vbox_ttm.c | 65
-rw-r--r-- drivers/target/target_core_transport.c | 4
-rw-r--r-- drivers/tty/serial/sh-sci.c | 8
-rw-r--r-- drivers/tty/tty_baudrate.c | 4
-rw-r--r-- drivers/tty/vt/vt.c | 2
-rw-r--r-- drivers/uio/uio.c | 7
-rw-r--r-- drivers/usb/class/cdc-acm.c | 3
-rw-r--r-- drivers/usb/core/hub.c | 18
-rw-r--r-- drivers/usb/core/quirks.c | 14
-rw-r--r-- drivers/usb/dwc2/pci.c | 1
-rw-r--r-- drivers/usb/dwc3/core.c | 1
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r-- drivers/usb/dwc3/gadget.c | 8
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 26
-rw-r--r-- drivers/usb/host/xhci-histb.c | 6
-rw-r--r-- drivers/usb/host/xhci-hub.c | 66
-rw-r--r-- drivers/usb/host/xhci-mtk.c | 6
-rw-r--r-- drivers/usb/host/xhci-pci.c | 6
-rw-r--r-- drivers/usb/host/xhci-plat.c | 6
-rw-r--r-- drivers/usb/host/xhci-ring.c | 45
-rw-r--r-- drivers/usb/host/xhci-tegra.c | 1
-rw-r--r-- drivers/usb/host/xhci.c | 2
-rw-r--r-- drivers/usb/host/xhci.h | 3
-rw-r--r-- drivers/usb/misc/appledisplay.c | 1
-rw-r--r-- drivers/usb/typec/ucsi/Kconfig | 10
-rw-r--r-- drivers/usb/typec/ucsi/Makefile | 2
-rw-r--r-- drivers/usb/typec/ucsi/ucsi_ccg.c | 307
-rw-r--r-- drivers/video/hdmi.c | 511
-rw-r--r-- drivers/xen/grant-table.c | 2
-rw-r--r-- drivers/xen/privcmd-buf.c | 22
-rw-r--r-- fs/afs/rxrpc.c | 11
-rw-r--r-- fs/btrfs/ctree.h | 3
-rw-r--r-- fs/btrfs/disk-io.c | 63
-rw-r--r-- fs/btrfs/free-space-cache.c | 22
-rw-r--r-- fs/btrfs/inode.c | 37
-rw-r--r-- fs/btrfs/ioctl.c | 14
-rw-r--r-- fs/btrfs/super.c | 6
-rw-r--r-- fs/btrfs/tree-checker.c | 2
-rw-r--r-- fs/btrfs/tree-log.c | 17
-rw-r--r-- fs/ceph/file.c | 11
-rw-r--r-- fs/ceph/mds_client.c | 12
-rw-r--r-- fs/ceph/quota.c | 3
-rw-r--r-- fs/dax.c | 60
-rw-r--r-- fs/exec.c | 5
-rw-r--r-- fs/ext4/inode.c | 5
-rw-r--r-- fs/ext4/namei.c | 5
-rw-r--r-- fs/ext4/resize.c | 28
-rw-r--r-- fs/ext4/super.c | 17
-rw-r--r-- fs/ext4/xattr.c | 27
-rw-r--r-- fs/fuse/dev.c | 16
-rw-r--r-- fs/fuse/file.c | 4
-rw-r--r-- fs/gfs2/bmap.c | 54
-rw-r--r-- fs/gfs2/rgrp.c | 3
-rw-r--r-- fs/inode.c | 7
-rw-r--r-- fs/iomap.c | 53
-rw-r--r-- fs/namespace.c | 28
-rw-r--r-- fs/nfs/callback_proc.c | 26
-rw-r--r-- fs/nfs/delegation.c | 11
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayout.c | 21
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayout.h | 4
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayoutdev.c | 19
-rw-r--r-- fs/nfs/nfs42proc.c | 19
-rw-r--r-- fs/nfs/nfs4_fs.h | 2
-rw-r--r-- fs/nfs/nfs4state.c | 26
-rw-r--r-- fs/nfsd/nfs4proc.c | 3
-rw-r--r-- fs/nilfs2/btnode.c | 4
-rw-r--r-- fs/notify/fanotify/fanotify.c | 10
-rw-r--r-- fs/notify/fsnotify.c | 7
-rw-r--r-- fs/ocfs2/aops.c | 12
-rw-r--r-- fs/ocfs2/cluster/masklog.h | 9
-rw-r--r-- fs/read_write.c | 15
-rw-r--r-- fs/sysfs/file.c | 4
-rw-r--r-- fs/xfs/libxfs/xfs_attr_leaf.c | 11
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.c | 5
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc_btree.c | 11
-rw-r--r-- fs/xfs/xfs_bmap_util.c | 10
-rw-r--r-- fs/xfs/xfs_bmap_util.h | 3
-rw-r--r-- fs/xfs/xfs_buf_item.c | 28
-rw-r--r-- fs/xfs/xfs_file.c | 2
-rw-r--r-- fs/xfs/xfs_ioctl.c | 2
-rw-r--r-- fs/xfs/xfs_message.c | 2
-rw-r--r-- fs/xfs/xfs_reflink.c | 18
-rw-r--r-- fs/xfs/xfs_trace.h | 5
-rw-r--r-- include/asm-generic/4level-fixup.h | 2
-rw-r--r-- include/asm-generic/5level-fixup.h | 2
-rw-r--r-- include/asm-generic/pgtable-nop4d-hack.h | 2
-rw-r--r-- include/asm-generic/pgtable-nop4d.h | 2
-rw-r--r-- include/asm-generic/pgtable-nopmd.h | 2
-rw-r--r-- include/asm-generic/pgtable-nopud.h | 2
-rw-r--r-- include/asm-generic/pgtable.h | 16
-rw-r--r-- include/drm/bridge/dw_hdmi.h | 1
-rw-r--r-- include/drm/bridge/dw_mipi_dsi.h | 14
-rw-r--r-- include/drm/drmP.h | 7
-rw-r--r-- include/drm/drm_atomic.h | 10
-rw-r--r-- include/drm/drm_atomic_helper.h | 45
-rw-r--r-- include/drm/drm_atomic_state_helper.h | 73
-rw-r--r-- include/drm/drm_connector.h | 60
-rw-r--r-- include/drm/drm_crtc.h | 9
-rw-r--r-- include/drm/drm_crtc_helper.h | 6
-rw-r--r-- include/drm/drm_damage_helper.h | 99
-rw-r--r-- include/drm/drm_dp_helper.h | 98
-rw-r--r-- include/drm/drm_dp_mst_helper.h | 6
-rw-r--r-- include/drm/drm_drv.h | 14
-rw-r--r-- include/drm/drm_dsc.h | 485
-rw-r--r-- include/drm/drm_fb_cma_helper.h | 2
-rw-r--r-- include/drm/drm_file.h | 14
-rw-r--r-- include/drm/drm_fourcc.h | 89
-rw-r--r-- include/drm/drm_framebuffer.h | 24
-rw-r--r-- include/drm/drm_gem.h | 181
-rw-r--r-- include/drm/drm_gem_cma_helper.h | 24
-rw-r--r-- include/drm/drm_global.h | 53
-rw-r--r-- include/drm/drm_hdcp.h | 212
-rw-r--r-- include/drm/drm_mipi_dsi.h | 8
-rw-r--r-- include/drm/drm_mode_config.h | 27
-rw-r--r--include/drm/drm_modeset_lock.h59
-rw-r--r--include/drm/drm_plane.h44
-rw-r--r--include/drm/drm_plane_helper.h35
-rw-r--r--include/drm/drm_prime.h4
-rw-r--r--include/drm/drm_property.h3
-rw-r--r--include/drm/drm_syncobj.h4
-rw-r--r--include/drm/drm_vblank.h8
-rw-r--r--include/drm/gpu_scheduler.h9
-rw-r--r--include/drm/i915_pciids.h21
-rw-r--r--include/drm/tinydrm/tinydrm.h35
-rw-r--r--include/drm/ttm/ttm_bo_driver.h23
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h4
-rw-r--r--include/drm/ttm/ttm_memory.h4
-rw-r--r--include/linux/can/dev.h1
-rw-r--r--include/linux/can/rx-offload.h7
-rw-r--r--include/linux/ceph/ceph_features.h8
-rw-r--r--include/linux/compiler-gcc.h12
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/compiler_attributes.h14
-rw-r--r--include/linux/compiler_types.h4
-rw-r--r--include/linux/dma-direct.h2
-rw-r--r--include/linux/dma-fence.h1
-rw-r--r--include/linux/efi.h7
-rw-r--r--include/linux/hdmi.h24
-rw-r--r--include/linux/hid.h32
-rw-r--r--include/linux/i8253.h1
-rw-r--r--include/linux/mm.h8
-rw-r--r--include/linux/mtd/nand.h7
-rw-r--r--include/linux/net_dim.h2
-rw-r--r--include/linux/netdevice.h20
-rw-r--r--include/linux/netfilter/ipset/ip_set.h2
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h4
-rw-r--r--include/linux/nmi.h2
-rw-r--r--include/linux/reservation.h12
-rw-r--r--include/linux/skbuff.h18
-rw-r--r--include/linux/swap.h4
-rw-r--r--include/linux/sysfs.h8
-rw-r--r--include/linux/tcp.h1
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/xarray.h267
-rw-r--r--include/media/v4l2-mem2mem.h2
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/af_rxrpc.h3
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h39
-rw-r--r--include/net/sctp/sctp.h12
-rw-r--r--include/trace/events/kyber.h8
-rw-r--r--include/trace/events/rxrpc.h2
-rw-r--r--include/uapi/drm/amdgpu_drm.h6
-rw-r--r--include/uapi/drm/drm_fourcc.h15
-rw-r--r--include/uapi/drm/drm_mode.h19
-rw-r--r--include/uapi/drm/i915_drm.h8
-rw-r--r--include/uapi/drm/v3d_drm.h39
-rw-r--r--include/uapi/drm/virtgpu_drm.h13
-rw-r--r--include/uapi/linux/input-event-codes.h10
-rw-r--r--include/uapi/linux/kfd_ioctl.h44
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h4
-rw-r--r--include/uapi/linux/netfilter_bridge.h4
-rw-r--r--include/uapi/linux/sctp.h3
-rw-r--r--include/uapi/linux/v4l2-controls.h5
-rw-r--r--include/uapi/linux/virtio_gpu.h18
-rw-r--r--include/video/imx-ipu-v3.h9
-rw-r--r--include/xen/xen-ops.h12
-rw-r--r--kernel/bpf/core.c4
-rw-r--r--kernel/bpf/syscall.c35
-rw-r--r--kernel/debug/kdb/kdb_bt.c4
-rw-r--r--kernel/debug/kdb/kdb_io.c15
-rw-r--r--kernel/debug/kdb/kdb_keyboard.c4
-rw-r--r--kernel/debug/kdb/kdb_main.c35
-rw-r--r--kernel/debug/kdb/kdb_private.h2
-rw-r--r--kernel/debug/kdb/kdb_support.c28
-rw-r--r--kernel/dma/swiotlb.c3
-rw-r--r--kernel/resource.c19
-rw-r--r--kernel/sched/core.c5
-rw-r--r--kernel/sched/fair.c66
-rw-r--r--kernel/sched/psi.c43
-rw-r--r--kernel/time/posix-cpu-timers.c3
-rw-r--r--kernel/trace/trace_probe.c2
-rw-r--r--kernel/user_namespace.c12
-rw-r--r--lib/raid6/test/Makefile4
-rw-r--r--lib/test_firmware.c1
-rw-r--r--lib/test_xarray.c50
-rw-r--r--lib/ubsan.c3
-rw-r--r--lib/xarray.c139
-rw-r--r--mm/gup.c10
-rw-r--r--mm/hugetlb.c23
-rw-r--r--mm/memblock.c2
-rw-r--r--mm/page_alloc.c28
-rw-r--r--mm/shmem.c6
-rw-r--r--mm/swapfile.c6
-rw-r--r--mm/vmscan.c22
-rw-r--r--mm/vmstat.c7
-rw-r--r--mm/z3fold.c101
-rw-r--r--net/batman-adv/bat_v_elp.c6
-rw-r--r--net/batman-adv/fragmentation.c2
-rw-r--r--net/bridge/br_private.h7
-rw-r--r--net/bridge/br_vlan.c3
-rw-r--r--net/can/raw.c15
-rw-r--r--net/ceph/messenger.c12
-rw-r--r--net/core/dev.c13
-rw-r--r--net/core/flow_dissector.c4
-rw-r--r--net/core/netpoll.c3
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/core/skbuff.c7
-rw-r--r--net/core/sock.c1
-rw-r--r--net/ipv4/inet_fragment.c29
-rw-r--r--net/ipv4/ip_fragment.c12
-rw-r--r--net/ipv4/ip_sockglue.c6
-rw-r--r--net/ipv4/ip_tunnel_core.c2
-rw-r--r--net/ipv4/tcp_input.c15
-rw-r--r--net/ipv4/tcp_output.c6
-rw-r--r--net/ipv4/tcp_timer.c2
-rw-r--r--net/ipv6/addrconf.c19
-rw-r--r--net/ipv6/af_inet6.c5
-rw-r--r--net/ipv6/anycast.c80
-rw-r--r--net/ipv6/ip6_fib.c4
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c13
-rw-r--r--net/ipv6/route.c14
-rw-r--r--net/l2tp/l2tp_core.c9
-rw-r--r--net/netfilter/ipset/ip_set_core.c43
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c8
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c17
-rw-r--r--net/netfilter/nf_conntrack_core.c13
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c13
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c11
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c11
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c11
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c11
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c15
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c11
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c47
-rw-r--r--net/netfilter/nft_compat.c21
-rw-r--r--net/netfilter/nft_numgen.c127
-rw-r--r--net/netfilter/nft_osf.c2
-rw-r--r--net/netfilter/xt_IDLETIMER.c20
-rw-r--r--net/openvswitch/conntrack.c3
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/rxrpc/af_rxrpc.c27
-rw-r--r--net/rxrpc/ar-internal.h1
-rw-r--r--net/rxrpc/call_event.c18
-rw-r--r--net/rxrpc/output.c35
-rw-r--r--net/sched/act_mirred.c3
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_police.c36
-rw-r--r--net/sched/cls_flower.c14
-rw-r--r--net/sched/sch_fq.c31
-rw-r--r--net/sched/sch_netem.c9
-rw-r--r--net/sctp/output.c24
-rw-r--r--net/sctp/outqueue.c2
-rw-r--r--net/sctp/socket.c26
-rw-r--r--net/sctp/stream.c1
-rw-r--r--net/smc/af_smc.c11
-rw-r--r--net/smc/smc_cdc.c26
-rw-r--r--net/smc/smc_cdc.h60
-rw-r--r--net/smc/smc_core.c20
-rw-r--r--net/smc/smc_core.h5
-rw-r--r--net/smc/smc_ism.c43
-rw-r--r--net/smc/smc_ism.h1
-rw-r--r--net/smc/smc_wr.c4
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/auth_generic.c8
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c61
-rw-r--r--net/sunrpc/xdr.c7
-rw-r--r--net/tipc/discover.c19
-rw-r--r--net/tipc/link.c11
-rw-r--r--net/tipc/net.c45
-rw-r--r--net/tipc/net.h2
-rw-r--r--net/tipc/socket.c15
-rw-r--r--scripts/coccinelle/api/drm-get-put.cocci78
-rwxr-xr-xscripts/faddr2line2
-rwxr-xr-xscripts/kconfig/merge_config.sh7
-rwxr-xr-xscripts/package/builddeb6
-rwxr-xr-xscripts/package/mkdebian7
-rwxr-xr-xscripts/package/mkspec11
-rwxr-xr-xscripts/setlocalversion2
-rwxr-xr-xscripts/spdxcheck.py1
-rw-r--r--security/integrity/digsig_asymmetric.c1
-rw-r--r--security/selinux/hooks.c3
-rw-r--r--security/selinux/ss/mls.c10
-rw-r--r--sound/core/oss/pcm_oss.c6
-rw-r--r--sound/core/oss/pcm_plugin.c6
-rw-r--r--sound/pci/hda/patch_ca0132.c5
-rw-r--r--sound/pci/hda/patch_realtek.c1
-rw-r--r--sound/pci/hda/thinkpad_helper.c4
-rw-r--r--sound/x86/intel_hdmi_audio.c26
-rw-r--r--tools/arch/arm64/include/asm/barrier.h133
-rw-r--r--tools/perf/Documentation/perf-list.txt1
-rw-r--r--tools/perf/Makefile.perf2
-rw-r--r--tools/perf/builtin-record.c7
-rw-r--r--tools/perf/builtin-stat.c28
-rw-r--r--tools/perf/builtin-top.c3
-rw-r--r--tools/perf/builtin-trace.c34
-rw-r--r--tools/perf/examples/bpf/augmented_raw_syscalls.c131
-rw-r--r--tools/perf/jvmti/jvmti_agent.c49
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py493
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling1
-rw-r--r--tools/perf/util/evlist.c27
-rw-r--r--tools/perf/util/evlist.h3
-rw-r--r--tools/perf/util/evsel.c1
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c4
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.c5
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.h1
-rw-r--r--tools/perf/util/intel-pt.c16
-rw-r--r--tools/perf/util/pmu.c2
-rw-r--r--tools/power/cpupower/Makefile12
-rw-r--r--tools/power/cpupower/bench/Makefile2
-rw-r--r--tools/power/cpupower/debug/x86_64/Makefile4
-rw-r--r--tools/power/cpupower/lib/cpufreq.c2
-rw-r--r--tools/power/cpupower/lib/cpuidle.c2
-rw-r--r--tools/power/cpupower/lib/cpupower.c4
-rw-r--r--tools/power/cpupower/lib/cpupower_intern.h2
-rw-r--r--tools/testing/nvdimm/test/nfit.c8
-rw-r--r--tools/testing/selftests/powerpc/mm/wild_bctr.c21
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py18
1450 files changed, 47553 insertions, 25138 deletions
diff --git a/.mailmap b/.mailmap
index a76be45fef6c..28fecafa6506 100644
--- a/.mailmap
+++ b/.mailmap
@@ -159,6 +159,7 @@ Peter Oruba <peter@oruba.de>
Peter Oruba <peter.oruba@amd.com>
Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
Praveen BP <praveenbp@ti.com>
+Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
diff --git a/CREDITS b/CREDITS
index 5befd2d714d0..c9273393fe14 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2138,6 +2138,10 @@ E: paul@laufernet.com
D: Soundblaster driver fixes, ISAPnP quirk
S: California, USA
+N: Jarkko Lavinen
+E: jarkko.lavinen@nokia.com
+D: OMAP MMC support
+
N: Jonathan Layes
D: ARPD support
@@ -2200,6 +2204,10 @@ S: Post Office Box 371
S: North Little Rock, Arkansas 72115
S: USA
+N: Christopher Li
+E: sparse@chrisli.org
+D: Sparse maintainer 2009 - 2018
+
N: Stephan Linz
E: linz@mazet.de
E: Stephan.Linz@gmx.de
diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
index fb3d1e03b881..1e5d172e0646 100644
--- a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
+++ b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
@@ -37,8 +37,8 @@ Description:
0-| / \/ \/
+---0----1----2----3----4----5----6------------> time (s)
- 2. To make the LED go instantly from one brigntess value to another,
- we should use use zero-time lengths (the brightness must be same as
+ 2. To make the LED go instantly from one brightness value to another,
+ we should use zero-time lengths (the brightness must be same as
the previous tuple's). So the format should be:
"brightness_1 duration_1 brightness_1 0 brightness_2 duration_2
brightness_2 0 ...". For example:
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 81d1d5a74728..19f4423e70d9 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4713,6 +4713,8 @@
prevent spurious wakeup);
n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a
pause after every control message);
+ o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
+ delay after resetting its port);
Example: quirks=0781:5580:bk,0a5c:5834:gij
usbhid.mousepoll=
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 47153e64dfb5..7eca9026a9ed 100644
--- a/Documentation/admin-guide/pm/cpufreq.rst
+++ b/Documentation/admin-guide/pm/cpufreq.rst
@@ -150,7 +150,7 @@ data structures necessary to handle the given policy and, possibly, to add
a governor ``sysfs`` interface to it. Next, the governor is started by
invoking its ``->start()`` callback.
-That callback it expected to register per-CPU utilization update callbacks for
+That callback is expected to register per-CPU utilization update callbacks for
all of the online CPUs belonging to the given policy with the CPU scheduler.
The utilization update callbacks will be invoked by the CPU scheduler on
important events, like task enqueue and dequeue, on every iteration of the
diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst
index 164bf71149fd..30187d49dc2c 100644
--- a/Documentation/admin-guide/security-bugs.rst
+++ b/Documentation/admin-guide/security-bugs.rst
@@ -32,16 +32,17 @@ Disclosure and embargoed information
The security list is not a disclosure channel. For that, see Coordination
below.
-Once a robust fix has been developed, our preference is to release the
-fix in a timely fashion, treating it no differently than any of the other
-thousands of changes and fixes the Linux kernel project releases every
-month.
-
-However, at the request of the reporter, we will postpone releasing the
-fix for up to 5 business days after the date of the report or after the
-embargo has lifted; whichever comes first. The only exception to that
-rule is if the bug is publicly known, in which case the preference is to
-release the fix as soon as it's available.
+Once a robust fix has been developed, the release process starts. Fixes
+for publicly known bugs are released immediately.
+
+Although our preference is to release fixes for publicly undisclosed bugs
+as soon as they become available, this may be postponed at the request of
+the reporter or an affected party for up to 7 calendar days from the start
+of the release process, with an exceptional extension to 14 calendar days
+if it is agreed that the criticality of the bug requires more time. The
+only valid reason for deferring the publication of a fix is to accommodate
+the logistics of QA and large scale rollouts which require release
+coordination.
Whilst embargoed information may be shared with trusted individuals in
order to develop a fix, such information will not be published alongside
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index a4e705108f42..dbe96cb5558e 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -74,7 +74,8 @@ using :c:func:`xa_load`. xa_store will overwrite any entry with the
new entry and return the previous entry stored at that index. You can
use :c:func:`xa_erase` instead of calling :c:func:`xa_store` with a
``NULL`` entry. There is no difference between an entry that has never
-been stored to and one that has most recently had ``NULL`` stored to it.
+been stored to, one that has been erased and one that has most recently
+had ``NULL`` stored to it.
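+
+For illustration, a minimal sketch of these basics (``item`` is a
+hypothetical pointer, and an ``array`` defined with
+:c:func:`DEFINE_XARRAY` plus ``GFP_KERNEL`` context are assumed)::
+
+    void *old = xa_store(&array, 7, item, GFP_KERNEL); /* previous entry */
+    void *cur = xa_load(&array, 7);  /* item, the value just stored */
+
+    xa_erase(&array, 7);  /* equivalent to storing NULL at index 7 */
+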
You can conditionally replace an entry at an index by using
:c:func:`xa_cmpxchg`. Like :c:func:`cmpxchg`, it will only succeed if
@@ -105,23 +106,44 @@ may result in the entry being marked at some, but not all of the other
indices. Storing into one index may result in the entry retrieved by
some, but not all of the other indices changing.
+Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
+will not need to allocate memory. The :c:func:`xa_reserve` function
+will store a reserved entry at the indicated index. Users of the normal
+API will see this entry as containing ``NULL``. If you do not need to
+use the reserved entry, you can call :c:func:`xa_release` to remove the
+unused entry. If another user has stored to the entry in the meantime,
+:c:func:`xa_release` will do nothing; if instead you want the entry to
+become ``NULL``, you should use :c:func:`xa_erase`.
+
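+A sketch of that reservation pattern (``item`` and ``use_it`` are
+hypothetical; error handling elided)::
+
+    if (xa_reserve(&array, index, GFP_KERNEL) == 0) {
+        if (use_it)  /* use_it: hypothetical condition */
+            xa_store(&array, index, item, GFP_KERNEL);
+        else
+            xa_release(&array, index);  /* drop the unused reservation */
+    }
+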
+If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
+will return ``true``.
+
Finally, you can remove all entries from an XArray by calling
:c:func:`xa_destroy`. If the XArray entries are pointers, you may wish
to free the entries first. You can do this by iterating over all present
entries in the XArray using the :c:func:`xa_for_each` iterator.
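+
+For example, a sketch that frees pointer entries before tearing the array
+down (``struct foo`` is hypothetical, and the three-argument form of
+:c:func:`xa_for_each` is an assumption, as the iterator's argument list
+has differed between kernel releases)::
+
+    unsigned long index;
+    struct foo *entry;  /* foo: hypothetical entry type */
+
+    xa_for_each(&array, index, entry)
+        kfree(entry);
+    xa_destroy(&array);
+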
-ID assignment
--------------
+Allocating XArrays
+------------------
+
+If you use :c:func:`DEFINE_XARRAY_ALLOC` to define the XArray, or
+initialise it by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`,
+the XArray changes to track whether entries are in use or not.
You can call :c:func:`xa_alloc` to store the entry at any unused index
in the XArray. If you need to modify the array from interrupt context,
you can use :c:func:`xa_alloc_bh` or :c:func:`xa_alloc_irq` to disable
-interrupts while allocating the ID. Unlike :c:func:`xa_store`, allocating
-a ``NULL`` pointer does not delete an entry. Instead it reserves an
-entry like :c:func:`xa_reserve` and you can release it using either
-:c:func:`xa_erase` or :c:func:`xa_release`. To use ID assignment, the
-XArray must be defined with :c:func:`DEFINE_XARRAY_ALLOC`, or initialised
-by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`,
+interrupts while allocating the ID.
+
+Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert`
+will mark the entry as being allocated. Unlike a normal XArray, storing
+``NULL`` will mark the entry as being in use, like :c:func:`xa_reserve`.
+To free an entry, use :c:func:`xa_erase` (or :c:func:`xa_release` if
+you only want to free the entry if it's ``NULL``).
+
+You cannot use ``XA_MARK_0`` with an allocating XArray as this mark
+is used to track whether an entry is free or not. The other marks are
+available for your use.
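+
+An indicative sketch (``item`` is a hypothetical pointer, and the
+:c:func:`xa_alloc` argument order shown is an assumption, as it has
+changed between kernel releases)::
+
+    DEFINE_XARRAY_ALLOC(array);
+    u32 id;
+
+    /* assumed form: the new index is returned through the id pointer,
+     * and allocation is limited to indices no greater than the third
+     * argument */
+    if (xa_alloc(&array, &id, UINT_MAX, item, GFP_KERNEL) == 0)
+        pr_debug("item stored at allocated index %u\n", id);
+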
Memory allocation
-----------------
@@ -158,6 +180,8 @@ Takes RCU read lock:
Takes xa_lock internally:
* :c:func:`xa_store`
+ * :c:func:`xa_store_bh`
+ * :c:func:`xa_store_irq`
* :c:func:`xa_insert`
* :c:func:`xa_erase`
* :c:func:`xa_erase_bh`
@@ -167,6 +191,9 @@ Takes xa_lock internally:
* :c:func:`xa_alloc`
* :c:func:`xa_alloc_bh`
* :c:func:`xa_alloc_irq`
+ * :c:func:`xa_reserve`
+ * :c:func:`xa_reserve_bh`
+ * :c:func:`xa_reserve_irq`
* :c:func:`xa_destroy`
* :c:func:`xa_set_mark`
* :c:func:`xa_clear_mark`
@@ -177,6 +204,7 @@ Assumes xa_lock held on entry:
* :c:func:`__xa_erase`
* :c:func:`__xa_cmpxchg`
* :c:func:`__xa_alloc`
+ * :c:func:`__xa_reserve`
* :c:func:`__xa_set_mark`
* :c:func:`__xa_clear_mark`
@@ -234,7 +262,8 @@ Sharing the XArray with interrupt context is also possible, either
using :c:func:`xa_lock_irqsave` in both the interrupt handler and process
context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock`
in the interrupt handler. Some of the more common patterns have helper
-functions such as :c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
+functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`,
+:c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
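+
+A sketch of that split (:c:func:`xa_erase_irq` is shorthand for the
+process-context half shown here)::
+
+    /* process context */
+    xa_lock_irq(&array);
+    __xa_erase(&array, index);
+    xa_unlock_irq(&array);
+
+    /* interrupt handler */
+    xa_lock(&array);
+    __xa_erase(&array, index);
+    xa_unlock(&array);
+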
Sometimes you need to protect access to the XArray with a mutex because
that lock sits above another mutex in the locking hierarchy. That does
@@ -322,7 +351,8 @@ to :c:func:`xas_retry`, and retry the operation if it returns ``true``.
- :c:func:`xa_is_zero`
- Zero entries appear as ``NULL`` through the Normal API, but occupy
an entry in the XArray which can be used to reserve the index for
- future use.
+ future use. This is used by allocating XArrays for allocated entries
+ which are ``NULL``.
Other internal entries may be added in the future. As far as possible, they
will be handled by :c:func:`xas_retry`.
diff --git a/Documentation/cpu-freq/cpufreq-stats.txt b/Documentation/cpu-freq/cpufreq-stats.txt
index a873855c811d..14378cecb172 100644
--- a/Documentation/cpu-freq/cpufreq-stats.txt
+++ b/Documentation/cpu-freq/cpufreq-stats.txt
@@ -86,9 +86,11 @@ transitions.
This will give a fine grained information about all the CPU frequency
transitions. The cat output here is a two dimensional matrix, where an entry
<i,j> (row i, column j) represents the count of number of transitions from
-Freq_i to Freq_j. Freq_i is in descending order with increasing rows and
-Freq_j is in descending order with increasing columns. The output here also
-contains the actual freq values for each row and column for better readability.
+Freq_i to Freq_j. The rows (Freq_i) and columns (Freq_j) follow the order in
+which the driver initially provided the frequency table to the cpufreq core,
+and so may be sorted (ascending or descending) or unsorted. The output here
+also contains the actual freq values for each row and column for better
+readability.
If the transition table is bigger than PAGE_SIZE, reading this will
return an -EFBIG error.
diff --git a/Documentation/devicetree/bindings/arm/shmobile.txt b/Documentation/devicetree/bindings/arm/shmobile.txt
index f5e0f82fd503..58c4256d37a3 100644
--- a/Documentation/devicetree/bindings/arm/shmobile.txt
+++ b/Documentation/devicetree/bindings/arm/shmobile.txt
@@ -27,7 +27,7 @@ SoCs:
compatible = "renesas,r8a77470"
- RZ/G2M (R8A774A1)
compatible = "renesas,r8a774a1"
- - RZ/G2E (RA8774C0)
+ - RZ/G2E (R8A774C0)
compatible = "renesas,r8a774c0"
- R-Car M1A (R8A77781)
compatible = "renesas,r8a7778"
diff --git a/Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt b/Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt
index 87dfb33fb3be..b9d533717dff 100644
--- a/Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt
+++ b/Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt
@@ -1,11 +1,14 @@
-Device tree bindings for Allwinner A64 DE2 bus
+Device tree bindings for Allwinner DE2/3 bus
The Allwinner A64 DE2 is on a special bus, which needs a SRAM region (SRAM C)
-to be claimed for enabling the access.
+to be claimed for enabling the access. The DE3 on the Allwinner H6 is in the
+same situation, and this binding applies to it as well.
Required properties:
- - compatible: Should contain "allwinner,sun50i-a64-de2"
+ - compatible: Should be one of:
+ - "allwinner,sun50i-a64-de2"
+ - "allwinner,sun50i-h6-de3", "allwinner,sun50i-a64-de2"
- reg: A resource specifier for the register space
- #address-cells: Must be set to 1
- #size-cells: Must be set to 1
diff --git a/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt b/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt
deleted file mode 100644
index 2aa06ac0fac5..000000000000
--- a/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-Generic ARM big LITTLE cpufreq driver's DT glue
------------------------------------------------
-
-This is DT specific glue layer for generic cpufreq driver for big LITTLE
-systems.
-
-Both required and optional properties listed below must be defined
-under node /cpus/cpu@x. Where x is the first cpu inside a cluster.
-
-FIXME: Cpus should boot in the order specified in DT and all cpus for a cluster
-must be present contiguously. Generic DT driver will check only node 'x' for
-cpu:x.
-
-Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/opp/opp.txt
- for details
-
-Optional properties:
-- clock-latency: Specify the possible maximum transition latency for clock,
- in unit of nanoseconds.
-
-Examples:
-
-cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- compatible = "arm,cortex-a15";
- reg = <0>;
- next-level-cache = <&L2>;
- operating-points = <
- /* kHz uV */
- 792000 1100000
- 396000 950000
- 198000 850000
- >;
- clock-latency = <61036>; /* two CLK32 periods */
- };
-
- cpu@1 {
- compatible = "arm,cortex-a15";
- reg = <1>;
- next-level-cache = <&L2>;
- };
-
- cpu@100 {
- compatible = "arm,cortex-a7";
- reg = <100>;
- next-level-cache = <&L2>;
- operating-points = <
- /* kHz uV */
- 792000 950000
- 396000 750000
- 198000 450000
- >;
- clock-latency = <61036>; /* two CLK32 periods */
- };
-
- cpu@101 {
- compatible = "arm,cortex-a7";
- reg = <101>;
- next-level-cache = <&L2>;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
index 057b81335775..c65fd7a7467c 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
@@ -67,6 +67,8 @@ Required properties:
Optional properties:
- power-domains: Optional phandle to associated power domain as described in
the file ../power/power_domain.txt
+- amlogic,canvas: phandle to canvas provider node as described in the file
+ ../soc/amlogic/amlogic,canvas.txt
Required nodes:
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
index 3aeb0ec06fd0..ba5469dd09f3 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
@@ -13,6 +13,7 @@ Required properties:
- "renesas,r8a7793-lvds" for R8A7793 (R-Car M2-N) compatible LVDS encoders
- "renesas,r8a7795-lvds" for R8A7795 (R-Car H3) compatible LVDS encoders
- "renesas,r8a7796-lvds" for R8A7796 (R-Car M3-W) compatible LVDS encoders
+ - "renesas,r8a77965-lvds" for R8A77965 (R-Car M3-N) compatible LVDS encoders
- "renesas,r8a77970-lvds" for R8A77970 (R-Car V3M) compatible LVDS encoders
- "renesas,r8a77980-lvds" for R8A77980 (R-Car V3H) compatible LVDS encoders
- "renesas,r8a77990-lvds" for R8A77990 (R-Car E3) compatible LVDS encoders
diff --git a/Documentation/devicetree/bindings/display/himax,hx8357d.txt b/Documentation/devicetree/bindings/display/himax,hx8357d.txt
new file mode 100644
index 000000000000..e641f664763d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/himax,hx8357d.txt
@@ -0,0 +1,26 @@
+Himax HX8357D display panels
+
+This binding is for display panels using a Himax HX8357D controller in SPI
+mode, such as the Adafruit 3.5" TFT for Raspberry Pi.
+
+Required properties:
+- compatible: "adafruit,yx350hv15", "himax,hx8357d"
+- dc-gpios: D/C pin
+- reg: address of the panel on the SPI bus
+
+The node for this driver must be a child node of a SPI controller, hence
+all mandatory properties described in ../spi/spi-bus.txt must be specified.
+
+Optional properties:
+- rotation: panel rotation in degrees counter clockwise (0,90,180,270)
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+	display@0 {
+ compatible = "adafruit,yx350hv15", "himax,hx8357d";
+ reg = <0>;
+ spi-max-frequency = <32000000>;
+ dc-gpios = <&gpio0 25 GPIO_ACTIVE_HIGH>;
+ rotation = <90>;
+ backlight = <&backlight>;
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g101evn010 b/Documentation/devicetree/bindings/display/panel/auo,g101evn010
new file mode 100644
index 000000000000..bc6a0c858e23
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/auo,g101evn010
@@ -0,0 +1,12 @@
+AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,g101evn010"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.txt b/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.txt
new file mode 100644
index 000000000000..35bc0c839f49
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.txt
@@ -0,0 +1,12 @@
+Banana Pi 7" (S070WV20-CT16) TFT LCD Panel
+
+Required properties:
+- compatible: should be "bananapi,s070wv20-ct16"
+- power-supply: see ./panel-common.txt
+
+Optional properties:
+- enable-gpios: see ./simple-panel.txt
+- backlight: see ./simple-panel.txt
+
+This binding is compatible with the simple-panel binding, which is specified
+in ./simple-panel.txt.
diff --git a/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt b/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt
new file mode 100644
index 000000000000..057f7f3f6dbe
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt
@@ -0,0 +1,12 @@
+CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
+
+Required properties:
+- compatible: should be "cdtech,s043wq26h-ct7"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt b/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt
new file mode 100644
index 000000000000..505615dfa0df
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt
@@ -0,0 +1,12 @@
+CDTech(H.K.) Electronics Limited 7" 800x480 color TFT-LCD panel
+
+Required properties:
+- compatible: should be "cdtech,s070wv95-ct16"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt b/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt
new file mode 100644
index 000000000000..fbf5dcd15661
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt
@@ -0,0 +1,12 @@
+DLC Display Co. DLC1010GIG 10.1" WXGA TFT LCD Panel
+
+Required properties:
+- compatible: should be "dlc,dlc1010gig"
+- power-supply: See simple-panel.txt
+
+Optional properties:
+- enable-gpios: See simple-panel.txt
+- backlight: See simple-panel.txt
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
new file mode 100644
index 000000000000..a89f9c830a85
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
@@ -0,0 +1,42 @@
+Binding for Olimex Ltd. LCD-OLinuXino bridge panel.
+
+This device can be used as a bridge between a host controller and LCD panels.
+Currently supported LCDs are:
+ - LCD-OLinuXino-4.3TS
+ - LCD-OLinuXino-5
+ - LCD-OLinuXino-7
+ - LCD-OLinuXino-10
+
+The panel itself contains:
+ - AT24C16C EEPROM holding panel identification and timing requirements
+ - AR1021 resistive touch screen controller (optional)
+ - FT5x6 capacitive touch screen controller (optional)
+ - GT911/GT928 capacitive touch screen controller (optional)
+
+The above chips share the same I2C bus. The EEPROM is factory preprogrammed with
+device information (id, serial, etc.) and timing requirements.
+
+Touchscreen bindings can be found in these files:
+ - input/touchscreen/goodix.txt
+ - input/touchscreen/edt-ft5x06.txt
+ - input/touchscreen/ar1021.txt
+
+Required properties:
+ - compatible: should be "olimex,lcd-olinuxino"
+ - reg: address of the configuration EEPROM, should be <0x50>
+ - power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties:
+ - enable-gpios: GPIO pin to enable or disable the panel
+ - backlight: phandle of the backlight device attached to the panel
+
+Example:
+&i2c2 {
+ panel@50 {
+ compatible = "olimex,lcd-olinuxino";
+ reg = <0x50>;
+ power-supply = <&reg_vcc5v0>;
+ enable-gpios = <&pio 7 8 GPIO_ACTIVE_HIGH>;
+ backlight = <&backlight>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt
new file mode 100644
index 000000000000..b94e366f451b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt
@@ -0,0 +1,30 @@
+Samsung S6D16D0 4" 864x480 AMOLED panel
+
+Required properties:
+ - compatible: should be:
+ "samsung,s6d16d0",
+ - reg: the virtual channel number of a DSI peripheral
+ - vdd1-supply: I/O voltage supply
+ - reset-gpios: a GPIO spec for the reset pin (active low)
+
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in
+media/video-interfaces.txt. This node should describe the panel's video bus.
+
+Example:
+&dsi {
+ ...
+
+ panel@0 {
+ compatible = "samsung,s6d16d0";
+ reg = <0>;
+ vdd1-supply = <&foo>;
+ reset-gpios = <&foo_gpio 0 GPIO_ACTIVE_LOW>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt b/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
index f5e3c6f2095a..40f3d7c713bb 100644
--- a/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
+++ b/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
@@ -1,47 +1,70 @@
TPO TPG110 Panel
================
-This binding builds on the DPI bindings, adding a few properties
-as a superset of a DPI. See panel-dpi.txt for the required DPI
-bindings.
+This panel driver is a component that acts as an intermediary
+between an RGB output and a variety of panels. The panel
+driver is strapped up in electronics to the desired resolution
+and other properties, and has a control interface over 3WIRE
+SPI. By talking to the TPG110 over SPI, the strapped properties
+can be discovered and the hardware is therefore mostly
+self-describing.
+
+ +--------+
+SPI -> | TPO | -> physical display
+RGB -> | TPG110 |
+ +--------+
+
+If some electrical strap or alternate resolution is desired,
+this can be set up by taking software control of the display
+over the SPI interface. The interface can also adjust
+for properties of the display such as gamma correction and
+certain electrical driving levels.
+
+The TPG110 does not know the physical dimensions of the panel
+connected, so this needs to be specified in the device tree.
+
+It requires a GPIO line for control of its reset line.
+
+The serial protocol uses line names that resemble I2C, but the protocol
+itself is 3WIRE SPI, not I2C.
Required properties:
-- compatible : "tpo,tpg110"
+- compatible : one of:
+ "ste,nomadik-nhk15-display", "tpo,tpg110"
+ "tpo,tpg110"
- grestb-gpios : panel reset GPIO
-- scen-gpios : serial control enable GPIO
-- scl-gpios : serial control clock line GPIO
-- sda-gpios : serial control data line GPIO
+- width-mm : see display/panel/panel-common.txt
+- height-mm : see display/panel/panel-common.txt
+
+The device needs to be a child of an SPI bus, see
+spi/spi-bus.txt. The SPI child must set the following
+properties:
+- spi-3wire
+- spi-max-frequency = <3000000>;
+as these are characteristics of this device.
-Required nodes:
-- Video port for DPI input, see panel-dpi.txt
-- Panel timing for DPI setup, see panel-dpi.txt
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in
+media/video-interfaces.txt. This node should describe the panel's video bus.
Example
-------
-panel {
- compatible = "tpo,tpg110", "panel-dpi";
- grestb-gpios = <&stmpe_gpio44 5 GPIO_ACTIVE_LOW>;
- scen-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
- scl-gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
- sda-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+panel: display@0 {
+ compatible = "tpo,tpg110";
+ reg = <0>;
+ spi-3wire;
+ /* 320 ns min period ~= 3 MHz */
+ spi-max-frequency = <3000000>;
+ /* Width and height from data sheet */
+ width-mm = <116>;
+ height-mm = <87>;
+ grestb-gpios = <&foo_gpio 5 GPIO_ACTIVE_LOW>;
backlight = <&bl>;
port {
nomadik_clcd_panel: endpoint {
- remote-endpoint = <&nomadik_clcd_pads>;
+ remote-endpoint = <&foo>;
};
};
-
- panel-timing {
- clock-frequency = <33200000>;
- hactive = <800>;
- hback-porch = <216>;
- hfront-porch = <40>;
- hsync-len = <1>;
- vactive = <480>;
- vback-porch = <35>;
- vfront-porch = <10>;
- vsync-len = <1>;
- };
};
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 9de67be632d1..3c855d9f2719 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -4,7 +4,9 @@ Required Properties:
- compatible: must be one of the following.
- "renesas,du-r8a7743" for R8A7743 (RZ/G1M) compatible DU
+ - "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU
- "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
+ - "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
- "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
@@ -52,7 +54,9 @@ corresponding to each DU output.
Port0 Port1 Port2 Port3
-----------------------------------------------------------------------------
R8A7743 (RZ/G1M) DPAD 0 LVDS 0 - -
+ R8A7744 (RZ/G1N) DPAD 0 LVDS 0 - -
R8A7745 (RZ/G1E) DPAD 0 DPAD 1 - -
+ R8A77470 (RZ/G1C) DPAD 0 DPAD 1 LVDS 0 -
R8A7779 (R-Car H1) DPAD 0 DPAD 1 - -
R8A7790 (R-Car H2) DPAD 0 LVDS 0 LVDS 1 -
R8A7791 (R-Car M2-W) DPAD 0 LVDS 0 - -
diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
index adc94fc3c9f8..39143424a474 100644
--- a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
@@ -13,6 +13,7 @@ Required properties:
- compatible: should be one of the following:
"rockchip,rk3288-dw-hdmi"
+ "rockchip,rk3328-dw-hdmi"
"rockchip,rk3399-dw-hdmi"
- reg: See dw_hdmi.txt.
- reg-io-width: See dw_hdmi.txt. Shall be 4.
@@ -34,6 +35,8 @@ Optional properties
- clock-names: May contain "cec" as defined in dw_hdmi.txt.
- clock-names: May contain "grf", power for grf io.
- clock-names: May contain "vpll", external clock for some hdmi phy.
+- phys: from general PHY binding: the phandle for the PHY device.
+- phy-names: Should be "hdmi" if phys references an external phy.
Example:
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index 7854fff4fc16..f426bdb42f18 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -79,6 +79,7 @@ Required properties:
- compatible: value must be one of:
* "allwinner,sun8i-a83t-dw-hdmi"
* "allwinner,sun50i-a64-dw-hdmi", "allwinner,sun8i-a83t-dw-hdmi"
+ * "allwinner,sun50i-h6-dw-hdmi"
- reg: base address and size of memory-mapped region
- reg-io-width: See dw_hdmi.txt. Shall be 1.
- interrupts: HDMI interrupt number
@@ -86,9 +87,14 @@ Required properties:
* iahb: the HDMI bus clock
* isfr: the HDMI register clock
* tmds: TMDS clock
+ * cec: HDMI CEC clock (H6 only)
+ * hdcp: HDCP clock (H6 only)
+ * hdcp-bus: HDCP bus clock (H6 only)
- clock-names: the clock names mentioned above
- - resets: phandle to the reset controller
- - reset-names: must be "ctrl"
+ - resets:
+ * ctrl: HDMI controller reset
+ * hdcp: HDCP reset (H6 only)
+ - reset-names: reset names mentioned above
- phys: phandle to the DWC HDMI PHY
- phy-names: must be "phy"
@@ -109,6 +115,7 @@ Required properties:
* allwinner,sun8i-h3-hdmi-phy
* allwinner,sun8i-r40-hdmi-phy
* allwinner,sun50i-a64-hdmi-phy
+ * allwinner,sun50i-h6-hdmi-phy
- reg: base address and size of memory-mapped region
- clocks: phandles to the clocks feeding the HDMI PHY
* bus: the HDMI PHY interface clock
@@ -158,6 +165,7 @@ Required properties:
* allwinner,sun9i-a80-tcon-tv
* "allwinner,sun50i-a64-tcon-lcd", "allwinner,sun8i-a83t-tcon-lcd"
* "allwinner,sun50i-a64-tcon-tv", "allwinner,sun8i-a83t-tcon-tv"
+ * "allwinner,sun50i-h6-tcon-tv", "allwinner,sun8i-r40-tcon-tv"
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the TCON.
@@ -220,24 +228,26 @@ It allows display pipeline to be configured in very different ways:
\ [3] TCON-TV1 [1] - TVE1/RGB
Note that both TCON TOP references same physical unit. Both mixers can be
-connected to any TCON.
+connected to any TCON. Not all TCON TOP variants support all features.
Required properties:
- compatible: value must be one of:
* allwinner,sun8i-r40-tcon-top
+ * allwinner,sun50i-h6-tcon-top
- reg: base address and size of the memory-mapped region.
- clocks: phandle to the clocks feeding the TCON TOP
* bus: TCON TOP interface clock
* tcon-tv0: TCON TV0 clock
- * tve0: TVE0 clock
- * tcon-tv1: TCON TV1 clock
- * tve1: TVE0 clock
- * dsi: MIPI DSI clock
+ * tve0: TVE0 clock (R40 only)
+ * tcon-tv1: TCON TV1 clock (R40 only)
+ * tve1: TVE1 clock (R40 only)
+ * dsi: MIPI DSI clock (R40 only)
- clock-names: clock name mentioned above
- resets: phandle to the reset line driving the TCON TOP
- #clock-cells : must contain 1
- clock-output-names: Names of clocks created for TCON TV0 channel clock,
- TCON TV1 channel clock and DSI channel clock, in that order.
+ TCON TV1 channel clock (R40 only) and DSI channel clock (R40 only), in
+ that order.
- ports: A ports node with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt. 6 ports should
@@ -381,6 +391,7 @@ Required properties:
* allwinner,sun8i-v3s-de2-mixer
* allwinner,sun50i-a64-de2-mixer-0
* allwinner,sun50i-a64-de2-mixer-1
+ * allwinner,sun50i-h6-de3-mixer-0
- reg: base address and size of the memory-mapped region.
- clocks: phandles to the clocks feeding the mixer
* bus: the mixer interface clock
@@ -415,9 +426,10 @@ Required properties:
* allwinner,sun8i-v3s-display-engine
* allwinner,sun9i-a80-display-engine
* allwinner,sun50i-a64-display-engine
+ * allwinner,sun50i-h6-display-engine
- allwinner,pipelines: list of phandle to the display engine
- frontends (DE 1.0) or mixers (DE 2.0) available.
+ frontends (DE 1.0) or mixers (DE 2.0/3.0) available.
Example:
diff --git a/Documentation/devicetree/bindings/display/truly,nt35597.txt b/Documentation/devicetree/bindings/display/truly,nt35597.txt
new file mode 100644
index 000000000000..f39c77ee36ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/truly,nt35597.txt
@@ -0,0 +1,59 @@
+Truly model NT35597 DSI display driver
+
+The Truly NT35597 is a generic display driver, currently only configured
+for use in the 2K display on the Qualcomm SDM845 MTP board.
+
+Required properties:
+- compatible: should be "truly,nt35597-2K-display"
+- vdda-supply: phandle of the regulator that provides the supply voltage
+  for the Power IC
+- vdispp-supply: phandle of the regulator that provides the supply voltage
+ for positive LCD bias
+- vdispn-supply: phandle of the regulator that provides the supply voltage
+ for negative LCD bias
+- reset-gpios: phandle of gpio for reset line
+ This should be 8 mA; the gpio can be configured using mux, pinctrl, pinctrl-names
+ (active low)
+- mode-gpios: phandle of the gpio for choosing the mode of the display
+ for single DSI or Dual DSI
+ This should be low for dual DSI and high for single DSI mode
+- ports: This device has two video ports driven by two DSIs. Their connections
+ are modeled using the OF graph bindings specified in
+ Documentation/devicetree/bindings/graph.txt.
+ - port@0: DSI input port driven by master DSI
+ - port@1: DSI input port driven by secondary DSI
+
+Example:
+
+ dsi@ae94000 {
+ panel@0 {
+ compatible = "truly,nt35597-2K-display";
+ reg = <0>;
+ vdda-supply = <&pm8998_l14>;
+ vdispp-supply = <&lab_regulator>;
+ vdispn-supply = <&ibb_regulator>;
+ pinctrl-names = "default", "suspend";
+ pinctrl-0 = <&dpu_dsi_active>;
+ pinctrl-1 = <&dpu_dsi_suspend>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+ mode-gpios = <&tlmm 52 GPIO_ACTIVE_HIGH>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ panel0_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ panel1_in: endpoint {
+ remote-endpoint = <&dsi1_out>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-omap.txt b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
index 7e49839d4124..4b90ba9f31b7 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-omap.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
@@ -1,8 +1,12 @@
I2C for OMAP platforms
Required properties :
-- compatible : Must be "ti,omap2420-i2c", "ti,omap2430-i2c", "ti,omap3-i2c"
- or "ti,omap4-i2c"
+- compatible : Must be
+ "ti,omap2420-i2c" for OMAP2420 SoCs
+ "ti,omap2430-i2c" for OMAP2430 SoCs
+ "ti,omap3-i2c" for OMAP3 SoCs
+ "ti,omap4-i2c" for OMAP4+ SoCs
+ "ti,am654-i2c", "ti,omap4-i2c" for AM654 SoCs
- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
- #address-cells = <1>;
- #size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
index 903a78da65be..3a9926f99937 100644
--- a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
+++ b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
@@ -17,7 +17,7 @@ Example:
reg = <1>;
clocks = <&clk32m>;
interrupt-parent = <&gpio4>;
- interrupts = <13 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
vdd-supply = <&reg5v0>;
xceiver-supply = <&reg5v0>;
};
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
index cc4372842bf3..9936b9ee67c3 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_can.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt
@@ -5,6 +5,7 @@ Required properties:
- compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
"renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC.
"renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
+ "renesas,can-r8a774a1" if CAN controller is a part of R8A774A1 SoC.
"renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
"renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
"renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
@@ -14,26 +15,32 @@ Required properties:
"renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC.
"renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC.
"renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC.
+ "renesas,can-r8a77965" if CAN controller is a part of R8A77965 SoC.
"renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device.
"renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1
compatible device.
- "renesas,rcar-gen3-can" for a generic R-Car Gen3 compatible device.
+ "renesas,rcar-gen3-can" for a generic R-Car Gen3 or RZ/G2
+ compatible device.
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first
followed by the generic version.
- reg: physical base address and size of the R-Car CAN register map.
- interrupts: interrupt specifier for the sole interrupt.
-- clocks: phandles and clock specifiers for 3 CAN clock inputs.
-- clock-names: 3 clock input name strings: "clkp1", "clkp2", "can_clk".
+- clocks: phandles and clock specifiers for 2 CAN clock inputs for RZ/G2
+ devices.
+ phandles and clock specifiers for 3 CAN clock inputs for every other
+ SoC.
+- clock-names: 2 clock input name strings for RZ/G2: "clkp1", "can_clk".
+ 3 clock input name strings for every other SoC: "clkp1", "clkp2",
+ "can_clk".
- pinctrl-0: pin control group to be used for this controller.
- pinctrl-names: must be "default".
-Required properties for "renesas,can-r8a7795" and "renesas,can-r8a7796"
-compatible:
-In R8A7795 and R8A7796 SoCs, "clkp2" can be CANFD clock. This is a div6 clock
-and can be used by both CAN and CAN FD controller at the same time. It needs to
-be scaled to maximum frequency if any of these controllers use it. This is done
+Required properties for R8A7795, R8A7796 and R8A77965:
+For the denoted SoCs, "clkp2" can be the CANFD clock. This is a div6 clock and
+can be used by both the CAN and CAN FD controllers at the same time. It needs
+to be scaled to the maximum frequency if either of them uses it. This is done
using the below properties:
- assigned-clocks: phandle of clkp2(CANFD) clock.
@@ -42,8 +49,9 @@ using the below properties:
Optional properties:
- renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are:
<0x0> (default) : Peripheral clock (clkp1)
- <0x1> : Peripheral clock (clkp2)
- <0x3> : Externally input clock
+ <0x1> : Peripheral clock (clkp2) (not supported by
+ RZ/G2 devices)
+ <0x3> : External input clock
Example
-------
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index 3ceeb8de1196..35694c0c376b 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -7,7 +7,7 @@ limitations.
Current Binding
---------------
-Switches are true Linux devices and can be probes by any means. Once
+Switches are true Linux devices and can be probed by any means. Once
probed, they register to the DSA framework, passing a node
pointer. This node is expected to fulfil the following binding, and
may contain additional properties as required by the device it is
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4b1a2a8fcc16..a2f4451cf3f9 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -67,6 +67,7 @@ capella Capella Microsystems, Inc
cascoda Cascoda, Ltd.
cavium Cavium, Inc.
cdns Cadence Design Systems Inc.
+cdtech CDTech(H.K.) Electronics Limited
ceva Ceva, Inc.
chipidea Chipidea, Inc
chipone ChipOne
diff --git a/Documentation/gpu/amdgpu-dc.rst b/Documentation/gpu/amdgpu-dc.rst
new file mode 100644
index 000000000000..cc89b0fc11df
--- /dev/null
+++ b/Documentation/gpu/amdgpu-dc.rst
@@ -0,0 +1,68 @@
+===================================
+drm/amd/display - Display Core (DC)
+===================================
+
+*placeholder - general description of supported platforms, what dc is, etc.*
+
+Because it is partially shared with other operating systems, the Display Core
+Driver is divided into two pieces.
+
+1. **Display Core (DC)** contains the OS-agnostic components. Things like
+ hardware programming and resource management are handled here.
+2. **Display Manager (DM)** contains the OS-dependent components. Hooks to the
+ amdgpu base driver and DRM are implemented here.
+
+It doesn't help that the entire package is frequently referred to as DC. But
+with the context in mind, it should be clear.
+
+When CONFIG_DRM_AMD_DC is enabled, DC will be initialized by default for
+supported ASICs. To force-disable it, set `amdgpu.dc=0` on the kernel command
+line. Likewise, to force-enable it on unsupported ASICs, set `amdgpu.dc=1`.
+
+To determine if DC is loaded, search dmesg for the following entry:
+
+``Display Core initialized with <version number here>``
+
+AMDgpu Display Manager
+======================
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+ :internal:
+
+Lifecycle
+---------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :doc: DM Lifecycle
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :functions: dm_hw_init dm_hw_fini
+
+Interrupts
+----------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :functions: register_hpd_handlers dm_crtc_high_irq dm_pflip_high_irq
+
+Atomic Implementation
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :doc: atomic
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :functions: amdgpu_dm_atomic_check amdgpu_dm_atomic_commit_tail
+
+Display Core
+============
+
+**WIP**
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index 7d2d3875ff1a..7c1672118a73 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -5,6 +5,7 @@ GPU Driver Documentation
.. toctree::
amdgpu
+ amdgpu-dc
i915
meson
pl111
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index f9cfcdcdf024..b422eb8edf16 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -59,19 +59,28 @@ Implementing Asynchronous Atomic Commit
.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
:doc: implementing nonblocking commit
+Helper Functions Reference
+--------------------------
+
+.. kernel-doc:: include/drm/drm_atomic_helper.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+ :export:
+
Atomic State Reset and Initialization
-------------------------------------
-.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
:doc: atomic state reset and initialization
-Helper Functions Reference
---------------------------
+Atomic State Helper Reference
+-----------------------------
-.. kernel-doc:: include/drm/drm_atomic_helper.h
+.. kernel-doc:: include/drm/drm_atomic_state_helper.h
:internal:
-.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
:export:
Simple KMS Helper Reference
@@ -223,6 +232,18 @@ MIPI DSI Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
:export:
+Display Stream Compression Helper Functions Reference
+=====================================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dsc.c
+ :doc: dsc helpers
+
+.. kernel-doc:: include/drm/drm_dsc.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_dsc.c
+ :export:
+
Output Probing Helper Functions Reference
=========================================
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 4b1501b4835b..75c882e09fee 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -554,6 +554,18 @@ Plane Composition Properties
.. kernel-doc:: drivers/gpu/drm/drm_blend.c
:export:
+FB_DAMAGE_CLIPS
+~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_damage_helper.h
+ :internal:
+
Color Management Properties
---------------------------
@@ -575,6 +587,13 @@ Explicit Fencing Properties
.. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
:doc: explicit fencing properties
+
+Variable Refresh Properties
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+ :doc: Variable refresh properties
+
Existing KMS Properties
-----------------------
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index e725e8449e72..54a696d961a7 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -72,16 +72,13 @@ object TTM to provide a pool for buffer object allocation by clients and
the kernel itself. The type of this object should be
TTM_GLOBAL_TTM_BO, and its size should be sizeof(struct
ttm_bo_global). Again, driver-specific init and release functions may
-be provided, likely eventually calling ttm_bo_global_init() and
-ttm_bo_global_release(), respectively. Also, like the previous
+be provided, likely eventually calling ttm_bo_global_ref_init() and
+ttm_bo_global_ref_release(), respectively. Also, like the previous
object, ttm_global_item_ref() is used to create an initial reference
count for the TTM, which will call your initialization function.
See the radeon_ttm.c file for an example of usage.
-.. kernel-doc:: drivers/gpu/drm/drm_global.c
- :export:
-
The Graphics Execution Manager (GEM)
====================================
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index a2214cc1f821..4b4bf2c5eac5 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -197,6 +197,9 @@ EPERM/EACCESS:
difference between EACCESS and EPERM.
ENODEV:
+ The device is not (yet) present or fully initialized.
+
+EOPNOTSUPP:
Feature (like PRIME, modesetting, GEM) is not supported by the driver.
ENXIO:
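These error codes are easy to exercise from userspace. The following minimal sketch shows how a client might tell the two cases apart; the device path and the choice of DRM_IOCTL_VERSION are merely illustrative, and it assumes the DRM uapi headers are on the include path::

  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <drm/drm.h>

  int main(void)
  {
          /* Illustrative node; real code would enumerate /dev/dri. */
          int fd = open("/dev/dri/card0", O_RDWR);
          struct drm_version ver;

          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          memset(&ver, 0, sizeof(ver));
          if (ioctl(fd, DRM_IOCTL_VERSION, &ver) < 0) {
                  if (errno == ENODEV)
                          fprintf(stderr, "device not (yet) present or initialized\n");
                  else if (errno == EOPNOTSUPP)
                          fprintf(stderr, "feature not supported by this driver\n");
                  else
                          perror("ioctl");
                  return 1;
          }
          printf("DRM version %d.%d.%d\n", ver.version_major,
                 ver.version_minor, ver.version_patchlevel);
          return 0;
  }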
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 77c2b3c25565..14191b64446d 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -28,22 +28,16 @@ them, but also all the virtual ones used by KVM, so everyone qualifies).
Contact: Daniel Vetter, Thierry Reding, respective driver maintainers
-Switch from reference/unreference to get/put
---------------------------------------------
-
-For some reason DRM core uses ``reference``/``unreference`` suffixes for
-refcounting functions, but kernel uses ``get``/``put`` (e.g.
-``kref_get``/``put()``). It would be good to switch over for consistency, and
-it's shorter. Needs to be done in 3 steps for each pair of functions:
-* Create new ``get``/``put`` functions, define the old names as compatibility
- wrappers
-* Switch over each file/driver using a cocci-generated spatch.
-* Once all users of the old names are gone, remove them.
+Remove custom dumb_map_offset implementations
+---------------------------------------------
-This way drivers/patches in the progress of getting merged won't break.
+All GEM-based drivers should be using drm_gem_create_mmap_offset() instead.
+Audit each individual driver, make sure it'll work with the generic
+implementation (there are lots of outdated locking leftovers in various
+implementations), and then remove it.
-Contact: Daniel Vetter
+Contact: Daniel Vetter, respective driver maintainers
Convert existing KMS drivers to atomic modesetting
--------------------------------------------------
@@ -234,6 +228,34 @@ efficient.
Contact: Daniel Vetter
+Defaults for .gem_prime_import and export
+-----------------------------------------
+
+Most drivers don't need to set drm_driver->gem_prime_import and
+->gem_prime_export now that drm_gem_prime_import() and drm_gem_prime_export()
+are the default.
+
+struct drm_gem_object_funcs
+---------------------------
+
+GEM objects can now have a function table instead of having the callbacks on the
+DRM driver struct. This is now the preferred way and drivers can be moved over.
+
+Use DRM_MODESET_LOCK_ALL_* helpers instead of boilerplate
+---------------------------------------------------------
+
+For cases where drivers grab the modeset locks with a local acquire context,
+replace the boilerplate code surrounding drm_modeset_lock_all_ctx() with
+DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END(), as shown in the
+sketch after this entry.
+
+This should also be done for all places where drm_modeset_lock_all() is still
+used.
+
+As a reference, take a look at the conversions already completed in drm core.
+
+Contact: Sean Paul, respective driver maintainers
+
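A rough before/after sketch of such a conversion; the macro argument lists follow the helpers as proposed around this time and should be treated as illustrative, not authoritative::

  #include <drm/drm_modeset_lock.h>

  /* Before: open-coded acquire-context boilerplate. */
  static void legacy_style(struct drm_device *dev)
  {
          struct drm_modeset_acquire_ctx ctx;
          int ret;

          drm_modeset_acquire_init(&ctx, 0);
  retry:
          ret = drm_modeset_lock_all_ctx(dev, &ctx);
          if (ret == -EDEADLK) {
                  drm_modeset_backoff(&ctx);
                  goto retry;
          }

          /* ... touch modeset state here ... */

          drm_modeset_drop_locks(&ctx);
          drm_modeset_acquire_fini(&ctx);
  }

  /* After: the same critical section using the new helpers. */
  static void helper_style(struct drm_device *dev)
  {
          struct drm_modeset_acquire_ctx ctx;
          int ret;

          DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

          /* ... touch modeset state here ... */

          DRM_MODESET_LOCK_ALL_END(ctx, ret);
  }

The helpers hide the -EDEADLK backoff-and-retry dance, which is exactly the boilerplate this entry wants removed.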
Core refactorings
=================
@@ -339,6 +361,16 @@ Some of these date from the very introduction of KMS in 2008 ...
leftovers from older (never merged into upstream) KMS designs where modes
where set using their ID, including support to add/remove modes.
+- Make ->funcs and ->helper_private vtables optional. There's a bunch of empty
+ function tables in drivers, but before we can remove them we need to make sure
+ that all the users in helpers and drivers do correctly check for a NULL
+ vtable.
+
+- Clean up the various ->destroy callbacks. A lot of them just wrap the
+ drm_*_cleanup implementations and can be removed. Some tack a kfree() at the
+ end, for which we could add drm_*_cleanup_kfree(). And then there's the (for
+ historical reasons) misnamed drm_primary_helper_destroy() function.
+
Better Testing
==============
diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst
index 0a6ea6216e41..7dfc349a4508 100644
--- a/Documentation/gpu/vkms.rst
+++ b/Documentation/gpu/vkms.rst
@@ -10,8 +10,8 @@
TODO
====
-CRC API
--------
+CRC API Improvements
+--------------------
- Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
@@ -22,3 +22,100 @@ CRC API
- Add igt test to check extreme alpha values i.e. fully opaque and fully
transparent (intermediate values are affected by hw-specific rounding modes).
+
+Vblank issues
+-------------
+
+Some IGT test cases are failing. We need to analyze why and fix these issues:
+
+- plain-flip-fb-recreate
+- plain-flip-ts-check
+- flip-vs-blocking-wf-vblank
+- plain-flip-fb-recreate-interruptible
+- flip-vs-wf_vblank-interruptible
+
+Runtime Configuration
+---------------------
+
+We want to be able to reconfigure the vkms instance without having to reload
+the module. Use/test cases:
+
+- Hotplug/hotremove connectors on the fly (to be able to test DP MST handling of
+ compositors).
+
+- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of
+ them first).
+
+- Change output configuration: Plug/unplug screens, change EDID, allow changing
+ the refresh rate.
+
+The currently proposed solution is to expose vkms configuration through
+configfs. All existing module options should be supported through configfs too.
+
+Add Plane Features
+------------------
+
+There are lots of plane features we could add support for:
+
+- Real overlay planes, not just cursor.
+
+- Full alpha blending on all planes.
+
+- Rotation, scaling.
+
+- Additional buffer formats, especially YUV formats for video like NV12.
+ Low/high bpp RGB formats would also be interesting.
+
+- Async updates (currently only possible on cursor plane using the legacy cursor
+ api).
+
+For all of these, we also want to review the igt test coverage and make sure all
+relevant igt testcases work on vkms.
+
+Writeback support
+-----------------
+
+Currently vkms only computes a CRC for each frame. Once we have additional plane
+features, we could write back the entire composited frame, and expose it as:
+
+- Writeback connector. This is useful for testing compositors if you don't have
+ hardware with writeback support.
+
+- As a v4l device. This is useful for debugging compositors on special vkms
+ configurations, so that developers see what's really going on.
+
+Prime Buffer Sharing
+--------------------
+
+We already have vgem, which is a gem driver for testing rendering, similar to
+how vkms is for testing the modeset side. Adding buffer sharing support to vkms
+allows us to test them together, to test synchronization and lots of other
+features. Also, this allows compositors to test whether they work correctly on
+SoC chips, where display and rendering are very often split between two
+drivers.
+
+Output Features
+---------------
+
+- Variable refresh rate/freesync support. This probably needs prime buffer
+ sharing support, so that we can use vgem fences to simulate rendering in
+ testing. Also needs support to specify the EDID.
+
+- Add support for link status, so that compositors can validate their runtime
+ fallbacks when e.g. a Display Port link goes bad.
+
+- All the hotplug handling described under "Runtime Configuration".
+
+Atomic Check using eBPF
+-----------------------
+
+Atomic drivers have lots of restrictions which are not exposed to userspace in
+any explicit form through e.g. possible property values. Userspace can only
+inquire about these limits through the atomic IOCTL, possibly using the
+TEST_ONLY flag. Trying to add configurable code for all these limits, to allow
+compositors to be tested against them, would be a rather futile exercise.
+Instead we could add support for eBPF to validate any kind of atomic state,
+and implement a library of different restrictions.
+
+This needs a bunch of features (plane compositing, multiple outputs, ...)
+enabled already to make sense.
diff --git a/Documentation/i2c/busses/i2c-nvidia-gpu b/Documentation/i2c/busses/i2c-nvidia-gpu
new file mode 100644
index 000000000000..31884d2b2eb5
--- /dev/null
+++ b/Documentation/i2c/busses/i2c-nvidia-gpu
@@ -0,0 +1,18 @@
+Kernel driver i2c-nvidia-gpu
+
+Datasheet: not publicly available.
+
+Authors:
+ Ajay Gupta <ajayg@nvidia.com>
+
+Description
+-----------
+
+i2c-nvidia-gpu is a driver for the I2C controller included in NVIDIA Turing
+and later GPUs. It is used to communicate with the Type-C controller on those
+GPUs.
+
+If your 'lspci -v' listing shows something like the following,
+
+01:00.3 Serial bus controller [0c80]: NVIDIA Corporation Device 1ad9 (rev a1)
+
+then this driver should support the I2C controller of your GPU.
diff --git a/Documentation/input/event-codes.rst b/Documentation/input/event-codes.rst
index cef220c176a4..a8c0873beb95 100644
--- a/Documentation/input/event-codes.rst
+++ b/Documentation/input/event-codes.rst
@@ -190,16 +190,7 @@ A few EV_REL codes have special meanings:
* REL_WHEEL, REL_HWHEEL:
- These codes are used for vertical and horizontal scroll wheels,
- respectively. The value is the number of "notches" moved on the wheel, the
- physical size of which varies by device. For high-resolution wheels (which
- report multiple events for each notch of movement, or do not have notches)
- this may be an approximation based on the high-resolution scroll events.
-
-* REL_WHEEL_HI_RES:
-
- - If a vertical scroll wheel supports high-resolution scrolling, this code
- will be emitted in addition to REL_WHEEL. The value is the (approximate)
- distance travelled by the user's finger, in microns.
+ respectively.
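A minimal userspace consumer of these codes, for illustration (the event-node path is a placeholder; pick the node belonging to your mouse)::

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>
  #include <linux/input.h>

  int main(void)
  {
          /* Placeholder path; real code would scan /dev/input. */
          int fd = open("/dev/input/event0", O_RDONLY);
          struct input_event ev;

          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                  if (ev.type == EV_REL && ev.code == REL_WHEEL)
                          printf("vertical wheel: %d notch(es)\n", ev.value);
                  else if (ev.type == EV_REL && ev.code == REL_HWHEEL)
                          printf("horizontal wheel: %d notch(es)\n", ev.value);
          }
          close(fd);
          return 0;
  }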
EV_ABS
------
diff --git a/Documentation/media/uapi/v4l/dev-meta.rst b/Documentation/media/uapi/v4l/dev-meta.rst
index f7ac8d0d3af1..b65dc078abeb 100644
--- a/Documentation/media/uapi/v4l/dev-meta.rst
+++ b/Documentation/media/uapi/v4l/dev-meta.rst
@@ -40,7 +40,7 @@ To use the :ref:`format` ioctls applications set the ``type`` field of the
the desired operation. Both drivers and applications must set the remainder of
the :c:type:`v4l2_format` structure to 0.
-.. _v4l2-meta-format:
+.. c:type:: v4l2_meta_format
.. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}|
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
index 3ead350e099f..9ea494a8faca 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
@@ -133,6 +133,11 @@ The format as returned by :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` must be identical
- Definition of a data format, see :ref:`pixfmt`, used by SDR
capture and output devices.
* -
+ - struct :c:type:`v4l2_meta_format`
+ - ``meta``
+ - Definition of a metadata format, see :ref:`meta-formats`, used by
+ metadata capture devices.
+ * -
- __u8
- ``raw_data``\ [200]
- Place holder for future extensions.
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 605e00cdd6be..89f1302d593a 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1056,18 +1056,23 @@ The kernel interface functions are as follows:
u32 rxrpc_kernel_check_life(struct socket *sock,
struct rxrpc_call *call);
+ void rxrpc_kernel_probe_life(struct socket *sock,
+ struct rxrpc_call *call);
- This returns a number that is updated when ACKs are received from the peer
- (notably including PING RESPONSE ACKs which we can elicit by sending PING
- ACKs to see if the call still exists on the server). The caller should
- compare the numbers of two calls to see if the call is still alive after
- waiting for a suitable interval.
+ The first function returns a number that is updated when ACKs are received
+ from the peer (notably including PING RESPONSE ACKs which we can elicit by
+ sending PING ACKs to see if the call still exists on the server). The
+ caller should compare the numbers returned by two such calls to see if the
+ call is still alive after waiting for a suitable interval.
This allows the caller to work out if the server is still contactable and
if the call is still alive on the server whilst waiting for the server to
process a client operation.
- This function may transmit a PING ACK.
+ The second function causes a ping ACK to be transmitted to try to provoke
+ the peer into responding, which would then cause the value returned by the
+ first function to change. Note that this must be called in TASK_RUNNING
+ state.
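As a sketch of how a kernel-side user might combine the pair; the helper name and the one-second wait are assumptions for illustration, not part of the API::

  #include <linux/delay.h>
  #include <net/af_rxrpc.h>

  /* Hypothetical helper: poll whether the peer still answers for a call. */
  static bool call_seems_alive(struct socket *sock, struct rxrpc_call *call)
  {
          u32 before = rxrpc_kernel_check_life(sock, call);

          /* Provoke a ping ACK; caller must be in TASK_RUNNING state. */
          rxrpc_kernel_probe_life(sock, call);

          /* Wait a suitable interval for a PING RESPONSE ACK to arrive. */
          msleep(1000);

          /* An updated counter means the peer responded recently. */
          return rxrpc_kernel_check_life(sock, call) != before;
  }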
(*) Get reply timestamp.
diff --git a/Documentation/vm/unevictable-lru.rst b/Documentation/vm/unevictable-lru.rst
index fdd84cb8d511..b8e29f977f2d 100644
--- a/Documentation/vm/unevictable-lru.rst
+++ b/Documentation/vm/unevictable-lru.rst
@@ -143,7 +143,7 @@ using a number of wrapper functions:
Query the address space, and return true if it is completely
unevictable.
-These are currently used in two places in the kernel:
+These are currently used in three places in the kernel:
(1) By ramfs to mark the address spaces of its inodes when they are created,
and this mark remains for the life of the inode.
@@ -154,6 +154,10 @@ These are currently used in two places in the kernel:
swapped out; the application must touch the pages manually if it wants to
ensure they're in memory.
+ (3) By the i915 driver to mark pinned address space until it is unpinned. The
+ amount of unevictable memory marked by the i915 driver is roughly the bound
+ object size in debugfs/dri/0/i915_gem_objects.
+
Detecting Unevictable Pages
---------------------------
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 73aaaa3da436..804f9426ed17 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -34,23 +34,24 @@ __________________|____________|__________________|_________|___________________
____________________________________________________________|___________________________________________________________
| | | |
ffff800000000000 | -128 TB | ffff87ffffffffff | 8 TB | ... guard hole, also reserved for hypervisor
- ffff880000000000 | -120 TB | ffffc7ffffffffff | 64 TB | direct mapping of all physical memory (page_offset_base)
- ffffc80000000000 | -56 TB | ffffc8ffffffffff | 1 TB | ... unused hole
+ ffff880000000000 | -120 TB | ffff887fffffffff | 0.5 TB | LDT remap for PTI
+ ffff888000000000 | -119.5 TB | ffffc87fffffffff | 64 TB | direct mapping of all physical memory (page_offset_base)
+ ffffc88000000000 | -55.5 TB | ffffc8ffffffffff | 0.5 TB | ... unused hole
ffffc90000000000 | -55 TB | ffffe8ffffffffff | 32 TB | vmalloc/ioremap space (vmalloc_base)
ffffe90000000000 | -23 TB | ffffe9ffffffffff | 1 TB | ... unused hole
ffffea0000000000 | -22 TB | ffffeaffffffffff | 1 TB | virtual memory map (vmemmap_base)
ffffeb0000000000 | -21 TB | ffffebffffffffff | 1 TB | ... unused hole
ffffec0000000000 | -20 TB | fffffbffffffffff | 16 TB | KASAN shadow memory
- fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
- | | | | vaddr_end for KASLR
- fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
- fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | LDT remap for PTI
- ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
__________________|____________|__________________|_________|____________________________________________________________
|
- | Identical layout to the 47-bit one from here on:
+ | Identical layout to the 56-bit one from here on:
____________________________________________________________|____________________________________________________________
| | | |
+ fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
+ | | | | vaddr_end for KASLR
+ fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
+ ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
@@ -83,7 +84,7 @@ Notes:
__________________|____________|__________________|_________|___________________________________________________________
| | | |
0000800000000000 | +64 PB | ffff7fffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical
- | | | | virtual memory addresses up to the -128 TB
+ | | | | virtual memory addresses up to the -64 PB
| | | | starting offset of kernel mappings.
__________________|____________|__________________|_________|___________________________________________________________
|
@@ -91,23 +92,24 @@ __________________|____________|__________________|_________|___________________
____________________________________________________________|___________________________________________________________
| | | |
ff00000000000000 | -64 PB | ff0fffffffffffff | 4 PB | ... guard hole, also reserved for hypervisor
- ff10000000000000 | -60 PB | ff8fffffffffffff | 32 PB | direct mapping of all physical memory (page_offset_base)
- ff90000000000000 | -28 PB | ff9fffffffffffff | 4 PB | LDT remap for PTI
+ ff10000000000000 | -60 PB | ff10ffffffffffff | 0.25 PB | LDT remap for PTI
+ ff11000000000000 | -59.75 PB | ff90ffffffffffff | 32 PB | direct mapping of all physical memory (page_offset_base)
+ ff91000000000000 | -27.75 PB | ff9fffffffffffff | 3.75 PB | ... unused hole
ffa0000000000000 | -24 PB | ffd1ffffffffffff | 12.5 PB | vmalloc/ioremap space (vmalloc_base)
ffd2000000000000 | -11.5 PB | ffd3ffffffffffff | 0.5 PB | ... unused hole
ffd4000000000000 | -11 PB | ffd5ffffffffffff | 0.5 PB | virtual memory map (vmemmap_base)
ffd6000000000000 | -10.5 PB | ffdeffffffffffff | 2.25 PB | ... unused hole
ffdf000000000000 | -8.25 PB | fffffdffffffffff | ~8 PB | KASAN shadow memory
- fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
- | | | | vaddr_end for KASLR
- fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
- fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
- ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
__________________|____________|__________________|_________|____________________________________________________________
|
| Identical layout to the 47-bit one from here on:
____________________________________________________________|____________________________________________________________
| | | |
+ fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
+ | | | | vaddr_end for KASLR
+ fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
+ ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt
index 97b7adbceda4..68aed077f7b6 100644
--- a/Documentation/x86/zero-page.txt
+++ b/Documentation/x86/zero-page.txt
@@ -25,7 +25,7 @@ Offset Proto Name Meaning
0C8/004 ALL ext_cmd_line_ptr cmd_line_ptr high 32bits
140/080 ALL edid_info Video mode setup (struct edid_info)
1C0/020 ALL efi_info EFI 32 information (struct efi_info)
-1E0/004 ALL alk_mem_k Alternative mem check, in KB
+1E0/004 ALL alt_mem_k Alternative mem check, in KB
1E4/004 ALL scratch Scratch field for the kernel setup code
1E8/001 ALL e820_entries Number of entries in e820_table (below)
1E9/001 ALL eddbuf_entries Number of entries in eddbuf (below)
diff --git a/MAINTAINERS b/MAINTAINERS
index f4855974f325..e12da05e2feb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -180,6 +180,7 @@ F: drivers/net/hamradio/6pack.c
8169 10/100/1000 GIGABIT ETHERNET DRIVER
M: Realtek linux nic maintainers <nic_swsd@realtek.com>
+M: Heiner Kallweit <hkallweit1@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/realtek/r8169.c
@@ -717,7 +718,7 @@ F: include/linux/mfd/altera-a10sr.h
F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h
ALTERA TRIPLE SPEED ETHERNET DRIVER
-M: Vince Bridgers <vbridger@opensource.altera.com>
+M: Thor Thayer <thor.thayer@linux.intel.com>
L: netdev@vger.kernel.org
L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
S: Maintained
@@ -3276,6 +3277,12 @@ F: include/uapi/linux/caif/
F: include/net/caif/
F: net/caif/
+CAKE QDISC
+M: Toke Høiland-Jørgensen <toke@toke.dk>
+L: cake@lists.bufferbloat.net (moderated for non-subscribers)
+S: Maintained
+F: net/sched/sch_cake.c
+
CALGARY x86-64 IOMMU
M: Muli Ben-Yehuda <mulix@mulix.org>
M: Jon Mason <jdmason@kudzu.us>
@@ -4655,6 +4662,13 @@ S: Maintained
F: drivers/gpu/drm/tinydrm/ili9225.c
F: Documentation/devicetree/bindings/display/ilitek,ili9225.txt
+DRM DRIVER FOR HX8357D PANELS
+M: Eric Anholt <eric@anholt.net>
+T: git git://anongit.freedesktop.org/drm/drm-misc
+S: Maintained
+F: drivers/gpu/drm/tinydrm/hx8357d.c
+F: Documentation/devicetree/bindings/display/himax,hx8357d.txt
+
DRM DRIVER FOR INTEL I810 VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/i810/
@@ -4696,6 +4710,12 @@ S: Supported
F: drivers/gpu/drm/nouveau/
F: include/uapi/drm/nouveau_drm.h
+DRM DRIVER FOR OLIMEX LCD-OLINUXINO PANELS
+M: Stefan Mavrodiev <stefan@olimex.com>
+S: Maintained
+F: drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+F: Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
+
DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
@@ -4761,10 +4781,8 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVER FOR VMWARE VIRTUAL GPU
M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
-M: Sinclair Yeh <syeh@vmware.com>
M: Thomas Hellstrom <thellstrom@vmware.com>
L: dri-devel@lists.freedesktop.org
-T: git git://people.freedesktop.org/~syeh/repos_linux
T: git git://people.freedesktop.org/~thomash/linux
S: Supported
F: drivers/gpu/drm/vmwgfx/
@@ -5528,6 +5546,7 @@ F: net/bridge/
ETHERNET PHY LIBRARY
M: Andrew Lunn <andrew@lunn.ch>
M: Florian Fainelli <f.fainelli@gmail.com>
+M: Heiner Kallweit <hkallweit1@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-mdio
@@ -6299,6 +6318,7 @@ F: tools/testing/selftests/gpio/
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
+M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
L: linux-gpio@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
S: Maintained
@@ -6607,9 +6627,9 @@ F: arch/*/include/asm/suspend*.h
HID CORE LAYER
M: Jiri Kosina <jikos@kernel.org>
-R: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
L: linux-input@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
S: Maintained
F: drivers/hid/
F: include/linux/hid*
@@ -6861,6 +6881,13 @@ L: linux-acpi@vger.kernel.org
S: Maintained
F: drivers/i2c/i2c-core-acpi.c
+I2C CONTROLLER DRIVER FOR NVIDIA GPU
+M: Ajay Gupta <ajayg@nvidia.com>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/i2c/busses/i2c-nvidia-gpu
+F: drivers/i2c/busses/i2c-nvidia-gpu.c
+
I2C MUXES
M: Peter Rosin <peda@axentia.se>
L: linux-i2c@vger.kernel.org
@@ -7429,6 +7456,20 @@ S: Maintained
F: Documentation/fb/intelfb.txt
F: drivers/video/fbdev/intelfb/
+INTEL GPIO DRIVERS
+M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
+F: drivers/gpio/gpio-ich.c
+F: drivers/gpio/gpio-intel-mid.c
+F: drivers/gpio/gpio-lynxpoint.c
+F: drivers/gpio/gpio-merrifield.c
+F: drivers/gpio/gpio-ml-ioh.c
+F: drivers/gpio/gpio-pch.c
+F: drivers/gpio/gpio-sch.c
+F: drivers/gpio/gpio-sodaville.c
+
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M: Zhenyu Wang <zhenyuw@linux.intel.com>
M: Zhi Wang <zhi.a.wang@intel.com>
@@ -7439,12 +7480,6 @@ T: git https://github.com/intel/gvt-linux.git
S: Supported
F: drivers/gpu/drm/i915/gvt/
-INTEL PMIC GPIO DRIVER
-R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-S: Maintained
-F: drivers/gpio/gpio-*cove.c
-F: drivers/gpio/gpio-msic.c
-
INTEL HID EVENT DRIVER
M: Alex Hung <alex.hung@canonical.com>
L: platform-driver-x86@vger.kernel.org
@@ -7532,12 +7567,6 @@ W: https://01.org/linux-acpi
S: Supported
F: drivers/platform/x86/intel_menlow.c
-INTEL MERRIFIELD GPIO DRIVER
-M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-L: linux-gpio@vger.kernel.org
-S: Maintained
-F: drivers/gpio/gpio-merrifield.c
-
INTEL MIC DRIVERS (mic)
M: Sudeep Dutt <sudeep.dutt@intel.com>
M: Ashutosh Dixit <ashutosh.dixit@intel.com>
@@ -7570,6 +7599,13 @@ F: drivers/platform/x86/intel_punit_ipc.c
F: arch/x86/include/asm/intel_pmc_ipc.h
F: arch/x86/include/asm/intel_punit_ipc.h
+INTEL PMIC GPIO DRIVERS
+M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
+F: drivers/gpio/gpio-*cove.c
+F: drivers/gpio/gpio-msic.c
+
INTEL MULTIFUNCTION PMIC DEVICE DRIVERS
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
S: Maintained
@@ -8367,7 +8403,7 @@ F: drivers/media/dvb-frontends/lgdt3305.*
LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
M: Viresh Kumar <vireshk@kernel.org>
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: include/linux/pata_arasan_cf_data.h
F: drivers/ata/pata_arasan_cf.c
@@ -8384,7 +8420,7 @@ F: drivers/ata/ata_generic.c
LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: drivers/ata/pata_ftide010.c
F: drivers/ata/sata_gemini.c
@@ -8403,7 +8439,7 @@ F: include/linux/ahci_platform.h
LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
M: Mikael Pettersson <mikpelinux@gmail.com>
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: drivers/ata/sata_promise.*
@@ -10784,6 +10820,14 @@ L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod.*
+OMAP I2C DRIVER
+M: Vignesh R <vigneshr@ti.com>
+L: linux-omap@vger.kernel.org
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-omap.txt
+F: drivers/i2c/busses/i2c-omap.c
+
OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS)
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
@@ -10793,9 +10837,9 @@ F: drivers/media/platform/omap3isp/
F: drivers/staging/media/omap4iss/
OMAP MMC SUPPORT
-M: Jarkko Lavinen <jarkko.lavinen@nokia.com>
+M: Aaro Koskinen <aaro.koskinen@iki.fi>
L: linux-omap@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: drivers/mmc/host/omap.c
OMAP POWER MANAGEMENT SUPPORT
@@ -11730,6 +11774,7 @@ F: Documentation/devicetree/bindings/pinctrl/fsl,*
PIN CONTROLLER - INTEL
M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
S: Maintained
F: drivers/pinctrl/intel/
@@ -13962,11 +14007,10 @@ F: drivers/tty/serial/sunzilog.h
F: drivers/tty/vcc.c
SPARSE CHECKER
-M: "Christopher Li" <sparse@chrisli.org>
+M: "Luc Van Oostenryck" <luc.vanoostenryck@gmail.com>
L: linux-sparse@vger.kernel.org
W: https://sparse.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/devel/sparse/sparse.git
-T: git git://git.kernel.org/pub/scm/devel/sparse/chrisl/sparse.git
S: Maintained
F: include/linux/compiler.h
@@ -14063,6 +14107,7 @@ F: Documentation/devicetree/bindings/iio/proximity/vl53l0x.txt
STABLE BRANCH
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M: Sasha Levin <sashal@kernel.org>
L: stable@vger.kernel.org
S: Supported
F: Documentation/process/stable-kernel-rules.rst
@@ -15436,9 +15481,9 @@ F: include/linux/usb/gadget*
USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
M: Jiri Kosina <jikos@kernel.org>
-R: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
L: linux-usb@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
S: Maintained
F: Documentation/hid/hiddev.txt
F: drivers/hid/usbhid/
diff --git a/Makefile b/Makefile
index 9fce8b91c15f..0ce4e29ee342 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
VERSION = 4
PATCHLEVEL = 20
SUBLEVEL = 0
-EXTRAVERSION = -rc1
-NAME = "People's Front"
+EXTRAVERSION = -rc4
+NAME = Shy Crocodile
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h
index 6a8c53dec57e..b7c77bb1bfd2 100644
--- a/arch/alpha/include/asm/termios.h
+++ b/arch/alpha/include/asm/termios.h
@@ -73,9 +73,15 @@
})
#define user_termios_to_kernel_termios(k, u) \
- copy_from_user(k, u, sizeof(struct termios))
+ copy_from_user(k, u, sizeof(struct termios2))
#define kernel_termios_to_user_termios(u, k) \
+ copy_to_user(u, k, sizeof(struct termios2))
+
+#define user_termios_to_kernel_termios_1(k, u) \
+ copy_from_user(k, u, sizeof(struct termios))
+
+#define kernel_termios_to_user_termios_1(u, k) \
copy_to_user(u, k, sizeof(struct termios))
#endif /* _ALPHA_TERMIOS_H */
diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h
index 1e9121c9b3c7..971311605288 100644
--- a/arch/alpha/include/uapi/asm/ioctls.h
+++ b/arch/alpha/include/uapi/asm/ioctls.h
@@ -32,6 +32,11 @@
#define TCXONC _IO('t', 30)
#define TCFLSH _IO('t', 31)
+#define TCGETS2 _IOR('T', 42, struct termios2)
+#define TCSETS2 _IOW('T', 43, struct termios2)
+#define TCSETSW2 _IOW('T', 44, struct termios2)
+#define TCSETSF2 _IOW('T', 45, struct termios2)
+
#define TIOCSWINSZ _IOW('t', 103, struct winsize)
#define TIOCGWINSZ _IOR('t', 104, struct winsize)
#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h
index de6c8360fbe3..4575ba34a0ea 100644
--- a/arch/alpha/include/uapi/asm/termbits.h
+++ b/arch/alpha/include/uapi/asm/termbits.h
@@ -26,6 +26,19 @@ struct termios {
speed_t c_ospeed; /* output speed */
};
+/* Alpha has identical termios and termios2 */
+
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_cc[NCCS]; /* control characters */
+ cc_t c_line; /* line discipline (== c_cc[19]) */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
/* Alpha has matching termios and ktermios */
struct ktermios {
@@ -152,6 +165,7 @@ struct ktermios {
#define B3000000 00034
#define B3500000 00035
#define B4000000 00036
+#define BOTHER 00037
#define CSIZE 00001400
#define CS5 00000000
@@ -169,6 +183,9 @@ struct ktermios {
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
+#define CIBAUD 07600000
+#define IBSHIFT 16
+
/* c_lflag bits */
#define ISIG 0x00000080
#define ICANON 0x00000100
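With TCGETS2/TCSETS2 and BOTHER now wired up, Alpha userspace can request arbitrary baud rates the same way as other architectures. A hedged sketch follows; glibc does not wrap these ioctls, so they go through ioctl() directly on the raw <asm/termbits.h> definitions (which must not be mixed with <termios.h> in the same file), and the example rate is arbitrary::

  #include <sys/ioctl.h>
  #include <asm/termbits.h>   /* struct termios2, TCGETS2/TCSETS2, BOTHER */

  /* Set a non-standard rate, e.g. 74880 baud, on an already-open tty fd. */
  static int set_custom_baud(int fd, int baud)
  {
          struct termios2 tio;

          if (ioctl(fd, TCGETS2, &tio) < 0)
                  return -1;
          tio.c_cflag &= ~CBAUD;
          tio.c_cflag |= BOTHER;
          tio.c_ispeed = baud;
          tio.c_ospeed = baud;
          return ioctl(fd, TCSETS2, &tio);
  }

Called as set_custom_baud(fd, 74880), this replaces the legacy Bxxxx constants with an exact integer rate in c_ispeed/c_ospeed.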
diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
index b560ff88459b..5ff9a179c83c 100644
--- a/arch/arm/boot/dts/imx53-ppd.dts
+++ b/arch/arm/boot/dts/imx53-ppd.dts
@@ -55,7 +55,7 @@
};
chosen {
- stdout-path = "&uart1:115200n8";
+ stdout-path = "serial0:115200n8";
};
memory@70000000 {
diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
index ed9a980bce85..beefa1b2049d 100644
--- a/arch/arm/boot/dts/imx6sll.dtsi
+++ b/arch/arm/boot/dts/imx6sll.dtsi
@@ -740,7 +740,7 @@
i2c1: i2c@21a0000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fs,imx6sll-i2c", "fsl,imx21-i2c";
+ compatible = "fsl,imx6sll-i2c", "fsl,imx21-i2c";
reg = <0x021a0000 0x4000>;
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SLL_CLK_I2C1>;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index 53b3408b5fab..7d7d679945d2 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -117,7 +117,9 @@
regulator-name = "enet_3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
+ gpio = <&gpio2 6 GPIO_ACTIVE_LOW>;
+ regulator-boot-on;
+ regulator-always-on;
};
reg_pcie_gpio: regulator-pcie-gpio {
@@ -180,6 +182,7 @@
phy-supply = <&reg_enet_3v3>;
phy-mode = "rgmii";
phy-handle = <&ethphy1>;
+ phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
status = "okay";
mdio {
@@ -373,6 +376,8 @@
MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3 0x3081
MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN 0x3081
MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x91
+ /* phy reset */
+ MX6SX_PAD_ENET2_CRS__GPIO2_IO_7 0x10b0
>;
};
diff --git a/arch/arm/boot/dts/vf610m4-colibri.dts b/arch/arm/boot/dts/vf610m4-colibri.dts
index 41ec66a96990..ca6249558760 100644
--- a/arch/arm/boot/dts/vf610m4-colibri.dts
+++ b/arch/arm/boot/dts/vf610m4-colibri.dts
@@ -50,8 +50,8 @@
compatible = "fsl,vf610m4";
chosen {
- bootargs = "console=ttyLP2,115200 clk_ignore_unused init=/linuxrc rw";
- stdout-path = "&uart2";
+ bootargs = "clk_ignore_unused init=/linuxrc rw";
+ stdout-path = "serial2:115200";
};
memory@8c000000 {
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 1c7616815a86..63af6234c1b6 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 0d289240b6ca..775cac3c02bb 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -111,6 +111,7 @@
#include <linux/kernel.h>
extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 92fd2c8a9af0..12659ce5c1f3 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -10,7 +10,7 @@
#ifndef _ASM_PGTABLE_2LEVEL_H
#define _ASM_PGTABLE_2LEVEL_H
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
/*
* Hardware-wise, we have a two level page table structure, where the first
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index e25f4392e1b2..e1b6f280ab08 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -23,7 +23,7 @@ struct mm_struct;
/*
* Don't change this structure - ASM code relies on it.
*/
-extern struct processor {
+struct processor {
/* MISC
* get data abort address/flags
*/
@@ -79,9 +79,13 @@ extern struct processor {
unsigned int suspend_size;
void (*do_suspend)(void *);
void (*do_resume)(void *);
-} processor;
+};
#ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
extern void cpu_do_suspend(void *);
extern void cpu_do_resume(void *);
#else
-#define cpu_proc_init processor._proc_init
-#define cpu_proc_fin processor._proc_fin
-#define cpu_reset processor.reset
-#define cpu_do_idle processor._do_idle
-#define cpu_dcache_clean_area processor.dcache_clean_area
-#define cpu_set_pte_ext processor.set_pte_ext
-#define cpu_do_switch_mm processor.switch_mm
-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend processor.do_suspend
-#define cpu_do_resume processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised. We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f) cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+ unsigned int cpu = smp_processor_id();
+ *cpu_vtable[cpu] = *p;
+ WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+ cpu_vtable[0]->dcache_clean_area);
+ WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+ cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f) processor.f
+#define PROC_TABLE(f) processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+ processor = *p;
+}
+#endif
+
+#define cpu_proc_init PROC_VTABLE(_proc_init)
+#define cpu_check_bugs PROC_VTABLE(check_bugs)
+#define cpu_proc_fin PROC_VTABLE(_proc_fin)
+#define cpu_reset PROC_VTABLE(reset)
+#define cpu_do_idle PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend PROC_VTABLE(do_suspend)
+#define cpu_do_resume PROC_VTABLE(do_resume)
#endif
extern void cpu_resume(void);
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
index 7be511310191..d41d3598e5e5 100644
--- a/arch/arm/kernel/bugs.c
+++ b/arch/arm/kernel/bugs.c
@@ -6,8 +6,8 @@
void check_other_bugs(void)
{
#ifdef MULTI_CPU
- if (processor.check_bugs)
- processor.check_bugs();
+ if (cpu_check_bugs)
+ cpu_check_bugs();
#endif
}
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 6e0375e7db05..997b02302c31 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -145,6 +145,9 @@ __mmap_switched_data:
#endif
.size __mmap_switched_data, . - __mmap_switched_data
+ __FINIT
+ .text
+
/*
* This provides a C-API version of __lookup_processor_type
*/
@@ -156,9 +159,6 @@ ENTRY(lookup_processor_type)
ldmfd sp!, {r4 - r6, r9, pc}
ENDPROC(lookup_processor_type)
- __FINIT
- .text
-
/*
* Read processor ID register (CP#15, CR0), and look up in the linker-built
* supported processor list. Note that we can't use the absolute addresses
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ac7e08886863..375b13f7e780 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -114,6 +114,11 @@ EXPORT_SYMBOL(elf_hwcap2);
#ifdef MULTI_CPU
struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+ [0] = &processor,
+};
+#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -666,28 +671,33 @@ static void __init smp_build_mpidr_hash(void)
}
#endif
-static void __init setup_processor(void)
+/*
+ * locate processor in the list of supported processor types. The linker
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
+ */
+struct proc_info_list *lookup_processor(u32 midr)
{
- struct proc_info_list *list;
+ struct proc_info_list *list = lookup_processor_type(midr);
- /*
- * locate processor in the list of supported processor
- * types. The linker builds this table for us from the
- * entries in arch/arm/mm/proc-*.S
- */
- list = lookup_processor_type(read_cpuid_id());
if (!list) {
- pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
- read_cpuid_id());
- while (1);
+ pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
+ smp_processor_id(), midr);
+ while (1)
+ /* can't use cpu_relax() here as it may require MMU setup */;
}
+ return list;
+}
+
+static void __init setup_processor(void)
+{
+ unsigned int midr = read_cpuid_id();
+ struct proc_info_list *list = lookup_processor(midr);
+
cpu_name = list->cpu_name;
__cpu_architecture = __get_cpu_architecture();
-#ifdef MULTI_CPU
- processor = *list->proc;
-#endif
+ init_proc_vtable(list->proc);
#ifdef MULTI_TLB
cpu_tlb = *list->tlb;
#endif
@@ -699,7 +709,7 @@ static void __init setup_processor(void)
#endif
pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
- cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+ list->cpu_name, midr, midr & 15,
proc_arch[cpu_architecture()], get_cr());
snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 0978282d5fc2..12a6172263c0 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -42,6 +42,7 @@
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
#endif
}
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+ if (!cpu_vtable[cpu])
+ cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+ return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+ init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+ return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
int ret;
@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
if (!smp_ops.smp_boot_secondary)
return -ENOSYS;
+ ret = secondary_biglittle_prepare(cpu);
+ if (ret)
+ return ret;
+
/*
* We need to tell the secondary core where to find
* its stack and the page tables.
@@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
struct mm_struct *mm = &init_mm;
unsigned int cpu;
+ secondary_biglittle_init();
+
/*
* The identity mapping is uncached (strongly ordered), so
* switch away from it before attempting any exclusive accesses.
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 9500b6e27380..f86b72d1d59e 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -209,11 +209,61 @@ static int __init omapdss_init_fbdev(void)
return 0;
}
-#else
-static inline int omapdss_init_fbdev(void)
+
+static const char * const omapdss_compat_names[] __initconst = {
+ "ti,omap2-dss",
+ "ti,omap3-dss",
+ "ti,omap4-dss",
+ "ti,omap5-dss",
+ "ti,dra7-dss",
+};
+
+static struct device_node * __init omapdss_find_dss_of_node(void)
{
- return 0;
+ struct device_node *node;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
+ node = of_find_compatible_node(NULL, NULL,
+ omapdss_compat_names[i]);
+ if (node)
+ return node;
+ }
+
+ return NULL;
}
+
+static int __init omapdss_init_of(void)
+{
+ int r;
+ struct device_node *node;
+ struct platform_device *pdev;
+
+ /* only create dss helper devices if dss is enabled in the .dts */
+
+ node = omapdss_find_dss_of_node();
+ if (!node)
+ return 0;
+
+ if (!of_device_is_available(node))
+ return 0;
+
+ pdev = of_find_device_by_node(node);
+
+ if (!pdev) {
+ pr_err("Unable to find DSS platform device\n");
+ return -ENODEV;
+ }
+
+ r = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (r) {
+ pr_err("Unable to populate DSS submodule devices\n");
+ return r;
+ }
+
+ return omapdss_init_fbdev();
+}
+omap_device_initcall(omapdss_init_of);
#endif /* CONFIG_FB_OMAP2 */
static void dispc_disable_outputs(void)
@@ -361,58 +411,3 @@ int omap_dss_reset(struct omap_hwmod *oh)
return r;
}
-
-static const char * const omapdss_compat_names[] __initconst = {
- "ti,omap2-dss",
- "ti,omap3-dss",
- "ti,omap4-dss",
- "ti,omap5-dss",
- "ti,dra7-dss",
-};
-
-static struct device_node * __init omapdss_find_dss_of_node(void)
-{
- struct device_node *node;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
- node = of_find_compatible_node(NULL, NULL,
- omapdss_compat_names[i]);
- if (node)
- return node;
- }
-
- return NULL;
-}
-
-static int __init omapdss_init_of(void)
-{
- int r;
- struct device_node *node;
- struct platform_device *pdev;
-
- /* only create dss helper devices if dss is enabled in the .dts */
-
- node = omapdss_find_dss_of_node();
- if (!node)
- return 0;
-
- if (!of_device_is_available(node))
- return 0;
-
- pdev = of_find_device_by_node(node);
-
- if (!pdev) {
- pr_err("Unable to find DSS platform device\n");
- return -ENODEV;
- }
-
- r = of_platform_populate(node, NULL, NULL, &pdev->dev);
- if (r) {
- pr_err("Unable to populate DSS submodule devices\n");
- return r;
- }
-
- return omapdss_init_fbdev();
-}
-omap_device_initcall(omapdss_init_of);
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index 5544b82a2e7a..9a07916af8dd 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
case ARM_CPU_PART_CORTEX_A17:
case ARM_CPU_PART_CORTEX_A73:
case ARM_CPU_PART_CORTEX_A75:
- if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
- goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
harden_branch_predictor_bpiall;
spectre_v2_method = "BPIALL";
@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
case ARM_CPU_PART_CORTEX_A15:
case ARM_CPU_PART_BRAHMA_B15:
- if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
- goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
harden_branch_predictor_iciallu;
spectre_v2_method = "ICIALLU";
@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 != 0)
break;
- if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
- goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
call_hvc_arch_workaround_1;
- processor.switch_mm = cpu_v7_hvc_switch_mm;
+ cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
spectre_v2_method = "hypervisor";
break;
@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 != 0)
break;
- if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
- goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
call_smc_arch_workaround_1;
- processor.switch_mm = cpu_v7_smc_switch_mm;
+ cpu_do_switch_mm = cpu_v7_smc_switch_mm;
spectre_v2_method = "firmware";
break;
@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
if (spectre_v2_method)
pr_info("CPU%u: Spectre v2: using %s workaround\n",
smp_processor_id(), spectre_v2_method);
- return;
-
-bl_error:
- pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
- cpu);
}
#else
static void cpu_v7_spectre_init(void)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6fe52819e014..339eb17c9808 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_hvc_switch_mm)
hvc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
-ENDPROC(cpu_v7_smc_switch_mm)
+ENDPROC(cpu_v7_hvc_switch_mm)
#endif
ENTRY(cpu_v7_iciallu_switch_mm)
mov r3, #0
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index aff6e6eadc70..ee7b07938dd5 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -573,7 +573,7 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
*/
ufp_exc->fpexc = hwstate->fpexc;
ufp_exc->fpinst = hwstate->fpinst;
- ufp_exc->fpinst2 = ufp_exc->fpinst2;
+ ufp_exc->fpinst2 = hwstate->fpinst2;
/* Ensure that VFP is disabled. */
vfp_flush_hwstate(thread);
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 8253a1a9e985..fef7351e9f67 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -139,6 +139,7 @@
clock-names = "stmmaceth";
tx-fifo-depth = <16384>;
rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
status = "disabled";
};
@@ -154,6 +155,7 @@
clock-names = "stmmaceth";
tx-fifo-depth = <16384>;
rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
status = "disabled";
};
@@ -169,6 +171,7 @@
clock-names = "stmmaceth";
tx-fifo-depth = <16384>;
rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index b5f2273caca4..a79c8d369e0b 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -652,7 +652,7 @@
clock-names = "fck", "brg_int", "scif_clk";
dmas = <&dmac1 0x35>, <&dmac1 0x34>,
<&dmac2 0x35>, <&dmac2 0x34>;
- dma-names = "tx", "rx";
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
resets = <&cpg 518>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
index fe2e2c051cc9..5a7012be0d6a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
@@ -15,7 +15,7 @@
aliases {
serial0 = &scif0;
- ethernet0 = &avb;
+ ethernet0 = &gether;
};
chosen {
@@ -97,23 +97,6 @@
};
};
-&avb {
- pinctrl-0 = <&avb_pins>;
- pinctrl-names = "default";
-
- phy-mode = "rgmii-id";
- phy-handle = <&phy0>;
- renesas,no-ether-link;
- status = "okay";
-
- phy0: ethernet-phy@0 {
- rxc-skew-ps = <1500>;
- reg = <0>;
- interrupt-parent = <&gpio1>;
- interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
- };
-};
-
&canfd {
pinctrl-0 = <&canfd0_pins>;
pinctrl-names = "default";
@@ -139,6 +122,23 @@
clock-frequency = <32768>;
};
+&gether {
+ pinctrl-0 = <&gether_pins>;
+ pinctrl-names = "default";
+
+ phy-mode = "rgmii-id";
+ phy-handle = <&phy0>;
+ renesas,no-ether-link;
+ status = "okay";
+
+ phy0: ethernet-phy@0 {
+ rxc-skew-ps = <1500>;
+ reg = <0>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
&i2c0 {
pinctrl-0 = <&i2c0_pins>;
pinctrl-names = "default";
@@ -236,16 +236,17 @@
};
&pfc {
- avb_pins: avb {
- groups = "avb_mdio", "avb_rgmii";
- function = "avb";
- };
-
canfd0_pins: canfd0 {
groups = "canfd0_data_a";
function = "canfd0";
};
+ gether_pins: gether {
+ groups = "gether_mdio_a", "gether_rgmii",
+ "gether_txcrefclk", "gether_txcrefclk_mega";
+ function = "gether";
+ };
+
i2c0_pins: i2c0 {
groups = "i2c0";
function = "i2c0";
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 3e2091708b8e..6b0d4dff5012 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -24,6 +24,14 @@
#define KERNEL_DS UL(-1)
#define USER_DS (TASK_SIZE_64 - 1)
+/*
+ * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
+ * no point in shifting all network buffers by 2 bytes just to make some IP
+ * header fields appear aligned in memory, potentially sacrificing some DMA
+ * performance on some platforms.
+ */
+#define NET_IP_ALIGN 0
+
#ifndef __ASSEMBLY__
#ifdef __KERNEL__
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 0c909c4a932f..842fb9572661 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -468,7 +468,7 @@
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
-#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
+#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL
#error "Inconsistent SCTLR_EL2 set/clear bits"
#endif
@@ -509,7 +509,7 @@
SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0)
-#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
+#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL
#error "Inconsistent SCTLR_EL1 set/clear bits"
#endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index af50064dea51..aec5ecb85737 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1333,7 +1333,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_hw_dbm,
},
#endif
-#ifdef CONFIG_ARM64_SSBD
{
.desc = "CRC32 instructions",
.capability = ARM64_HAS_CRC32,
@@ -1343,6 +1342,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
.min_field_value = 1,
},
+#ifdef CONFIG_ARM64_SSBD
{
.desc = "Speculative Store Bypassing Safe (SSBS)",
.capability = ARM64_SSBS,
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 953e316521fc..f4fc1e0544b7 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -313,6 +313,7 @@ void __init setup_arch(char **cmdline_p)
arm64_memblock_init();
paging_init();
+ efi_apply_persistent_mem_reservations();
acpi_table_upgrade();
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9d9582cac6c4..9b432d9fcada 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -483,8 +483,6 @@ void __init arm64_memblock_init(void)
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
dma_contiguous_reserve(arm64_dma_phys_limit);
-
- memblock_allow_resize();
}
void __init bootmem_init(void)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 394b8d554def..d1d6601b385d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -659,6 +659,8 @@ void __init paging_init(void)
memblock_free(__pa_symbol(init_pg_dir),
__pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
+
+ memblock_allow_resize();
}
/*
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 6181e4134483..fe3ddd73a0cc 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -55,12 +55,12 @@
*/
#ifdef CONFIG_SUN3
#define PTRS_PER_PTE 16
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 2048
#elif defined(CONFIG_COLDFIRE)
#define PTRS_PER_PTE 512
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
#else
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index f64ebb9c9a41..e14b6621c933 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -63,7 +63,7 @@ extern int mem_init_done;
#include <asm-generic/4level-fixup.h>
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 75108ec669eb..6c79e8a16a26 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority) (int pko_port,
void (*cvmx_override_ipd_port_setup) (int ipd_port);
/* Port count per interface */
-static int interface_port_count[5];
+static int interface_port_count[9];
/**
* Return the number of interfaces the chip has. Each interface
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index 490b12af103c..c52d0efacd14 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_STAGING=y
CONFIG_OCTEON_ETHERNET=y
+CONFIG_OCTEON_USB=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_RAS=y
CONFIG_EXT4_FS=y
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index ea09ed6a80a9..8c6c48ed786a 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -794,6 +794,7 @@ static void __init arch_mem_init(char **cmdline_p)
/* call board setup routine */
plat_mem_setup();
+ memblock_set_bottom_up(true);
/*
* Make sure all kernel memory is in the maps. The "UP" and
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 0f852e1b5891..15e103c6d799 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2260,10 +2260,8 @@ void __init trap_init(void)
unsigned long size = 0x200 + VECTORSPACING*64;
phys_addr_t ebase_pa;
- memblock_set_bottom_up(true);
ebase = (unsigned long)
memblock_alloc_from(size, 1 << fls(size), 0);
- memblock_set_bottom_up(false);
/*
* Try to ensure ebase resides in KSeg0 if possible.
@@ -2307,6 +2305,7 @@ void __init trap_init(void)
if (board_ebase_setup)
board_ebase_setup();
per_cpu_trap_init(true);
+ memblock_set_bottom_up(false);
/*
* Copy the generic exception handlers to their final destination.
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index 622761878cd1..60bf0a1cb757 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -231,6 +231,8 @@ static __init void prom_meminit(void)
cpumask_clear(&__node_data[(node)]->cpumask);
}
}
+ max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
node = cpu / loongson_sysconf.cores_per_node;
if (node >= num_online_nodes())
@@ -248,19 +250,9 @@ static __init void prom_meminit(void)
void __init paging_init(void)
{
- unsigned node;
unsigned long zones_size[MAX_NR_ZONES] = {0, };
pagetable_init();
-
- for_each_online_node(node) {
- unsigned long start_pfn, end_pfn;
-
- get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
-
- if (end_pfn > max_low_pfn)
- max_low_pfn = end_pfn;
- }
#ifdef CONFIG_ZONE_DMA32
zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index e6c9485cadcf..cb38461391cb 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -50,7 +50,7 @@ void *arch_dma_alloc(struct device *dev, size_t size,
void *ret;
ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
- if (!ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+ if (ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
dma_cache_wback_inv((unsigned long) ret, size);
ret = (void *)UNCAC_ADDR(ret);
}
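The fix inverts a null check: the writeback-invalidate and uncached remap must run only when the allocation succeeded, not when it failed. A minimal sketch of the corrected flow, using the same calls as the hunk above (alloc_sketch is a hypothetical name):

	static void *alloc_sketch(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t gfp,
				  unsigned long attrs)
	{
		void *ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);

		/* Only post-process a buffer that actually exists. */
		if (ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
			dma_cache_wback_inv((unsigned long)ret, size);
			ret = (void *)UNCAC_ADDR(ret);
		}
		return ret;
	}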
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index d8b8444d6795..813d13f92957 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -435,6 +435,7 @@ void __init prom_meminit(void)
mlreset();
szmem();
+ max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
for (node = 0; node < MAX_COMPACT_NODES; node++) {
if (node_online(node)) {
@@ -455,18 +456,8 @@ extern void setup_zero_pages(void);
void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = {0, };
- unsigned node;
pagetable_init();
-
- for_each_online_node(node) {
- unsigned long start_pfn, end_pfn;
-
- get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
-
- if (end_pfn > max_low_pfn)
- max_low_pfn = end_pfn;
- }
zones_size[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(zones_size);
}
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index d3e19a55cf53..9f52db930c00 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -4,7 +4,7 @@
#ifndef _ASMNDS32_PGTABLE_H
#define _ASMNDS32_PGTABLE_H
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#include <asm-generic/4level-fixup.h>
#include <asm-generic/sizes.h>
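Giving __PAGETABLE_PMD_FOLDED an explicit value of 1 — here and in the sibling architectures in this series — keeps the macro usable in arithmetic preprocessor tests, where an empty expansion would be a syntax error. A sketch:

	/* Both forms now work; with an empty define the #if would not parse. */
	#ifdef __PAGETABLE_PMD_FOLDED
	/* ... */
	#endif
	#if __PAGETABLE_PMD_FOLDED
	/* ... */
	#endif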
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index b941ac7d4e70..c7bb74e22436 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -111,7 +111,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#if CONFIG_PGTABLE_LEVELS == 3
#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#define BITS_PER_PMD 0
#endif
#define PTRS_PER_PMD (1UL << BITS_PER_PMD)
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 16aec9ba2580..8a63515f03bf 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
volatile unsigned int *a;
a = __ldcw_align(x);
- /* Release with ordered store. */
- __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+ mb();
+ *a = 1;
}
static inline int arch_spin_trylock(arch_spinlock_t *x)
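The release path now issues a full barrier before a plain store, ensuring every access inside the critical section is globally visible before the lock word is freed. A sketch of the ordering (unlock_sketch is a hypothetical name; a is the aligned lock word as above):

	static inline void unlock_sketch(volatile unsigned int *a)
	{
		mb();	/* complete critical-section loads/stores first */
		*a = 1;	/* then release the lock word */
	}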
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 9505c317818d..a9bc90dc4ae7 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -640,7 +640,8 @@ cas_action:
sub,<> %r28, %r25, %r0
2: stw %r24, 0(%r26)
/* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
+ sync
+ stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
@@ -654,7 +655,8 @@ cas_action:
3:
/* Error occurred on load or store */
/* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
+ sync
+ stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
#endif
@@ -855,7 +857,8 @@ cas2_action:
cas2_end:
/* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
+ sync
+ stw %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
@@ -865,7 +868,8 @@ cas2_end:
22:
/* Error occurred on load or store */
/* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
+ sync
+ stw %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
b lws_exit
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 3ef40b703c4a..e746becd9d6f 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -268,19 +268,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
* their hooks, a bitfield is reserved for use by the platform near the
* top of MMIO addresses (not PIO, those have to cope the hard way).
*
- * This bit field is 12 bits and is at the top of the IO virtual
- * addresses PCI_IO_INDIRECT_TOKEN_MASK.
+ * The highest addresses in the kernel virtual space are:
*
- * The kernel virtual space is thus:
+ * d0003fffffffffff # with Hash MMU
+ * c00fffffffffffff # with Radix MMU
*
- * 0xD000000000000000 : vmalloc
- * 0xD000080000000000 : PCI PHB IO space
- * 0xD000080080000000 : ioremap
- * 0xD0000fffffffffff : end of ioremap region
- *
- * Since the top 4 bits are reserved as the region ID, we use thus
- * the next 12 bits and keep 4 bits available for the future if the
- * virtual address space is ever to be extended.
+ * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
+ * that can be used for the field.
*
* The direct IO mapping operations will then mask off those bits
* before doing the actual access, though that only happens when
@@ -292,8 +286,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
*/
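Under the new layout the token occupies bits 52-59. A minimal sketch of pulling the token out of an address and stripping it off (hypothetical helper names; the real macros follow below):

	#define TOK_SHIFT	52
	#define TOK_MASK	(0xfful << TOK_SHIFT)

	/* Sketch: split an indirect-MMIO address into token and real address. */
	static inline unsigned long io_token(unsigned long addr)
	{
		return (addr & TOK_MASK) >> TOK_SHIFT;
	}

	static inline unsigned long io_strip_token(unsigned long addr)
	{
		return addr & ~TOK_MASK;
	}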
#ifdef CONFIG_PPC_INDIRECT_MMIO
-#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul
-#define PCI_IO_IND_TOKEN_SHIFT 48
+#define PCI_IO_IND_TOKEN_SHIFT 52
+#define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT)
#define PCI_FIX_ADDR(addr) \
((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
#define PCI_GET_ADDR_TOKEN(addr) \
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 6093bc8f74e5..a6e9e314c707 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -493,6 +493,8 @@
__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
__PPC_RT(t) | __PPC_RB(b))
+#define __PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
+ ___PPC_RT(t) | ___PPC_RB(b))
#define PPC_ICBT(c,a,b) stringify_in_c(.long PPC_INST_ICBT | \
__PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
/* PASemi instructions */
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index f73886a1a7f5..0b8a735b6d85 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -54,6 +54,7 @@ struct pt_regs
#ifdef CONFIG_PPC64
unsigned long ppr;
+ unsigned long __pad; /* Maintain 16 byte interrupt stack alignment */
#endif
};
#endif
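The pad keeps the PPC64 pt_regs size a multiple of 16 so interrupt stack frames preserve the required alignment; setup_64.c below adds a build-time check on the frame size. A local sketch of the same kind of invariant (illustrative only):

	/* The alignment property the pad maintains. */
	_Static_assert(sizeof(struct pt_regs) % 16 == 0,
		       "pt_regs size must keep the stack 16-byte aligned");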
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2a51e4cc8246..236c1151a3a7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -636,6 +636,8 @@ static void *__init alloc_stack(unsigned long limit, int cpu)
{
unsigned long pa;
+ BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
+
pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
early_cpu_to_node(cpu), MEMBLOCK_NONE);
if (!pa) {
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 491b0f715d6b..ea1d7c808319 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -6,8 +6,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
/*
* Tracepoint for guest mode entry.
@@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests,
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
#include <trace/define_trace.h>
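All four KVM trace headers in this series now reset the include-path macros immediately before pulling in define_trace.h, so values left behind by a previously included trace header cannot leak into this one. The pattern, as a sketch:

	/* Pin the macros locally, right before the generator include. */
	#undef TRACE_INCLUDE_PATH
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_PATH .
	#define TRACE_INCLUDE_FILE trace
	#include <trace/define_trace.h>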
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
index ac640e81fdc5..3837842986aa 100644
--- a/arch/powerpc/kvm/trace_booke.h
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -6,8 +6,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_booke
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_booke
#define kvm_trace_symbol_exit \
{0, "CRITICAL"}, \
@@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
#endif
/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_booke
+
#include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index bcfe8a987f6a..8a1e3b0047f1 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -9,8 +9,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_hv
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_hv
#define kvm_trace_symbol_hcall \
{H_REMOVE, "H_REMOVE"}, \
@@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
#endif /* _TRACE_KVM_HV_H */
/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
#include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index 2f9a8829552b..46a46d328fbf 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -8,8 +8,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_pr
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_pr
TRACE_EVENT(kvm_book3s_reenter,
TP_PROTO(int r, struct kvm_vcpu *vcpu),
@@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit,
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_pr
+
#include <trace/define_trace.h>
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3a048e98a132..ce28ae5ca080 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1178,7 +1178,7 @@ static long vphn_get_associativity(unsigned long cpu,
switch (rc) {
case H_FUNCTION:
- printk(KERN_INFO
+ printk_once(KERN_INFO
"VPHN is not supported. Disabling polling...\n");
stop_topology_update();
break;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index c3fdf2969d9f..bc3914d54e26 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -19,6 +19,7 @@
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
+#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
@@ -58,27 +59,19 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}
-static void assert_slb_exists(unsigned long ea)
+static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
unsigned long tmp;
WARN_ON_ONCE(mfmsr() & MSR_EE);
- asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
- WARN_ON(tmp == 0);
-#endif
-}
-
-static void assert_slb_notexists(unsigned long ea)
-{
-#ifdef CONFIG_DEBUG_VM
- unsigned long tmp;
+ if (!cpu_has_feature(CPU_FTR_ARCH_206))
+ return;
- WARN_ON_ONCE(mfmsr() & MSR_EE);
+ asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
- asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
- WARN_ON(tmp != 0);
+ WARN_ON(present == (tmp == 0));
#endif
}
@@ -114,7 +107,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
*/
slb_shadow_update(ea, ssize, flags, index);
- assert_slb_notexists(ea);
+ assert_slb_presence(false, ea);
asm volatile("slbmte %0,%1" :
: "r" (mk_vsid_data(ea, ssize, flags)),
"r" (mk_esid_data(ea, ssize, index))
@@ -137,7 +130,7 @@ void __slb_restore_bolted_realmode(void)
"r" (be64_to_cpu(p->save_area[index].esid)));
}
- assert_slb_exists(local_paca->kstack);
+ assert_slb_presence(true, local_paca->kstack);
}
/*
@@ -185,7 +178,7 @@ void slb_flush_and_restore_bolted(void)
:: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
: "memory");
- assert_slb_exists(get_paca()->kstack);
+ assert_slb_presence(true, get_paca()->kstack);
get_paca()->slb_cache_ptr = 0;
@@ -443,9 +436,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
ea = (unsigned long)
get_paca()->slb_cache[i] << SID_SHIFT;
/*
- * Could assert_slb_exists here, but hypervisor
- * or machine check could have come in and
- * removed the entry at this point.
+ * Could assert_slb_presence(true) here, but
+ * hypervisor or machine check could have come
+ * in and removed the entry at this point.
*/
slbie_data = ea;
@@ -676,7 +669,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
* User preloads should add isync afterwards in case the kernel
* accesses user memory before it returns to userspace with rfid.
*/
- assert_slb_notexists(ea);
+ assert_slb_presence(false, ea);
asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
barrier();
@@ -715,7 +708,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
return -EFAULT;
if (ea < H_VMALLOC_END)
- flags = get_paca()->vmalloc_sllp;
+ flags = local_paca->vmalloc_sllp;
else
flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
} else {
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 6f60e0931922..75b935252981 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -102,63 +102,6 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);
-#define NPU_DMA_OP_UNSUPPORTED() \
- dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
- __func__)
-
-static void *dma_npu_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- unsigned long attrs)
-{
- NPU_DMA_OP_UNSUPPORTED();
- return NULL;
-}
-
-static void dma_npu_free(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- NPU_DMA_OP_UNSUPPORTED();
-}
-
-static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- NPU_DMA_OP_UNSUPPORTED();
- return 0;
-}
-
-static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction,
- unsigned long attrs)
-{
- NPU_DMA_OP_UNSUPPORTED();
- return 0;
-}
-
-static int dma_npu_dma_supported(struct device *dev, u64 mask)
-{
- NPU_DMA_OP_UNSUPPORTED();
- return 0;
-}
-
-static u64 dma_npu_get_required_mask(struct device *dev)
-{
- NPU_DMA_OP_UNSUPPORTED();
- return 0;
-}
-
-static const struct dma_map_ops dma_npu_ops = {
- .map_page = dma_npu_map_page,
- .map_sg = dma_npu_map_sg,
- .alloc = dma_npu_alloc,
- .free = dma_npu_free,
- .dma_supported = dma_npu_dma_supported,
- .get_required_mask = dma_npu_get_required_mask,
-};
-
/*
* Returns the PE associated with the PCI device of the given
* NPU. Returns the linked pci device if pci_dev != NULL.
@@ -270,10 +213,11 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);
/*
- * We don't initialise npu_pe->tce32_table as we always use
- * dma_npu_ops which are nops.
+ * NVLink devices use the same TCE table configuration as
+ * their parent device so drivers shouldn't be doing DMA
+ * operations directly on these devices.
*/
- set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
+ set_dma_ops(&npe->pdev->dev, NULL);
}
/*
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index d10146197533..4b594f2e4f7e 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -71,10 +71,27 @@ KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
# arch specific predefines for sparse
CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)
+# Default target when executing plain make
+boot := arch/riscv/boot
+KBUILD_IMAGE := $(boot)/Image.gz
+
head-y := arch/riscv/kernel/head.o
core-y += arch/riscv/kernel/ arch/riscv/mm/
libs-y += arch/riscv/lib/
-all: vmlinux
+PHONY += vdso_install
+vdso_install:
+ $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+
+all: Image.gz
+
+Image: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+Image.%: Image
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+zinstall install:
+ $(Q)$(MAKE) $(build)=$(boot) $@
diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore
new file mode 100644
index 000000000000..8dab0bb6ae66
--- /dev/null
+++ b/arch/riscv/boot/.gitignore
@@ -0,0 +1,2 @@
+Image
+Image.gz
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
new file mode 100644
index 000000000000..0990a9fdbe5d
--- /dev/null
+++ b/arch/riscv/boot/Makefile
@@ -0,0 +1,33 @@
+#
+# arch/riscv/boot/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2018, Anup Patel.
+# Author: Anup Patel <anup@brainfault.org>
+#
+# Based on the ia64 and arm64 boot/Makefile.
+#
+
+OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+targets := Image
+
+$(obj)/Image: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/Image.gz: $(obj)/Image FORCE
+ $(call if_changed,gzip)
+
+install:
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+ $(obj)/Image System.map "$(INSTALL_PATH)"
+
+zinstall:
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+ $(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/riscv/boot/install.sh b/arch/riscv/boot/install.sh
new file mode 100644
index 000000000000..18c39159c0ff
--- /dev/null
+++ b/arch/riscv/boot/install.sh
@@ -0,0 +1,60 @@
+#!/bin/sh
+#
+# arch/riscv/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+# Adapted from code in arch/i386/boot/install.sh by Russell King
+#
+# "make install" script for the RISC-V Linux port
+#
+# Arguments:
+# $1 - kernel version
+# $2 - kernel image file
+# $3 - kernel map file
+# $4 - default install path (blank if root directory)
+#
+
+verify () {
+ if [ ! -f "$1" ]; then
+ echo "" 1>&2
+ echo " *** Missing file: $1" 1>&2
+ echo ' *** You need to run "make" before "make install".' 1>&2
+ echo "" 1>&2
+ exit 1
+ fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
+# User may have a custom install script
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+
+if [ "$(basename $2)" = "Image.gz" ]; then
+# Compressed install
+ echo "Installing compressed kernel"
+ base=vmlinuz
+else
+# Normal install
+ echo "Installing normal kernel"
+ base=vmlinux
+fi
+
+if [ -f $4/$base-$1 ]; then
+ mv $4/$base-$1 $4/$base-$1.old
+fi
+cat $2 > $4/$base-$1
+
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+ mv $4/System.map-$1 $4/System.map-$1.old
+fi
+cp $3 $4/System.map-$1
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 07fa9ea75fea..ef4f15df9adf 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -76,4 +76,5 @@ CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_PRINTK_TIME=y
# CONFIG_RCU_TRACE is not set
diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h
index 349df33808c4..cd2af4b013e3 100644
--- a/arch/riscv/include/asm/module.h
+++ b/arch/riscv/include/asm/module.h
@@ -8,6 +8,7 @@
#define MODULE_ARCH_VERMAGIC "riscv"
+struct module;
u64 module_emit_got_entry(struct module *mod, u64 val);
u64 module_emit_plt_entry(struct module *mod, u64 val);
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index 2c5df945d43c..bbe1862e8f80 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -56,8 +56,8 @@ struct pt_regs {
unsigned long sstatus;
unsigned long sbadaddr;
unsigned long scause;
- /* a0 value before the syscall */
- unsigned long orig_a0;
+ /* a0 value before the syscall */
+ unsigned long orig_a0;
};
#ifdef CONFIG_64BIT
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 473cfc84e412..8c3e3e3c8be1 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -400,13 +400,13 @@ extern unsigned long __must_check __asm_copy_from_user(void *to,
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- return __asm_copy_to_user(to, from, n);
+ return __asm_copy_from_user(to, from, n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- return __asm_copy_from_user(to, from, n);
+ return __asm_copy_to_user(to, from, n);
}
extern long strncpy_from_user(char *dest, const char __user *src, long count);
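With the swap fixed, each wrapper now calls the assembly routine matching its direction. A usage sketch (kbuf and uptr are hypothetical; a nonzero return is the number of bytes left uncopied):

	char kbuf[16];

	if (raw_copy_from_user(kbuf, uptr, sizeof(kbuf)))	/* user -> kernel */
		return -EFAULT;
	if (raw_copy_to_user(uptr, kbuf, sizeof(kbuf)))		/* kernel -> user */
		return -EFAULT;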
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index eff7aa9aa163..fef96f117b4d 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -13,10 +13,9 @@
/*
* There is explicitly no include guard here because this file is expected to
- * be included multiple times. See uapi/asm/syscalls.h for more info.
+ * be included multiple times.
*/
-#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SYS_CLONE
+
#include <uapi/asm/unistd.h>
-#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/unistd.h
index 206dc4b0f6ea..1f3bd3ebbb0d 100644
--- a/arch/riscv/include/uapi/asm/syscalls.h
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -1,13 +1,25 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * Copyright (C) 2017-2018 SiFive
+ * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/*
- * There is explicitly no include guard here because this file is expected to
- * be included multiple times in order to define the syscall macros via
- * __SYSCALL.
- */
+#ifdef __LP64__
+#define __ARCH_WANT_NEW_STAT
+#endif /* __LP64__ */
+
+#include <asm-generic/unistd.h>
/*
* Allows the instruction cache to be flushed from userspace. Despite RISC-V
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 3a5a2ee31547..b4a7d4427fbb 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -64,7 +64,7 @@ int riscv_of_processor_hartid(struct device_node *node)
static void print_isa(struct seq_file *f, const char *orig_isa)
{
- static const char *ext = "mafdc";
+ static const char *ext = "mafdcsu";
const char *isa = orig_isa;
const char *e;
@@ -88,11 +88,14 @@ static void print_isa(struct seq_file *f, const char *orig_isa)
/*
* Check the rest of the ISA string for valid extensions, printing those
* we find. RISC-V ISA strings define an order, so we only print the
- * extension bits when they're in order.
+ * extension bits when they're in order. Hide the supervisor (S)
+ * extension from userspace as it's not accessible from there.
*/
for (e = ext; *e != '\0'; ++e) {
if (isa[0] == e[0]) {
- seq_write(f, isa, 1);
+ if (isa[0] != 's')
+ seq_write(f, isa, 1);
+
isa++;
}
}
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 711190d473d4..fe884cd69abd 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -44,6 +44,16 @@ ENTRY(_start)
amoadd.w a3, a2, (a3)
bnez a3, .Lsecondary_start
+ /* Clear BSS for flat non-ELF images */
+ la a3, __bss_start
+ la a4, __bss_stop
+ ble a4, a3, clear_bss_done
+clear_bss:
+ REG_S zero, (a3)
+ add a3, a3, RISCV_SZPTR
+ blt a3, a4, clear_bss
+clear_bss_done:
+
/* Save hart ID and DTB physical address */
mv s0, a0
mv s1, a1
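For reference, a C rendering of the word-at-a-time clear_bss loop above (illustrative only; the symbols come from the linker script, and clear_bss_sketch is a hypothetical name):

	static void clear_bss_sketch(void)
	{
		extern char __bss_start[], __bss_stop[];
		unsigned long *p;

		/* Zero [__bss_start, __bss_stop) one register-sized word at a time. */
		for (p = (unsigned long *)__bss_start;
		     p < (unsigned long *)__bss_stop; p++)
			*p = 0;
	}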
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 3303ed2cd419..7dd308129b40 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -21,7 +21,7 @@ static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
{
if (v != (u32)v) {
pr_err("%s: value %016llx out of range for 32-bit field\n",
- me->name, v);
+ me->name, (long long)v);
return -EINVAL;
}
*location = v;
@@ -102,7 +102,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
if (offset != (s32)offset) {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
- me->name, v, location);
+ me->name, (long long)v, location);
return -EINVAL;
}
@@ -144,7 +144,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
if (IS_ENABLED(CMODEL_MEDLOW)) {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
- me->name, v, location);
+ me->name, (long long)v, location);
return -EINVAL;
}
@@ -188,7 +188,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
} else {
pr_err(
"%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
- me->name, v, location);
+ me->name, (long long)v, location);
return -EINVAL;
}
@@ -212,7 +212,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
} else {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
- me->name, v, location);
+ me->name, (long long)v, location);
return -EINVAL;
}
}
@@ -234,7 +234,7 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location,
if (offset != fill_v) {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
- me->name, v, location);
+ me->name, (long long)v, location);
return -EINVAL;
}
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index ece84991609c..65df1dfdc303 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -74,7 +74,7 @@ SECTIONS
*(.sbss*)
}
- BSS_SECTION(0, 0, 0)
+ BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
EXCEPTION_TABLE(0x10)
NOTES
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 5739bd05d289..4e2e600f7d53 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -3,6 +3,6 @@ lib-y += memcpy.o
lib-y += memset.o
lib-y += uaccess.o
-lib-(CONFIG_64BIT) += tishift.o
+lib-$(CONFIG_64BIT) += tishift.o
lib-$(CONFIG_32BIT) += udivdi3.o
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 0b33577932c3..e21053e5e0da 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -27,7 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
UTS_MACHINE := s390x
-STACK_SIZE := $(if $(CONFIG_KASAN),32768,16384)
+STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384)
CHECKFLAGS += -D__s390__ -D__s390x__
export LD_BFD
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 593039620487..b1bdd15e3429 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -22,10 +22,10 @@ OBJCOPYFLAGS :=
OBJECTS := $(addprefix $(obj)/,$(obj-y))
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE
$(call if_changed,ld)
-OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info
+OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load
$(obj)/info.bin: vmlinux FORCE
$(call if_changed,objcopy)
@@ -46,17 +46,17 @@ suffix-$(CONFIG_KERNEL_LZMA) := .lzma
suffix-$(CONFIG_KERNEL_LZO) := .lzo
suffix-$(CONFIG_KERNEL_XZ) := .xz
-$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,gzip)
-$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
$(call if_changed,bzip2)
-$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lz4)
-$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzma)
-$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzo)
-$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,xzkern)
OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 259d1698ac50..c69cb04b7a59 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -64,6 +64,8 @@ CONFIG_NUMA=y
CONFIG_PREEMPT=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -84,9 +86,11 @@ CONFIG_PCI_DEBUG=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_CHSC_SCH=y
+CONFIG_VFIO_AP=m
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -161,8 +165,6 @@ CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
@@ -365,6 +367,8 @@ CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
@@ -461,6 +465,7 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
+CONFIG_ISM=m
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -486,9 +491,12 @@ CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
+CONFIG_VFIO_MDEV=m
+CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -615,7 +623,6 @@ CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y
@@ -727,3 +734,4 @@ CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 37fd60c20e22..32f539dc9c19 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -65,6 +65,8 @@ CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -82,9 +84,11 @@ CONFIG_PCI=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_CHSC_SCH=y
+CONFIG_VFIO_AP=m
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -159,8 +163,6 @@ CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
@@ -362,6 +364,8 @@ CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
@@ -458,6 +462,7 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
+CONFIG_ISM=m
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -483,9 +488,12 @@ CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
+CONFIG_VFIO_MDEV=m
+CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -666,3 +674,4 @@ CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 7cb6a52f727d..4d58a92b5d97 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -26,14 +26,23 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_LIVEPATCH=y
+CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
+CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_CRASH_DUMP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
+CONFIG_CMM=m
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
@@ -44,11 +53,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
-CONFIG_LIVEPATCH=y
-CONFIG_NR_CPUS=256
-CONFIG_NUMA=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
+CONFIG_BINFMT_MISC=m
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -60,9 +65,6 @@ CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
CONFIG_IDLE_PAGE_TRACKING=y
-CONFIG_CRASH_DUMP=y
-CONFIG_BINFMT_MISC=m
-CONFIG_HIBERNATION=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -98,6 +100,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_VIRTIO_BLK=y
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
@@ -131,6 +134,7 @@ CONFIG_EQUALIZER=m
CONFIG_TUN=m
CONFIG_VIRTIO_NET=y
# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
@@ -157,33 +161,6 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_DWARF4=y
-CONFIG_GDB_SCRIPTS=y
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_SECTION_MISMATCH=y
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_PAGEALLOC=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_PROVE_LOCKING=y
-CONFIG_LOCK_STAT=y
-CONFIG_DEBUG_LOCKDEP=y
-CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_SG=y
-CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_LATENCYTOP=y
-CONFIG_SCHED_TRACER=y
-CONFIG_FTRACE_SYSCALLS=y
-CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_FUNCTION_PROFILER=y
-# CONFIG_RUNTIME_TESTING_MENU is not set
-CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
@@ -193,6 +170,7 @@ CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_CMAC=m
@@ -231,7 +209,6 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_ZCRYPT=m
-CONFIG_ZCRYPT_MULTIDEVNODES=y
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
@@ -247,4 +224,30 @@ CONFIG_CRC7=m
# CONFIG_XZ_DEC_ARM is not set
# CONFIG_XZ_DEC_ARMTHUMB is not set
# CONFIG_XZ_DEC_SPARC is not set
-CONFIG_CMM=m
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCK_STAT=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FUNCTION_PROFILER=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_S390_PTDUMP=y
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index dbd689d556ce..ccbb53e22024 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -46,8 +46,6 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.asce_limit = STACK_TOP_MAX;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
- /* pgd_alloc() did not account this pud */
- mm_inc_nr_puds(mm);
break;
case -PAGE_SIZE:
/* forked 5-level task, set new asce with new_mm->pgd */
@@ -63,9 +61,6 @@ static inline int init_new_context(struct task_struct *tsk,
/* forked 2-level compat task, set new asce with new mm->pgd */
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
- /* pgd_alloc() did not account this pmd */
- mm_inc_nr_pmds(mm);
- mm_inc_nr_puds(mm);
}
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f0f9bcf94c03..5ee733720a57 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -36,11 +36,11 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
- if (mm->context.asce_limit <= _REGION3_SIZE)
+ if (mm_pmd_folded(mm))
return _SEGMENT_ENTRY_EMPTY;
- if (mm->context.asce_limit <= _REGION2_SIZE)
+ if (mm_pud_folded(mm))
return _REGION3_ENTRY_EMPTY;
- if (mm->context.asce_limit <= _REGION1_SIZE)
+ if (mm_p4d_folded(mm))
return _REGION2_ENTRY_EMPTY;
return _REGION1_ENTRY_EMPTY;
}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 411d435e7a7d..063732414dfb 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -493,6 +493,24 @@ static inline int is_module_addr(void *addr)
_REGION_ENTRY_PROTECT | \
_REGION_ENTRY_NOEXEC)
+static inline bool mm_p4d_folded(struct mm_struct *mm)
+{
+ return mm->context.asce_limit <= _REGION1_SIZE;
+}
+#define mm_p4d_folded(mm) mm_p4d_folded(mm)
+
+static inline bool mm_pud_folded(struct mm_struct *mm)
+{
+ return mm->context.asce_limit <= _REGION2_SIZE;
+}
+#define mm_pud_folded(mm) mm_pud_folded(mm)
+
+static inline bool mm_pmd_folded(struct mm_struct *mm)
+{
+ return mm->context.asce_limit <= _REGION3_SIZE;
+}
+#define mm_pmd_folded(mm) mm_pmd_folded(mm)
+
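Callers can now ask whether a translation level is folded for a given mm instead of comparing asce_limit against region sizes by hand; the pgalloc.h and tlb.h hunks below switch to this. A usage sketch:

	/* Skip teardown for a level that is folded away in this mm. */
	if (mm_pmd_folded(mm))
		return;		/* no separate PMD tables exist */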
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 302795c47c06..81038ab357ce 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -236,7 +236,7 @@ static inline unsigned long current_stack_pointer(void)
return sp;
}
-static __no_sanitize_address_or_inline unsigned short stap(void)
+static __no_kasan_or_inline unsigned short stap(void)
{
unsigned short cpu_address;
@@ -330,7 +330,7 @@ static inline void __load_psw(psw_t psw)
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
-static __no_sanitize_address_or_inline void __load_psw_mask(unsigned long mask)
+static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
{
unsigned long addr;
psw_t psw;
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 27248f42a03c..ce4e17c9aad6 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -14,7 +14,7 @@
* General size of kernel stacks
*/
#ifdef CONFIG_KASAN
-#define THREAD_SIZE_ORDER 3
+#define THREAD_SIZE_ORDER 4
#else
#define THREAD_SIZE_ORDER 2
#endif
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 457b7ba0fbb6..b31c779cf581 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -136,7 +136,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
- if (tlb->mm->context.asce_limit <= _REGION3_SIZE)
+ if (mm_pmd_folded(tlb->mm))
return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
tlb_remove_table(tlb, pmd);
@@ -152,7 +152,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long address)
{
- if (tlb->mm->context.asce_limit <= _REGION1_SIZE)
+ if (mm_p4d_folded(tlb->mm))
return;
tlb_remove_table(tlb, p4d);
}
@@ -167,7 +167,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
- if (tlb->mm->context.asce_limit <= _REGION2_SIZE)
+ if (mm_pud_folded(tlb->mm))
return;
tlb_remove_table(tlb, pud);
}
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 724fba4d09d2..39191a0feed1 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -236,10 +236,10 @@ ENTRY(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
lghi %r4,__TASK_stack
lghi %r1,__TASK_thread
- lg %r5,0(%r4,%r3) # start of kernel stack of next
+ llill %r5,STACK_INIT
stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev
- lgr %r15,%r5
- aghi %r15,STACK_INIT # end of kernel stack of next
+ lg %r15,0(%r4,%r3) # start of kernel stack of next
+ agr %r15,%r5 # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index cc085e2d2ce9..74091fd3101e 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -373,7 +373,7 @@ static int __hw_perf_event_init(struct perf_event *event)
return -ENOENT;
if (ev > PERF_CPUM_CF_MAX_CTR)
- return -EINVAL;
+ return -ENOENT;
/* Obtain the counter set to which the specified counter belongs */
set = get_counter_set(ev);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 7bf604ff50a1..bfabeb1889cc 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1842,10 +1842,30 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags)
CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
-static struct attribute *cpumsf_pmu_events_attr[] = {
- CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
- NULL,
- NULL,
+/* Attribute list for CPU_SF.
+ *
+ * The availability depends on the CPU_MF sampling facility authorization
+ * for basic + diagnostic samples. This is determined at initialization
+ * time by the sampling facility device driver.
+ * If the authorization for basic samples is turned off, it should
+ * also be turned off for diagnostic sampling.
+ *
+ * During initialization of the device driver, check the authorization
+ * level for diagnostic sampling and install the attribute
+ * file for diagnostic sampling if necessary.
+ *
+ * For now install a placeholder to reference all possible attributes:
+ * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG.
+ * Add another entry for the final NULL pointer.
+ */
+enum {
+ SF_CYCLES_BASIC_ATTR_IDX = 0,
+ SF_CYCLES_BASIC_DIAG_ATTR_IDX,
+ SF_CYCLES_ATTR_MAX
+};
+
+static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = {
+ [SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC)
};
PMU_FORMAT_ATTR(event, "config:0-63");
@@ -2040,7 +2060,10 @@ static int __init init_cpum_sampling_pmu(void)
if (si.ad) {
sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
- cpumsf_pmu_events_attr[1] =
+ /* Sampling of diagnostic data is authorized;
+ * install the event into the attribute list of the PMU device.
+ */
+ cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
}
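The attribute array is now indexed by the enum and carries one extra slot for the NULL terminator; the diagnostic entry is filled in only when the facility reports authorization. A sketch of the init-time decision:

	/* Diag entry installed only when sampling authorization allows it. */
	if (si.ad)
		cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
			CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
	/* cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX] remains the NULL sentinel. */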
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index eb8aebea3ea7..e76309fbbcb3 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -37,7 +37,7 @@ KASAN_SANITIZE := n
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
$(call if_changed,vdso32ld)
# strip rule for the .so file
@@ -46,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# assembly rules for the .S files
-$(obj-vdso32): %.o: %.S
+$(obj-vdso32): %.o: %.S FORCE
$(call if_changed_dep,vdso32as)
# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
- cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index a22b2cf86eec..f849ac61c5da 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -37,7 +37,7 @@ KASAN_SANITIZE := n
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
$(call if_changed,vdso64ld)
# strip rule for the .so file
@@ -46,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# assembly rules for the .S files
-$(obj-vdso64): %.o: %.S
+$(obj-vdso64): %.o: %.S FORCE
$(call if_changed_dep,vdso64as)
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 21eb7407d51b..8429ab079715 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -154,14 +154,14 @@ SECTIONS
* uncompressed image info used by the decompressor
* it should match struct vmlinux_info
*/
- .vmlinux.info 0 : {
+ .vmlinux.info 0 (INFO) : {
QUAD(_stext) /* default_lma */
QUAD(startup_continue) /* entry */
QUAD(__bss_start - _stext) /* image_size */
QUAD(__bss_stop - __bss_start) /* bss_size */
QUAD(__boot_data_start) /* bootdata_off */
QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */
- }
+ } :NONE
/* Debugging sections. */
STABS_DEBUG
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 76d89ee8b428..814f26520aa2 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -101,6 +101,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
mm->context.asce_limit = _REGION1_SIZE;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ mm_inc_nr_puds(mm);
} else {
crst_table_init(table, _REGION1_ENTRY_EMPTY);
pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index ae0d9e889534..d31bde0870d8 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -53,6 +53,7 @@ int __node_distance(int a, int b)
{
return mode->distance ? mode->distance(a, b) : 0;
}
+EXPORT_SYMBOL(__node_distance);
int numa_debug_enabled;
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 74c002ddc0ce..28c40624bcb6 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1305,6 +1305,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
io_req->fds[0] = dev->cow.fd;
else
io_req->fds[0] = dev->fd;
+ io_req->error = 0;
if (req_op(req) == REQ_OP_FLUSH) {
io_req->op = UBD_FLUSH;
@@ -1313,9 +1314,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
io_req->cow_offset = -1;
io_req->offset = off;
io_req->length = bvec->bv_len;
- io_req->error = 0;
io_req->sector_mask = 0;
-
io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
io_req->offsets[0] = 0;
io_req->offsets[1] = dev->cow.data_offset;
@@ -1341,11 +1340,14 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
+ struct ubd *ubd_dev = hctx->queue->queuedata;
struct request *req = bd->rq;
int ret = 0;
blk_mq_start_request(req);
+ spin_lock_irq(&ubd_dev->lock);
+
if (req_op(req) == REQ_OP_FLUSH) {
ret = ubd_queue_one_vec(hctx, req, 0, NULL);
} else {
@@ -1361,9 +1363,11 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
}
}
out:
- if (ret < 0) {
+ spin_unlock_irq(&ubd_dev->lock);
+
+ if (ret < 0)
blk_mq_requeue_request(req, true);
- }
+
return BLK_STS_OK;
}
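Request submission is now serialized on the per-device lock, and a failed submission requeues the request instead of erroring out. The locking shape, as a sketch of the queue_rq path around ubd_queue_one_vec():

	blk_mq_start_request(req);

	spin_lock_irq(&ubd_dev->lock);
	ret = ubd_queue_one_vec(hctx, req, 0, NULL);
	spin_unlock_irq(&ubd_dev->lock);

	if (ret < 0)
		blk_mq_requeue_request(req, true);
	return BLK_STS_OK;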
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba7e3464ee92..9d734f3c8234 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -525,7 +525,6 @@ config X86_VSMP
bool "ScaleMP vSMP"
select HYPERVISOR_GUEST
select PARAVIRT
- select PARAVIRT_XXL
depends on X86_64 && PCI
depends on X86_EXTENDED_PLATFORM
depends on SMP
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5b562e464009..88398fdf8129 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -213,8 +213,6 @@ ifdef CONFIG_X86_64
KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
endif
-# Speed up the build
-KBUILD_CFLAGS += -pipe
# Workaround for a gcc prerelease that unfortunately was shipped in a SUSE release
KBUILD_CFLAGS += -Wno-sign-compare
#
@@ -239,7 +237,7 @@ archheaders:
archmacros:
$(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
-ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s -Wa,-
+ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s
export ASM_MACRO_FLAGS
KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index e17ab885b1e9..cb46d602a6b8 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -129,8 +129,15 @@ struct intel_uncore_box {
struct intel_uncore_extra_reg shared_regs[0];
};
-#define UNCORE_BOX_FLAG_INITIATED 0
-#define UNCORE_BOX_FLAG_CTL_OFFS8 1 /* event config registers are 8-byte apart */
+/* CFL uncore 8th cbox MSRs */
+#define CFL_UNC_CBO_7_PERFEVTSEL0 0xf70
+#define CFL_UNC_CBO_7_PER_CTR0 0xf76
+
+#define UNCORE_BOX_FLAG_INITIATED 0
+/* event config registers are 8 bytes apart */
+#define UNCORE_BOX_FLAG_CTL_OFFS8 1
+/* CFL 8th CBOX has different MSR space */
+#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2
struct uncore_event_desc {
struct kobj_attribute attr;
@@ -297,17 +304,27 @@ unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
- return box->pmu->type->event_ctl +
- (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
- uncore_msr_box_offset(box);
+ if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
+ return CFL_UNC_CBO_7_PERFEVTSEL0 +
+ (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
+ } else {
+ return box->pmu->type->event_ctl +
+ (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
+ uncore_msr_box_offset(box);
+ }
}
static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
- return box->pmu->type->perf_ctr +
- (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
- uncore_msr_box_offset(box);
+ if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
+ return CFL_UNC_CBO_7_PER_CTR0 +
+ (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
+ } else {
+ return box->pmu->type->perf_ctr +
+ (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
+ uncore_msr_box_offset(box);
+ }
}
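Both helpers branch on the new per-box flag because the eighth CBOX on Coffee Lake sits in its own MSR range. Selection sketch:

	/* Choose the counter MSR base for this box. */
	unsigned int base = test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)
			    ? CFL_UNC_CBO_7_PER_CTR0
			    : box->pmu->type->perf_ctr + uncore_msr_box_offset(box);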
static inline
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 8527c3e1038b..2593b0d7aeee 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -15,6 +15,25 @@
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
+#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c
+#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904
+#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914
+#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f
+#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f
+#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc
+#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0
+#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10
+#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4
+#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f
+#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f
+#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2
+#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30
+#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18
+#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6
+#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31
+#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33
+#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca
+#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -202,6 +221,10 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}
+
+ /* The 8th CBOX has different MSR space */
+ if (box->pmu->pmu_idx == 7)
+ __set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}
static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
@@ -228,7 +251,7 @@ static struct intel_uncore_ops skl_uncore_msr_ops = {
static struct intel_uncore_type skl_uncore_cbox = {
.name = "cbox",
.num_counters = 4,
- .num_boxes = 5,
+ .num_boxes = 8,
.perf_ctr_bits = 44,
.fixed_ctr_bits = 48,
.perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
@@ -569,7 +592,82 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
-
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
{ /* end: all zeroes */ },
};
@@ -618,6 +716,25 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
+ IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */
+ IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */
+ IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */
+ IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */
+ IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */
+ IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */
+ IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */
+ IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */
+ IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */
+ IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */
+ IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */
+ IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */
+ IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */
+ IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */
+ IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */
+ IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */
+ IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
+ IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
+ IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
{ /* end marker */ }
};
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 4da9b1c58d28..c1a812bd5a27 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -221,6 +221,8 @@ static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_am
int mce_available(struct cpuinfo_x86 *c);
bool mce_is_memory_error(struct mce *m);
+bool mce_is_correctable(struct mce *m);
+int mce_usable_address(struct mce *m);
DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 0d6271cce198..1d0a7778e163 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -232,7 +232,7 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
: "cc");
}
#endif
- return hv_status;
+ return hv_status;
}
/*
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index cd0cf1c568b4..8f657286d599 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -33,12 +33,14 @@
/*
* Set __PAGE_OFFSET to the most negative possible address +
- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
- * what Xen requires.
+ * PGDIR_SIZE*17 (pgd slot 273).
+ *
+ * The gap leaves space for the LDT remap for PTI (1 pgd slot) and for a
+ * hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary,
+ * but it's what Xen requires.
*/
-#define __PAGE_OFFSET_BASE_L5 _AC(0xff10000000000000, UL)
-#define __PAGE_OFFSET_BASE_L4 _AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL)
+#define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL)
#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
#define __PAGE_OFFSET page_offset_base
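
A quick standalone check (not kernel code) that the new bases sit exactly
one pgd slot above the old ones; PGDIR_SHIFT is 39 with 4-level and 48
with 5-level paging, which is where the extra LDT slot comes from.

#include <assert.h>
#include <stdio.h>

int main(void)
{
        /* old vs. new __PAGE_OFFSET_BASE values from the hunk above */
        assert(0xffff888000000000ULL - 0xffff880000000000ULL == 1ULL << 39);
        assert(0xff11000000000000ULL - 0xff10000000000000ULL == 1ULL << 48);
        puts("direct map moved up by exactly one pgd slot");
        return 0;
}
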
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 04edd2d58211..84bd9bdc1987 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -111,9 +111,7 @@ extern unsigned int ptrs_per_p4d;
*/
#define MAXMEM (1UL << MAX_PHYSMEM_BITS)
-#define LDT_PGD_ENTRY_L4 -3UL
-#define LDT_PGD_ENTRY_L5 -112UL
-#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
+#define LDT_PGD_ENTRY -240UL
#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
#define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)
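
Where the single LDT_PGD_ENTRY now lands, as a standalone check: with the
direct map moved up one slot (previous hunk), pgd slot -240 resolves to
the range just vacated below it under both page-table layouts. ULL is
used here for portability; the kernel's UL is 64-bit in this context.

#include <assert.h>

int main(void)
{
        assert((0ULL - 240) << 39 == 0xffff880000000000ULL); /* 4-level */
        assert((0ULL - 240) << 48 == 0xff10000000000000ULL); /* 5-level */
        return 0;
}
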
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 87623c6b13db..bd5ac6cc37db 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -13,12 +13,15 @@
#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
- u32 val = 0;
-
- if (GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
- "I", _Q_PENDING_OFFSET))
- val |= _Q_PENDING_VAL;
+ u32 val;
+ /*
+ * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
+	 * and CONFIG_PROFILE_ALL_BRANCHES=y result in a label inside a
+ * statement expression, which GCC doesn't like.
+ */
+ val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
+ "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
return val;
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 123e669bf363..790ce08e41f2 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -9,7 +9,7 @@
#include <linux/mm.h>
#include <linux/device.h>
-#include <linux/uaccess.h>
+#include <asm/extable.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -93,12 +93,39 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
*/
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
- return __put_user(val, (unsigned long __user *)addr);
+ int ret = 0;
+
+ asm volatile("1: mov %[val], %[ptr]\n"
+ "2:\n"
+ ".section .fixup, \"ax\"\n"
+ "3: sub $1, %[ret]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : [ret] "+r" (ret), [ptr] "=m" (*addr)
+ : [val] "r" (val));
+
+ return ret;
}
-static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+static inline int xen_safe_read_ulong(const unsigned long *addr,
+ unsigned long *val)
{
- return __get_user(*val, (unsigned long __user *)addr);
+ int ret = 0;
+ unsigned long rval = ~0ul;
+
+ asm volatile("1: mov %[ptr], %[rval]\n"
+ "2:\n"
+ ".section .fixup, \"ax\"\n"
+ "3: sub $1, %[ret]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : [ret] "+r" (ret), [rval] "+r" (rval)
+ : [ptr] "m" (*addr));
+ *val = rval;
+
+ return ret;
}
#ifdef CONFIG_XEN_PV
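
The contract the two open-coded helpers keep, modeled without asm (names
here are illustrative, not kernel API): return 0 on success, a negative
value when the access faults, and leave *val poisoned as ~0UL on a
faulting read.

#include <assert.h>
#include <stdbool.h>

static int model_safe_read(const unsigned long *addr, unsigned long *val,
                           bool faults)
{
        int ret = 0;
        unsigned long rval = ~0UL;      /* poison, kept on fault */

        if (!faults)
                rval = *addr;           /* the "1:" mov in the real helper */
        else
                ret -= 1;               /* the .fixup path: sub $1, ret */
        *val = rval;
        return ret;
}

int main(void)
{
        unsigned long src = 42, dst;

        assert(model_safe_read(&src, &dst, false) == 0 && dst == 42);
        assert(model_safe_read(&src, &dst, true) == -1 && dst == ~0UL);
        return 0;
}
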
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8c66d2fc8f81..36d2696c9563 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -485,7 +485,7 @@ static void mce_report_event(struct pt_regs *regs)
* be somewhat complicated (e.g. segment offset would require an instruction
* parser). So only support physical addresses up to page granuality for now.
*/
-static int mce_usable_address(struct mce *m)
+int mce_usable_address(struct mce *m)
{
if (!(m->status & MCI_STATUS_ADDRV))
return 0;
@@ -505,6 +505,7 @@ static int mce_usable_address(struct mce *m)
return 1;
}
+EXPORT_SYMBOL_GPL(mce_usable_address);
bool mce_is_memory_error(struct mce *m)
{
@@ -534,7 +535,7 @@ bool mce_is_memory_error(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
-static bool mce_is_correctable(struct mce *m)
+bool mce_is_correctable(struct mce *m)
{
if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
return false;
@@ -547,6 +548,7 @@ static bool mce_is_correctable(struct mce *m)
return true;
}
+EXPORT_SYMBOL_GPL(mce_is_correctable);
static bool cec_add_mce(struct mce *m)
{
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 1c72f3819eb1..e81a2db42df7 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kexec.h>
+#include <linux/i8253.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
@@ -295,6 +296,16 @@ static void __init ms_hyperv_init_platform(void)
if (efi_enabled(EFI_BOOT))
x86_platform.get_nmi_reason = hv_get_nmi_reason;
+ /*
+ * Hyper-V VMs have a PIT emulation quirk such that zeroing the
+ * counter register during PIT shutdown restarts the PIT. So it
+	 * continues to interrupt at 18.2 Hz. Setting i8253_clear_counter
+	 * to false tells pit_shutdown() not to zero the counter so that
+	 * the PIT really is shut down. Generation 2 VMs don't have a PIT,
+ * and setting this value has no effect.
+ */
+ i8253_clear_counter_on_shutdown = false;
+
#if IS_ENABLED(CONFIG_HYPERV)
/*
* Setup the hook to get control post apic initialization.
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index d9ab49bed8af..0eda91f8eeac 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s)
}
early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
-static unsigned long long vmware_sched_clock(void)
+static unsigned long long notrace vmware_sched_clock(void)
{
unsigned long long ns;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index ab18e0884dc6..6135ae8ce036 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -199,14 +199,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm)
/*
* If PTI is enabled, this maps the LDT into the kernelmode and
* usermode tables for the given mm.
- *
- * There is no corresponding unmap function. Even if the LDT is freed, we
- * leave the PTEs around until the slot is reused or the mm is destroyed.
- * This is harmless: the LDT is always in ordinary memory, and no one will
- * access the freed slot.
- *
- * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
- * it useful, and the flush would slow down modify_ldt().
*/
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
@@ -214,8 +206,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
unsigned long va;
bool is_vmalloc;
spinlock_t *ptl;
- pgd_t *pgd;
- int i;
+ int i, nr_pages;
if (!static_cpu_has(X86_FEATURE_PTI))
return 0;
@@ -229,16 +220,11 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
/* Check if the current mappings are sane */
sanity_check_ldt_mapping(mm);
- /*
- * Did we already have the top level entry allocated? We can't
- * use pgd_none() for this because it doens't do anything on
- * 4-level page table kernels.
- */
- pgd = pgd_offset(mm, LDT_BASE_ADDR);
-
is_vmalloc = is_vmalloc_addr(ldt->entries);
- for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
+ nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+ for (i = 0; i < nr_pages; i++) {
unsigned long offset = i << PAGE_SHIFT;
const void *src = (char *)ldt->entries + offset;
unsigned long pfn;
@@ -272,13 +258,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
/* Propagate LDT mapping to the user page-table */
map_ldt_struct_to_user(mm);
- va = (unsigned long)ldt_slot_va(slot);
- flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false);
-
ldt->slot = slot;
return 0;
}
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+ unsigned long va;
+ int i, nr_pages;
+
+ if (!ldt)
+ return;
+
+ /* LDT map/unmap is only required for PTI */
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return;
+
+ nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+ for (i = 0; i < nr_pages; i++) {
+ unsigned long offset = i << PAGE_SHIFT;
+ spinlock_t *ptl;
+ pte_t *ptep;
+
+ va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
+ ptep = get_locked_pte(mm, va, &ptl);
+ pte_clear(mm, va, ptep);
+ pte_unmap_unlock(ptep, ptl);
+ }
+
+ va = (unsigned long)ldt_slot_va(ldt->slot);
+ flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
+}
+
#else /* !CONFIG_PAGE_TABLE_ISOLATION */
static int
@@ -286,6 +298,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
return 0;
}
+
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */
static void free_ldt_pgtables(struct mm_struct *mm)
@@ -524,6 +540,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
}
install_ldt(mm, new_ldt);
+ unmap_ldt_struct(mm, old_ldt);
free_ldt_struct(old_ldt);
error = 0;
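
The nr_pages computation now shared by map and unmap, checked standalone;
LDT_ENTRY_SIZE is 8 on x86, so a 4 KiB page covers 512 entries.

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define LDT_ENTRY_SIZE 8
#define PAGE_SIZE 4096

int main(void)
{
        assert(DIV_ROUND_UP(1   * LDT_ENTRY_SIZE, PAGE_SIZE) == 1);
        assert(DIV_ROUND_UP(512 * LDT_ENTRY_SIZE, PAGE_SIZE) == 1);
        assert(DIV_ROUND_UP(513 * LDT_ENTRY_SIZE, PAGE_SIZE) == 2);
        return 0;
}
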
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 1eae5af491c2..891a75dbc131 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -26,65 +26,8 @@
#define TOPOLOGY_REGISTER_OFFSET 0x10
-#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL
-/*
- * Interrupt control on vSMPowered systems:
- * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
- * and vice versa.
- */
-
-asmlinkage __visible unsigned long vsmp_save_fl(void)
-{
- unsigned long flags = native_save_fl();
-
- if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
- flags &= ~X86_EFLAGS_IF;
- return flags;
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
-
-__visible void vsmp_restore_fl(unsigned long flags)
-{
- if (flags & X86_EFLAGS_IF)
- flags &= ~X86_EFLAGS_AC;
- else
- flags |= X86_EFLAGS_AC;
- native_restore_fl(flags);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
-
-asmlinkage __visible void vsmp_irq_disable(void)
-{
- unsigned long flags = native_save_fl();
-
- native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
-
-asmlinkage __visible void vsmp_irq_enable(void)
-{
- unsigned long flags = native_save_fl();
-
- native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
-
-static unsigned __init vsmp_patch(u8 type, void *ibuf,
- unsigned long addr, unsigned len)
-{
- switch (type) {
- case PARAVIRT_PATCH(irq.irq_enable):
- case PARAVIRT_PATCH(irq.irq_disable):
- case PARAVIRT_PATCH(irq.save_fl):
- case PARAVIRT_PATCH(irq.restore_fl):
- return paravirt_patch_default(type, ibuf, addr, len);
- default:
- return native_patch(type, ibuf, addr, len);
- }
-
-}
-
-static void __init set_vsmp_pv_ops(void)
+#ifdef CONFIG_PCI
+static void __init set_vsmp_ctl(void)
{
void __iomem *address;
unsigned int cap, ctl, cfg;
@@ -109,28 +52,12 @@ static void __init set_vsmp_pv_ops(void)
}
#endif
- if (cap & ctl & (1 << 4)) {
- /* Setup irq ops and turn on vSMP IRQ fastpath handling */
- pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
- pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
- pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
- pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
- pv_ops.init.patch = vsmp_patch;
- ctl &= ~(1 << 4);
- }
writel(ctl, address + 4);
ctl = readl(address + 4);
pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
early_iounmap(address, 8);
}
-#else
-static void __init set_vsmp_pv_ops(void)
-{
-}
-#endif
-
-#ifdef CONFIG_PCI
static int is_vsmp = -1;
static void __init detect_vsmp_box(void)
@@ -164,11 +91,14 @@ static int is_vsmp_box(void)
{
return 0;
}
+static void __init set_vsmp_ctl(void)
+{
+}
#endif
static void __init vsmp_cap_cpus(void)
{
-#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
+#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
void __iomem *address;
unsigned int cfg, topology, node_shift, maxcpus;
@@ -221,6 +151,6 @@ void __init vsmp_init(void)
vsmp_cap_cpus();
- set_vsmp_pv_ops();
+ set_vsmp_ctl();
return;
}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 0d7b3ae4960b..a5d7ed125337 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1905,7 +1905,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
init_top_pgt[0] = __pgd(0);
/* Pre-constructed entries are in pfn, so convert to mfn */
- /* L4[272] -> level3_ident_pgt */
+ /* L4[273] -> level3_ident_pgt */
/* L4[511] -> level3_kernel_pgt */
convert_pfn_mfn(init_top_pgt);
@@ -1925,8 +1925,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
addr[0] = (unsigned long)pgd;
addr[1] = (unsigned long)l3;
addr[2] = (unsigned long)l2;
- /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
- * Both L4[272][0] and L4[511][510] have entries that point to the same
+	/* Graft it onto L4[273][0]. Note that we are creating an aliasing problem:
+ * Both L4[273][0] and L4[511][510] have entries that point to the same
* L2 (PMD) tables. Meaning that if you modify it in __va space
* it will be also modified in the __ka space! (But if you just
* modify the PMD table to point to other PTE's or none, then you
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index b06731705529..055e37e43541 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -656,8 +656,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
/*
* The interface requires atomic updates on p2m elements.
- * xen_safe_write_ulong() is using __put_user which does an atomic
- * store via asm().
+ * xen_safe_write_ulong() is using an atomic store via asm().
*/
if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
return true;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 441c88262169..1c8a8816a402 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -9,6 +9,7 @@
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/atomic.h>
#include <asm/paravirt.h>
#include <asm/qspinlock.h>
@@ -21,6 +22,7 @@
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
static bool xen_pvspin = true;
static void xen_qlock_kick(int cpu)
@@ -39,25 +41,25 @@ static void xen_qlock_kick(int cpu)
*/
static void xen_qlock_wait(u8 *byte, u8 val)
{
- unsigned long flags;
int irq = __this_cpu_read(lock_kicker_irq);
+ atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
/* If kicker interrupts not initialized yet, just spin */
if (irq == -1 || in_nmi())
return;
- /* Guard against reentry. */
- local_irq_save(flags);
+ /* Detect reentry. */
+ atomic_inc(nest_cnt);
- /* If irq pending already clear it. */
- if (xen_test_irq_pending(irq)) {
+ /* If irq pending already and no nested call clear it. */
+ if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
xen_clear_irq_pending(irq);
} else if (READ_ONCE(*byte) == val) {
/* Block until irq becomes pending (or a spurious wakeup) */
xen_poll_irq(irq);
}
- local_irq_restore(flags);
+ atomic_dec(nest_cnt);
}
static irqreturn_t dummy_handler(int irq, void *dev_id)
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index be9bfd9aa865..34a23016dd14 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -23,7 +23,11 @@
# error Linux requires the Xtensa Windowed Registers Option.
#endif
-#define ARCH_SLAB_MINALIGN XCHAL_DATA_WIDTH
+/* Xtensa ABI requires stack alignment to be at least 16 */
+
+#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
+
+#define ARCH_SLAB_MINALIGN STACK_ALIGN
/*
* User space process size: 1 GB.
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 2f76118ecf62..9053a5622d2c 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -88,9 +88,12 @@ _SetupMMU:
initialize_mmu
#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
rsr a2, excsave1
- movi a3, 0x08000000
+ movi a3, XCHAL_KSEG_PADDR
+ bltu a2, a3, 1f
+ sub a2, a2, a3
+ movi a3, XCHAL_KSEG_SIZE
bgeu a2, a3, 1f
- movi a3, 0xd0000000
+ movi a3, XCHAL_KSEG_CACHED_VADDR
add a2, a2, a3
wsr a2, excsave1
1:
diff --git a/block/bio.c b/block/bio.c
index d5368a445561..4f4d9884443b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -605,6 +605,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
if (bio_flagged(bio_src, BIO_THROTTLED))
bio_set_flag(bio, BIO_THROTTLED);
bio->bi_opf = bio_src->bi_opf;
+ bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
@@ -1260,6 +1261,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
if (ret)
goto cleanup;
} else {
+ zero_fill_bio(bio);
iov_iter_advance(iter, bio->bi_iter.bi_size);
}
diff --git a/block/blk-core.c b/block/blk-core.c
index ce12515f9b9b..deb56932f8c4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -798,9 +798,8 @@ void blk_cleanup_queue(struct request_queue *q)
* dispatch may still be in-progress since we dispatch requests
* from more than one contexts.
*
- * No need to quiesce queue if it isn't initialized yet since
- * blk_freeze_queue() should be enough for cases of passthrough
- * request.
+	 * We rely on the driver to deal with the race in case queue
+	 * initialization isn't done.
*/
if (q->mq_ops && blk_queue_init_done(q))
blk_mq_quiesce_queue(q);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 76f867ea9a9b..5f2c429d4378 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -51,16 +51,14 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
- while (nr_sects) {
- unsigned int req_sects = nr_sects;
- sector_t end_sect;
+ if (!nr_sects)
+ return -EINVAL;
- if (!req_sects)
- goto fail;
- if (req_sects > UINT_MAX >> 9)
- req_sects = UINT_MAX >> 9;
+ while (nr_sects) {
+ sector_t req_sects = min_t(sector_t, nr_sects,
+ bio_allowed_max_sectors(q));
- end_sect = sector + req_sects;
+ WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
@@ -68,8 +66,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bio_set_op_attrs(bio, op, 0);
bio->bi_iter.bi_size = req_sects << 9;
+ sector += req_sects;
nr_sects -= req_sects;
- sector = end_sect;
/*
* We can loop for a long time in here, if someone does
@@ -82,14 +80,6 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
*biop = bio;
return 0;
-
-fail:
- if (bio) {
- submit_bio_wait(bio);
- bio_put(bio);
- }
- *biop = NULL;
- return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
@@ -161,7 +151,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
return -EOPNOTSUPP;
/* Ensure that max_write_same_sectors doesn't overflow bi_size */
- max_write_same_sectors = UINT_MAX >> 9;
+ max_write_same_sectors = bio_allowed_max_sectors(q);
while (nr_sects) {
bio = blk_next_bio(bio, 1, gfp_mask);
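
A standalone model of the reworked discard loop above: each pass emits at
most bio_allowed_max_sectors() sectors (the 512-byte-lbs cap of 8388607
is used below), with a 64-bit min so a huge range cannot overflow the
32-bit bi_size.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t sector = 0, nr_sects = 1ULL << 32;     /* 2 TiB discard */
        const uint64_t cap = 8388607;                   /* 512B-lbs cap */
        unsigned int bios = 0;

        while (nr_sects) {
                uint64_t req = nr_sects < cap ? nr_sects : cap;

                assert((req << 9) <= UINT32_MAX);       /* fits bi_size */
                sector += req;
                nr_sects -= req;
                bios++;
        }
        assert(sector == 1ULL << 32 && bios == 513);
        return 0;
}
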
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6b5ad275ed56..e7696c47489a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -46,7 +46,7 @@ static inline bool bio_will_gap(struct request_queue *q,
bio_get_first_bvec(prev_rq->bio, &pb);
else
bio_get_first_bvec(prev, &pb);
- if (pb.bv_offset)
+ if (pb.bv_offset & queue_virt_boundary(q))
return true;
/*
@@ -90,7 +90,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
- max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+ max_discard_sectors = min(q->limits.max_discard_sectors,
+ bio_allowed_max_sectors(q));
max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors)) {
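
Why the new masking matters, as a standalone check: with a 4 KiB virt
boundary the mask is 0xfff, so a boundary-aligned offset (e.g. 0x1000)
must not be reported as a gap, while an unaligned one must be.

#include <assert.h>

int main(void)
{
        unsigned long mask = 0xfff;     /* queue_virt_boundary() for 4 KiB */

        assert((0x1000UL & mask) == 0); /* aligned: no gap */
        assert((0x200UL  & mask) != 0); /* unaligned: gap */
        return 0;
}
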
diff --git a/block/blk.h b/block/blk.h
index a1841b8ff129..0089fefdf771 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -169,7 +169,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
static inline bool __bvec_gap_to_prev(struct request_queue *q,
struct bio_vec *bprv, unsigned int offset)
{
- return offset ||
+ return (offset & queue_virt_boundary(q)) ||
((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}
@@ -396,6 +396,16 @@ static inline unsigned long blk_rq_deadline(struct request *rq)
}
/*
+ * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
+ * is defined as 'unsigned int'; meanwhile it has to be aligned to the
+ * logical block size, which is the minimum unit accepted by the hardware.
+ */
+static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
+{
+ return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+}
+
+/*
* Internal io_context interface
*/
void get_io_context(struct io_context *ioc);
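
What bio_allowed_max_sectors() evaluates to for common logical block
sizes, checked standalone: UINT_MAX rounded down to a block-size
multiple, expressed in 512-byte sectors.

#include <assert.h>
#include <limits.h>

static unsigned int allowed_max_sectors(unsigned int lbs)
{
        return (UINT_MAX - UINT_MAX % lbs) >> 9; /* round_down, then sectors */
}

int main(void)
{
        assert(allowed_max_sectors(512)  == 8388607);
        assert(allowed_max_sectors(4096) == 8388600);
        return 0;
}
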
diff --git a/block/bounce.c b/block/bounce.c
index 36869afc258c..559c55bda040 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -248,6 +248,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
return NULL;
bio->bi_disk = bio_src->bi_disk;
bio->bi_opf = bio_src->bi_opf;
+ bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c
index e41f6cc33fff..784748dbb19f 100644
--- a/crypto/crypto_user_base.c
+++ b/crypto/crypto_user_base.c
@@ -84,7 +84,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_cipher rcipher;
- strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
+ strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
rcipher.blocksize = alg->cra_blocksize;
rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
@@ -103,7 +103,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rcomp;
- strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
+ strncpy(rcomp.type, "compression", sizeof(rcomp.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(struct crypto_report_comp), &rcomp))
goto nla_put_failure;
@@ -117,7 +117,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_acomp racomp;
- strlcpy(racomp.type, "acomp", sizeof(racomp.type));
+ strncpy(racomp.type, "acomp", sizeof(racomp.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
sizeof(struct crypto_report_acomp), &racomp))
@@ -132,7 +132,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
- strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+ strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
sizeof(struct crypto_report_akcipher), &rakcipher))
@@ -147,7 +147,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_kpp rkpp;
- strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
+ strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
sizeof(struct crypto_report_kpp), &rkpp))
@@ -161,10 +161,10 @@ nla_put_failure:
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
- strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
- strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
+ strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+ strncpy(ualg->cru_driver_name, alg->cra_driver_name,
sizeof(ualg->cru_driver_name));
- strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
+ strncpy(ualg->cru_module_name, module_name(alg->cra_module),
sizeof(ualg->cru_module_name));
ualg->cru_type = 0;
@@ -177,7 +177,7 @@ static int crypto_report_one(struct crypto_alg *alg,
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
struct crypto_report_larval rl;
- strlcpy(rl.type, "larval", sizeof(rl.type));
+ strncpy(rl.type, "larval", sizeof(rl.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
sizeof(struct crypto_report_larval), &rl))
goto nla_put_failure;
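
These report structs are copied whole into a netlink message, so trailing
bytes must not carry stack garbage; unlike strlcpy(), strncpy() zero-pads
the remainder of the buffer. A standalone demonstration:

#include <assert.h>
#include <string.h>

int main(void)
{
        char buf[16];

        memset(buf, 0xAA, sizeof(buf));         /* simulate stack garbage */
        strncpy(buf, "cipher", sizeof(buf));
        for (size_t i = strlen("cipher"); i < sizeof(buf); i++)
                assert(buf[i] == '\0');         /* padded, nothing leaks */
        return 0;
}
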
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
index 021ad06bbb62..1dfaa0ccd555 100644
--- a/crypto/crypto_user_stat.c
+++ b/crypto/crypto_user_stat.c
@@ -37,6 +37,8 @@ static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
u64 v64;
u32 v32;
+ memset(&raead, 0, sizeof(raead));
+
strncpy(raead.type, "aead", sizeof(raead.type));
v32 = atomic_read(&alg->encrypt_cnt);
@@ -65,6 +67,8 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
u64 v64;
u32 v32;
+ memset(&rcipher, 0, sizeof(rcipher));
+
strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
v32 = atomic_read(&alg->encrypt_cnt);
@@ -93,6 +97,8 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
u64 v64;
u32 v32;
+ memset(&rcomp, 0, sizeof(rcomp));
+
strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
v32 = atomic_read(&alg->compress_cnt);
rcomp.stat_compress_cnt = v32;
@@ -120,6 +126,8 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
u64 v64;
u32 v32;
+ memset(&racomp, 0, sizeof(racomp));
+
strlcpy(racomp.type, "acomp", sizeof(racomp.type));
v32 = atomic_read(&alg->compress_cnt);
racomp.stat_compress_cnt = v32;
@@ -147,6 +155,8 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
u64 v64;
u32 v32;
+ memset(&rakcipher, 0, sizeof(rakcipher));
+
strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
v32 = atomic_read(&alg->encrypt_cnt);
rakcipher.stat_encrypt_cnt = v32;
@@ -177,6 +187,8 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
struct crypto_stat rkpp;
u32 v;
+ memset(&rkpp, 0, sizeof(rkpp));
+
strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
v = atomic_read(&alg->setsecret_cnt);
@@ -203,6 +215,8 @@ static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
u64 v64;
u32 v32;
+ memset(&rhash, 0, sizeof(rhash));
+
strncpy(rhash.type, "ahash", sizeof(rhash.type));
v32 = atomic_read(&alg->hash_cnt);
@@ -227,6 +241,8 @@ static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
u64 v64;
u32 v32;
+ memset(&rhash, 0, sizeof(rhash));
+
strncpy(rhash.type, "shash", sizeof(rhash.type));
v32 = atomic_read(&alg->hash_cnt);
@@ -251,6 +267,8 @@ static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
u64 v64;
u32 v32;
+ memset(&rrng, 0, sizeof(rrng));
+
strncpy(rrng.type, "rng", sizeof(rrng.type));
v32 = atomic_read(&alg->generate_cnt);
@@ -275,6 +293,8 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg,
struct sk_buff *skb)
{
+ memset(ualg, 0, sizeof(*ualg));
+
strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
sizeof(ualg->cru_driver_name));
@@ -291,6 +311,7 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
struct crypto_stat rl;
+ memset(&rl, 0, sizeof(rl));
strlcpy(rl.type, "larval", sizeof(rl.type));
if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL,
sizeof(struct crypto_stat), &rl))
diff --git a/crypto/simd.c b/crypto/simd.c
index ea7240be3001..78e8d037ae2b 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -124,8 +124,9 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm)
ctx->cryptd_tfm = cryptd_tfm;
- reqsize = sizeof(struct skcipher_request);
- reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base);
+ reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
+ reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
+ reqsize += sizeof(struct skcipher_request);
crypto_skcipher_set_reqsize(tfm, reqsize);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 8f3a444c6ea9..7cea769c37df 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -512,7 +512,7 @@ config CRC_PMIC_OPREGION
config XPOWER_PMIC_OPREGION
bool "ACPI operation region support for XPower AXP288 PMIC"
- depends on MFD_AXP20X_I2C && IOSF_MBI
+ depends on MFD_AXP20X_I2C && IOSF_MBI=y
help
This config adds ACPI operation region support for XPower AXP288 PMIC.
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index eaa60c94205a..1f32caa87686 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = {
{"PNP0200", 0}, /* AT DMA Controller */
{"ACPI0009", 0}, /* IOxAPIC */
{"ACPI000A", 0}, /* IOAPIC */
+ {"SMB0001", 0}, /* ACPI SMBUS virtual device */
{"", 0},
};
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f8c638f3c946..14d9f5bea015 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2928,9 +2928,9 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
return rc;
if (ars_status_process_records(acpi_desc))
- return -ENOMEM;
+ dev_err(acpi_desc->dev, "Failed to process ARS records\n");
- return 0;
+ return rc;
}
static int ars_register(struct acpi_nfit_desc *acpi_desc,
@@ -3341,8 +3341,6 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
- struct nfit_spa *nfit_spa;
- int rc = 0;
if (nvdimm)
return 0;
@@ -3355,17 +3353,10 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
* just needs guarantees that any ARS it initiates are not
* interrupted by any intervening start requests from userspace.
*/
- mutex_lock(&acpi_desc->init_mutex);
- list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
- if (acpi_desc->scrub_spa
- || test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)
- || test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) {
- rc = -EBUSY;
- break;
- }
- mutex_unlock(&acpi_desc->init_mutex);
+ if (work_busy(&acpi_desc->dwork.work))
+ return -EBUSY;
- return rc;
+ return 0;
}
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index e9626bf6ca29..d6c1b10f6c25 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -25,8 +25,12 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
struct acpi_nfit_desc *acpi_desc;
struct nfit_spa *nfit_spa;
- /* We only care about memory errors */
- if (!mce_is_memory_error(mce))
+ /* We only care about uncorrectable memory errors */
+ if (!mce_is_memory_error(mce) || mce_is_correctable(mce))
+ return NOTIFY_DONE;
+
+ /* Verify the address reported in the MCE is valid. */
+ if (!mce_usable_address(mce))
return NOTIFY_DONE;
/*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6e594644cb1d..a7f5202a4815 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4553,7 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
- { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
+ { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 10ecb232245d..4b1ff5bc256a 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Renesas R-Car SATA driver
*
* Author: Vladimir Barinov <source@cogentembedded.com>
* Copyright (C) 2013-2015 Cogent Embedded, Inc.
* Copyright (C) 2013-2015 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a8cfa011c284..fb23578e9a41 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4148,10 +4148,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
bio.bi_end_io = floppy_rb0_cb;
bio_set_op_attrs(&bio, REQ_OP_READ, 0);
+ init_completion(&cbdata.complete);
+
submit_bio(&bio);
process_fd_request();
- init_completion(&cbdata.complete);
wait_for_completion(&cbdata.complete);
__free_page(page);
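
The race being closed, in miniature: the bio's end_io callback may call
complete() before the submitter reaches init_completion(), which would
then re-initialize the completion and leave the waiter blocked forever.
The completion must be set up before the work that signals it is
submitted (kernel-context sketch, names from the hunk above):

        init_completion(&cbdata.complete);      /* before submission */
        submit_bio(&bio);                       /* end_io may fire at once */
        /* other work may run here */
        wait_for_completion(&cbdata.complete);
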
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 56452cabce5b..0ed4b200fa58 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1919,6 +1919,7 @@ static int negotiate_mq(struct blkfront_info *info)
GFP_KERNEL);
if (!info->rinfo) {
xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
+ info->nr_rings = 0;
return -ENOMEM;
}
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index ef0ca9414f37..ff83e899df71 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -210,6 +210,7 @@ static int of_fixed_factor_clk_remove(struct platform_device *pdev)
{
struct clk *clk = platform_get_drvdata(pdev);
+ of_clk_del_provider(pdev->dev.of_node);
clk_unregister_fixed_factor(clk);
return 0;
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index c981159b02c0..792735d7e46e 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -325,6 +325,7 @@ static struct clk_regmap axg_fclk_div2 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div2_div" },
.num_parents = 1,
+ .flags = CLK_IS_CRITICAL,
},
};
@@ -349,6 +350,18 @@ static struct clk_regmap axg_fclk_div3 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div3_div" },
.num_parents = 1,
+ /*
+ * FIXME:
+ * This clock, as fdiv2, is used by the SCPI FW and is required
+ * by the platform to operate correctly.
+		 * Until the following conditions are met, we need this clock to
+		 * be marked as critical:
+		 * a) The SCPI generic driver claims and enables all the clocks
+		 *    it needs
+		 * b) CCF has a clock hand-off mechanism to make sure the
+		 *    clock stays on until the proper driver comes along
+ */
+ .flags = CLK_IS_CRITICAL,
},
};
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 9309cfaaa464..4ada9668fd49 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -506,6 +506,18 @@ static struct clk_regmap gxbb_fclk_div3 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div3_div" },
.num_parents = 1,
+ /*
+ * FIXME:
+ * This clock, as fdiv2, is used by the SCPI FW and is required
+ * by the platform to operate correctly.
+		 * Until the following conditions are met, we need this clock to
+		 * be marked as critical:
+		 * a) The SCPI generic driver claims and enables all the clocks
+		 *    it needs
+		 * b) CCF has a clock hand-off mechanism to make sure the
+		 *    clock stays on until the proper driver comes along
+ */
+ .flags = CLK_IS_CRITICAL,
},
};
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index e4ca6a45f313..ef1b267cb058 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -265,7 +265,7 @@ static struct clk_fixed_factor cxo = {
.div = 1,
.hw.init = &(struct clk_init_data){
.name = "cxo",
- .parent_names = (const char *[]){ "xo_board" },
+ .parent_names = (const char *[]){ "xo-board" },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
},
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 9c38895542f4..d4350bb10b83 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -20,6 +20,13 @@
DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
+/*
+ * Handle PIT quirk in pit_shutdown() where zeroing the counter register
+ * restarts the PIT, negating the shutdown. On platforms with the quirk,
+ * platform-specific code can set this to false.
+ */
+bool i8253_clear_counter_on_shutdown __ro_after_init = true;
+
#ifdef CONFIG_CLKSRC_I8253
/*
* Since the PIT overflows every tick, its not very useful
@@ -109,8 +116,11 @@ static int pit_shutdown(struct clock_event_device *evt)
raw_spin_lock(&i8253_lock);
outb_p(0x30, PIT_MODE);
- outb_p(0, PIT_CH0);
- outb_p(0, PIT_CH0);
+
+ if (i8253_clear_counter_on_shutdown) {
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
+ }
raw_spin_unlock(&i8253_lock);
return 0;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 8cfee0ab804b..d8c3595e9023 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -160,8 +160,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* Ensure the arm clock divider is what we expect */
ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
if (ret) {
+ int ret1;
+
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
- regulator_set_voltage_tol(arm_reg, volt_old, 0);
+ ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
+ if (ret1)
+ dev_warn(cpu_dev,
+ "failed to restore vddarm voltage: %d\n", ret1);
return ret;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 3f0e2a14895a..22b53bf26817 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -201,19 +201,28 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{},
};
+static const struct of_device_id *ti_cpufreq_match_node(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+
+ np = of_find_node_by_path("/");
+ match = of_match_node(ti_cpufreq_of_match, np);
+ of_node_put(np);
+
+ return match;
+}
+
static int ti_cpufreq_probe(struct platform_device *pdev)
{
u32 version[VERSION_COUNT];
- struct device_node *np;
const struct of_device_id *match;
struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
const char * const reg_names[] = {"vdd", "vbb"};
int ret;
- np = of_find_node_by_path("/");
- match = of_match_node(ti_cpufreq_of_match, np);
- of_node_put(np);
+ match = dev_get_platdata(&pdev->dev);
if (!match)
return -ENODEV;
@@ -290,7 +299,14 @@ fail_put_node:
static int ti_cpufreq_init(void)
{
- platform_device_register_simple("ti-cpufreq", -1, NULL, 0);
+ const struct of_device_id *match;
+
+ /* Check to ensure we are on a compatible platform */
+ match = ti_cpufreq_match_node();
+ if (match)
+ platform_device_register_data(NULL, "ti-cpufreq", -1, match,
+ sizeof(*match));
+
return 0;
}
module_init(ti_cpufreq_init);
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 073557f433eb..3a407a3ef22b 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -82,7 +82,6 @@ static int __init arm_idle_init_cpu(int cpu)
{
int ret;
struct cpuidle_driver *drv;
- struct cpuidle_device *dev;
drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
if (!drv)
@@ -103,13 +102,6 @@ static int __init arm_idle_init_cpu(int cpu)
goto out_kfree_drv;
}
- ret = cpuidle_register_driver(drv);
- if (ret) {
- if (ret != -EBUSY)
- pr_err("Failed to register cpuidle driver\n");
- goto out_kfree_drv;
- }
-
/*
* Call arch CPU operations in order to initialize
* idle states suspend back-end specific data
@@ -117,37 +109,21 @@ static int __init arm_idle_init_cpu(int cpu)
ret = arm_cpuidle_init(cpu);
/*
- * Skip the cpuidle device initialization if the reported
+	 * Allow the initialization to continue for other CPUs if the reported
* failure is a HW misconfiguration/breakage (-ENXIO).
*/
- if (ret == -ENXIO)
- return 0;
-
if (ret) {
pr_err("CPU %d failed to init idle CPU ops\n", cpu);
- goto out_unregister_drv;
- }
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- ret = -ENOMEM;
- goto out_unregister_drv;
+ ret = ret == -ENXIO ? 0 : ret;
+ goto out_kfree_drv;
}
- dev->cpu = cpu;
- ret = cpuidle_register_device(dev);
- if (ret) {
- pr_err("Failed to register cpuidle device for CPU %d\n",
- cpu);
- goto out_kfree_dev;
- }
+ ret = cpuidle_register(drv, NULL);
+ if (ret)
+ goto out_kfree_drv;
return 0;
-out_kfree_dev:
- kfree(dev);
-out_unregister_drv:
- cpuidle_unregister_driver(drv);
out_kfree_drv:
kfree(drv);
return ret;
@@ -178,9 +154,7 @@ out_fail:
while (--cpu >= 0) {
dev = per_cpu(cpuidle_devices, cpu);
drv = cpuidle_get_cpu_driver(dev);
- cpuidle_unregister_device(dev);
- cpuidle_unregister_driver(drv);
- kfree(dev);
+ cpuidle_unregister(drv);
kfree(drv);
}
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index f7d6d690116e..cdc4f9a171d9 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -732,6 +732,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
int *splits_in_nents;
int *splits_out_nents = NULL;
struct sec_request_el *el, *temp;
+ bool split = skreq->src != skreq->dst;
mutex_init(&sec_req->lock);
sec_req->req_base = &skreq->base;
@@ -750,7 +751,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
if (ret)
goto err_free_split_sizes;
- if (skreq->src != skreq->dst) {
+ if (split) {
sec_req->len_out = sg_nents(skreq->dst);
ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
&splits_out, &splits_out_nents,
@@ -785,8 +786,9 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
split_sizes[i],
skreq->src != skreq->dst,
splits_in[i], splits_in_nents[i],
- splits_out[i],
- splits_out_nents[i], info);
+ split ? splits_out[i] : NULL,
+ split ? splits_out_nents[i] : 0,
+ info);
if (IS_ERR(el)) {
ret = PTR_ERR(el);
goto err_free_elements;
@@ -806,13 +808,6 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
* more refined but this is unlikely to happen so no need.
*/
- /* Cleanup - all elements in pointer arrays have been coppied */
- kfree(splits_in_nents);
- kfree(splits_in);
- kfree(splits_out_nents);
- kfree(splits_out);
- kfree(split_sizes);
-
/* Grab a big lock for a long time to avoid concurrency issues */
mutex_lock(&queue->queuelock);
@@ -827,13 +822,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
(!queue->havesoftqueue ||
kfifo_avail(&queue->softqueue) > steps)) ||
!list_empty(&ctx->backlog)) {
+ ret = -EBUSY;
if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
list_add_tail(&sec_req->backlog_head, &ctx->backlog);
mutex_unlock(&queue->queuelock);
- return -EBUSY;
+ goto out;
}
- ret = -EBUSY;
mutex_unlock(&queue->queuelock);
goto err_free_elements;
}
@@ -842,7 +837,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
if (ret)
goto err_free_elements;
- return -EINPROGRESS;
+ ret = -EINPROGRESS;
+out:
+ /* Cleanup - all elements in pointer arrays have been copied */
+ kfree(splits_in_nents);
+ kfree(splits_in);
+ kfree(splits_out_nents);
+ kfree(splits_out);
+ kfree(split_sizes);
+ return ret;
err_free_elements:
list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
@@ -854,7 +857,7 @@ err_free_elements:
crypto_skcipher_ivsize(atfm),
DMA_BIDIRECTIONAL);
err_unmap_out_sg:
- if (skreq->src != skreq->dst)
+ if (split)
sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
splits_out_nents, sec_req->len_out,
info->dev);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 1551ca7df394..136ec04d683f 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -30,13 +30,16 @@
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
+static DEFINE_SPINLOCK(dma_fence_stub_lock);
+static struct dma_fence dma_fence_stub;
+
/*
* fence context counter: each execution context should have its own
* fence context, this allows checking if fences belong to the same
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
-static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
/**
* DOC: DMA fences overview
@@ -68,6 +71,37 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
* &dma_buf.resv pointer.
*/
+static const char *dma_fence_stub_get_name(struct dma_fence *fence)
+{
+ return "stub";
+}
+
+static const struct dma_fence_ops dma_fence_stub_ops = {
+ .get_driver_name = dma_fence_stub_get_name,
+ .get_timeline_name = dma_fence_stub_get_name,
+};
+
+/**
+ * dma_fence_get_stub - return a signaled fence
+ *
+ * Return a stub fence which is already signaled.
+ */
+struct dma_fence *dma_fence_get_stub(void)
+{
+ spin_lock(&dma_fence_stub_lock);
+ if (!dma_fence_stub.ops) {
+ dma_fence_init(&dma_fence_stub,
+ &dma_fence_stub_ops,
+ &dma_fence_stub_lock,
+ 0, 0);
+ dma_fence_signal_locked(&dma_fence_stub);
+ }
+ spin_unlock(&dma_fence_stub_lock);
+
+ return dma_fence_get(&dma_fence_stub);
+}
+EXPORT_SYMBOL(dma_fence_get_stub);
+
/**
* dma_fence_context_alloc - allocate an array of fence contexts
* @num: amount of contexts to allocate
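
A kernel-context sketch of the intended use of the new stub (caller code
assumed, not from this patch): when there is nothing to wait on, return
the shared pre-signaled fence instead of allocating one; it is
refcounted, so the usual put still applies.

        struct dma_fence *fence = dma_fence_get_stub();

        WARN_ON(!dma_fence_is_signaled(fence)); /* always already signaled */
        /* hand it to the caller, which eventually does: */
        dma_fence_put(fence);
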
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 6c95f61a32e7..c1618335ca99 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -56,9 +56,10 @@ const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
/**
- * reservation_object_reserve_shared - Reserve space to add a shared
- * fence to a reservation_object.
+ * reservation_object_reserve_shared - Reserve space to add shared fences to
+ * a reservation_object.
* @obj: reservation object
+ * @num_fences: number of fences we want to add
*
* Should be called before reservation_object_add_shared_fence(). Must
* be called with obj->lock held.
@@ -66,107 +67,27 @@ EXPORT_SYMBOL(reservation_seqcount_string);
* RETURNS
* Zero for success, or -errno
*/
-int reservation_object_reserve_shared(struct reservation_object *obj)
+int reservation_object_reserve_shared(struct reservation_object *obj,
+ unsigned int num_fences)
{
- struct reservation_object_list *fobj, *old;
- u32 max;
+ struct reservation_object_list *old, *new;
+ unsigned int i, j, k, max;
old = reservation_object_get_list(obj);
if (old && old->shared_max) {
- if (old->shared_count < old->shared_max) {
- /* perform an in-place update */
- kfree(obj->staged);
- obj->staged = NULL;
+ if ((old->shared_count + num_fences) <= old->shared_max)
return 0;
- } else
- max = old->shared_max * 2;
- } else
- max = 4;
-
- /*
- * resize obj->staged or allocate if it doesn't exist,
- * noop if already correct size
- */
- fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
- GFP_KERNEL);
- if (!fobj)
- return -ENOMEM;
-
- obj->staged = fobj;
- fobj->shared_max = max;
- return 0;
-}
-EXPORT_SYMBOL(reservation_object_reserve_shared);
-
-static void
-reservation_object_add_shared_inplace(struct reservation_object *obj,
- struct reservation_object_list *fobj,
- struct dma_fence *fence)
-{
- struct dma_fence *signaled = NULL;
- u32 i, signaled_idx;
-
- dma_fence_get(fence);
-
- preempt_disable();
- write_seqcount_begin(&obj->seq);
-
- for (i = 0; i < fobj->shared_count; ++i) {
- struct dma_fence *old_fence;
-
- old_fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(obj));
-
- if (old_fence->context == fence->context) {
- /* memory barrier is added by write_seqcount_begin */
- RCU_INIT_POINTER(fobj->shared[i], fence);
- write_seqcount_end(&obj->seq);
- preempt_enable();
-
- dma_fence_put(old_fence);
- return;
- }
-
- if (!signaled && dma_fence_is_signaled(old_fence)) {
- signaled = old_fence;
- signaled_idx = i;
- }
- }
-
- /*
- * memory barrier is added by write_seqcount_begin,
- * fobj->shared_count is protected by this lock too
- */
- if (signaled) {
- RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
+ else
+ max = max(old->shared_count + num_fences,
+ old->shared_max * 2);
} else {
- BUG_ON(fobj->shared_count >= fobj->shared_max);
- RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
- fobj->shared_count++;
+ max = 4;
}
- write_seqcount_end(&obj->seq);
- preempt_enable();
-
- dma_fence_put(signaled);
-}
-
-static void
-reservation_object_add_shared_replace(struct reservation_object *obj,
- struct reservation_object_list *old,
- struct reservation_object_list *fobj,
- struct dma_fence *fence)
-{
- unsigned i, j, k;
-
- dma_fence_get(fence);
-
- if (!old) {
- RCU_INIT_POINTER(fobj->shared[0], fence);
- fobj->shared_count = 1;
- goto done;
- }
+ new = kmalloc(offsetof(typeof(*new), shared[max]), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
/*
* no need to bump fence refcounts, rcu_read access
@@ -174,46 +95,45 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
* references from the old struct are carried over to
* the new.
*/
- for (i = 0, j = 0, k = fobj->shared_max; i < old->shared_count; ++i) {
- struct dma_fence *check;
-
- check = rcu_dereference_protected(old->shared[i],
- reservation_object_held(obj));
+ for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
+ struct dma_fence *fence;
- if (check->context == fence->context ||
- dma_fence_is_signaled(check))
- RCU_INIT_POINTER(fobj->shared[--k], check);
+ fence = rcu_dereference_protected(old->shared[i],
+ reservation_object_held(obj));
+ if (dma_fence_is_signaled(fence))
+ RCU_INIT_POINTER(new->shared[--k], fence);
else
- RCU_INIT_POINTER(fobj->shared[j++], check);
+ RCU_INIT_POINTER(new->shared[j++], fence);
}
- fobj->shared_count = j;
- RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
- fobj->shared_count++;
+ new->shared_count = j;
+ new->shared_max = max;
-done:
preempt_disable();
write_seqcount_begin(&obj->seq);
/*
* RCU_INIT_POINTER can be used here,
* seqcount provides the necessary barriers
*/
- RCU_INIT_POINTER(obj->fence, fobj);
+ RCU_INIT_POINTER(obj->fence, new);
write_seqcount_end(&obj->seq);
preempt_enable();
if (!old)
- return;
+ return 0;
/* Drop the references to the signaled fences */
- for (i = k; i < fobj->shared_max; ++i) {
- struct dma_fence *f;
+ for (i = k; i < new->shared_max; ++i) {
+ struct dma_fence *fence;
- f = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(obj));
- dma_fence_put(f);
+ fence = rcu_dereference_protected(new->shared[i],
+ reservation_object_held(obj));
+ dma_fence_put(fence);
}
kfree_rcu(old, rcu);
+
+ return 0;
}
+EXPORT_SYMBOL(reservation_object_reserve_shared);
/**
* reservation_object_add_shared_fence - Add a fence to a shared slot
@@ -226,15 +146,39 @@ done:
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct dma_fence *fence)
{
- struct reservation_object_list *old, *fobj = obj->staged;
+ struct reservation_object_list *fobj;
+ unsigned int i, count;
- old = reservation_object_get_list(obj);
- obj->staged = NULL;
+ dma_fence_get(fence);
- if (!fobj)
- reservation_object_add_shared_inplace(obj, old, fence);
- else
- reservation_object_add_shared_replace(obj, old, fobj, fence);
+ fobj = reservation_object_get_list(obj);
+ count = fobj->shared_count;
+
+ preempt_disable();
+ write_seqcount_begin(&obj->seq);
+
+ for (i = 0; i < count; ++i) {
+ struct dma_fence *old_fence;
+
+ old_fence = rcu_dereference_protected(fobj->shared[i],
+ reservation_object_held(obj));
+ if (old_fence->context == fence->context ||
+ dma_fence_is_signaled(old_fence)) {
+ dma_fence_put(old_fence);
+ goto replace;
+ }
+ }
+
+ BUG_ON(fobj->shared_count >= fobj->shared_max);
+ count++;
+
+replace:
+ RCU_INIT_POINTER(fobj->shared[i], fence);
+ /* pointer update must be visible before we extend the shared_count */
+ smp_store_mb(fobj->shared_count, count);
+
+ write_seqcount_end(&obj->seq);
+ preempt_enable();
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
@@ -343,9 +287,6 @@ retry:
new = dma_fence_get_rcu_safe(&src->fence_excl);
rcu_read_unlock();
- kfree(dst->staged);
- dst->staged = NULL;
-
src_list = reservation_object_get_list(dst);
old = reservation_object_get_excl(dst);
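With the staged list removed, the calling convention is: pre-reserve shared slots under the held reservation lock, then add fences with no failure path. A minimal sketch of the post-refactor usage (not from the patch; resv and fence are placeholder locals):

    int r;

    /* Must hold the reservation lock; may allocate a larger fence array. */
    r = reservation_object_reserve_shared(resv, 1);
    if (r)
            return r;
    /* Cannot fail once a slot is reserved; replaces a signaled or
     * same-context fence in place, otherwise appends. */
    reservation_object_add_shared_fence(resv, fence);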
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 5b44ef226904..fc359ca4503d 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -184,6 +184,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head,
exp_info.ops = &udmabuf_ops;
exp_info.size = ubuf->pagecount << PAGE_SHIFT;
exp_info.priv = ubuf;
+ exp_info.flags = O_RDWR;
buf = dma_buf_export(&exp_info);
if (IS_ERR(buf)) {
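The added flags line is what allows userspace to create writable mappings: a shared writable mapping of any fd requires the file to be opened for writing, so O_RDWR is what makes mmap(PROT_WRITE, MAP_SHARED) on the dma-buf fd succeed. A hedged exporter sketch, with my_dmabuf_ops and size as placeholders:

    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
    struct dma_buf *buf;

    exp_info.ops   = &my_dmabuf_ops;   /* placeholder ops table */
    exp_info.size  = size;             /* placeholder buffer size */
    exp_info.flags = O_RDWR;           /* permit writable mmap of the fd */
    buf = dma_buf_export(&exp_info);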
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 388a929baf95..1a6a77df8a5e 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -265,6 +265,10 @@ void __init efi_init(void)
(params.mmap & ~PAGE_MASK)));
init_screen_info();
+
+ /* ARM does not permit early mappings to persist across paging_init() */
+ if (IS_ENABLED(CONFIG_ARM))
+ efi_memmap_unmap();
}
static int __init register_gop_device(void)
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 922cfb813109..a00934d263c5 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -110,7 +110,7 @@ static int __init arm_enable_runtime_services(void)
{
u64 mapsize;
- if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
+ if (!efi_enabled(EFI_BOOT)) {
pr_info("EFI services will not be available.\n");
return 0;
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 249eb70691b0..fad7c62cfc0e 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -592,7 +592,11 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
early_memunmap(tbl, sizeof(*tbl));
}
+ return 0;
+}
+int __init efi_apply_persistent_mem_reservations(void)
+{
if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
unsigned long prsv = efi.mem_reserve;
@@ -963,36 +967,43 @@ bool efi_is_table_address(unsigned long phys_addr)
}
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
int efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
- struct linux_efi_memreserve *rsv, *parent;
+ struct linux_efi_memreserve *rsv;
- if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+ if (!efi_memreserve_root)
return -ENODEV;
- rsv = kmalloc(sizeof(*rsv), GFP_KERNEL);
+ rsv = kmalloc(sizeof(*rsv), GFP_ATOMIC);
if (!rsv)
return -ENOMEM;
- parent = memremap(efi.mem_reserve, sizeof(*rsv), MEMREMAP_WB);
- if (!parent) {
- kfree(rsv);
- return -ENOMEM;
- }
-
rsv->base = addr;
rsv->size = size;
spin_lock(&efi_mem_reserve_persistent_lock);
- rsv->next = parent->next;
- parent->next = __pa(rsv);
+ rsv->next = efi_memreserve_root->next;
+ efi_memreserve_root->next = __pa(rsv);
spin_unlock(&efi_mem_reserve_persistent_lock);
- memunmap(parent);
+ return 0;
+}
+static int __init efi_memreserve_root_init(void)
+{
+ if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ efi_memreserve_root = memremap(efi.mem_reserve,
+ sizeof(*efi_memreserve_root),
+ MEMREMAP_WB);
+ if (!efi_memreserve_root)
+ return -ENOMEM;
return 0;
}
+early_initcall(efi_memreserve_root_init);
#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
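Mapping the table root once at early_initcall time removes the memremap() from the registration path, and together with GFP_ATOMIC it makes efi_mem_reserve_persistent() callable from contexts that cannot sleep. A minimal caller sketch (not from the patch), with base and size standing in for a real firmware-provided region:

    static int example_pin_region(phys_addr_t base, u64 size)
    {
            /* -ENODEV until efi_memreserve_root_init() has populated
             * efi_memreserve_root; the entry then persists across kexec. */
            return efi_mem_reserve_persistent(base, size);
    }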
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 30ac0c975f8a..3d36142cf812 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -75,6 +75,9 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
efi_status_t status;
+ if (IS_ENABLED(CONFIG_ARM))
+ return;
+
status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
(void **)&rsv);
if (status != EFI_SUCCESS) {
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 8830fa601e45..0c0d2312f4a8 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -158,6 +158,10 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
return efi_status;
}
}
+
+ /* shrink the FDT back to its minimum size */
+ fdt_pack(fdt);
+
return EFI_SUCCESS;
fdt_set_fail:
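fdt_pack() trims the blob's totalsize back to the minimum after the stub has finished adding properties into an oversized working buffer. The usual libfdt sequence, sketched with a placeholder bufsize and property name:

    /* Copy into a larger buffer, edit, then shrink before handing off. */
    err = fdt_open_into(orig_fdt, fdt, bufsize);
    if (!err)
            err = fdt_setprop_string(fdt, 0, "example-prop", "value");
    if (!err)
            err = fdt_pack(fdt);   /* totalsize -> minimum required */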
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index fa2904fb841f..38b686c67b17 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -118,6 +118,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data)
void __init efi_memmap_unmap(void)
{
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
if (!efi.memmap.late) {
unsigned long size;
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index a19d845bdb06..8903b9ccfc2b 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -67,7 +67,7 @@ struct efi_runtime_work efi_rts_work;
} \
\
init_completion(&efi_rts_work.efi_rts_comp); \
- INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts); \
+ INIT_WORK(&efi_rts_work.work, efi_call_rts); \
efi_rts_work.arg1 = _arg1; \
efi_rts_work.arg2 = _arg2; \
efi_rts_work.arg3 = _arg3; \
diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c
index b01ba4438501..31e891f00175 100644
--- a/drivers/gnss/serial.c
+++ b/drivers/gnss/serial.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/sched.h>
#include <linux/serdev.h>
#include <linux/slab.h>
@@ -63,7 +64,7 @@ static int gnss_serial_write_raw(struct gnss_device *gdev,
int ret;
/* write is only buffered synchronously */
- ret = serdev_device_write(serdev, buf, count, 0);
+ ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 79cb98950013..71d014edd167 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -16,6 +16,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/sched.h>
#include <linux/serdev.h>
#include <linux/slab.h>
#include <linux/wait.h>
@@ -83,7 +84,7 @@ static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
int ret;
/* write is only buffered synchronously */
- ret = serdev_device_write(serdev, buf, count, 0);
+ ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
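Both GNSS drivers carried the same bug: the last argument of serdev_device_write() is a timeout for waiting on write room, and 0 means do not wait at all, so a full tty buffer could truncate the write. MAX_SCHEDULE_TIMEOUT matches the "buffered synchronously" comment above the call:

    /* timeout == 0: return at once if there is no room (possible short
     * write). timeout == MAX_SCHEDULE_TIMEOUT: sleep until the whole
     * buffer has been queued. */
    ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);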
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 8269cffc2967..6a50f9f59c90 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -35,8 +35,8 @@
#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
enum {
- GPIO_MOCKUP_DIR_OUT = 0,
- GPIO_MOCKUP_DIR_IN = 1,
+ GPIO_MOCKUP_DIR_IN = 0,
+ GPIO_MOCKUP_DIR_OUT = 1,
};
/*
@@ -131,7 +131,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
- return chip->lines[offset].dir;
+ return !chip->lines[offset].dir;
}
static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
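The enum swap matters because gpiolib defines the get_direction() return value as 1 for input and 0 for output; since the mockup's internal dir field stores the opposite sense, the callback now negates it. A conforming callback, sketched with a hypothetical my_line_is_output() helper:

    static int example_get_direction(struct gpio_chip *gc, unsigned int offset)
    {
            /* gpiolib contract: 1 = input, 0 = output. */
            return my_line_is_output(gc, offset) ? 0 : 1;
    }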
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index bfe4c5c9f41c..e9600b556f39 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -268,8 +268,8 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
if (pxa_gpio_has_pinctrl()) {
ret = pinctrl_gpio_direction_input(chip->base + offset);
- if (!ret)
- return 0;
+ if (ret)
+ return ret;
}
spin_lock_irqsave(&gpio_lock, flags);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 230e41562462..a2cbb474901c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1295,7 +1295,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
if (!gdev->descs) {
status = -ENOMEM;
- goto err_free_gdev;
+ goto err_free_ida;
}
if (chip->ngpio == 0) {
@@ -1427,8 +1427,9 @@ err_free_label:
kfree_const(gdev->label);
err_free_descs:
kfree(gdev->descs);
-err_free_gdev:
+err_free_ida:
ida_simple_remove(&gpio_ida, gdev->id);
+err_free_gdev:
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
gdev->base, gdev->base + gdev->ngpio - 1,
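The relabeling fixes a classic unwind-order bug: the ida id is allocated before gdev->descs, so a failed descs allocation must still release the id. The general pattern, as a hedged sketch (my_ida and struct mydev are placeholders):

    static int example_register(struct mydev *dev)
    {
            dev->id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
            if (dev->id < 0)
                    return dev->id;

            dev->descs = kcalloc(dev->ngpio, sizeof(*dev->descs), GFP_KERNEL);
            if (!dev->descs)
                    goto err_free_ida;      /* id already exists: free it */

            return 0;

    err_free_ida:
            ida_simple_remove(&my_ida, dev->id);
            return -ENOMEM;
    }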
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index bc6a16a3c36e..ce8d1d384319 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -10,8 +10,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_scatter.o drm_pci.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
- drm_info.o drm_encoder_slave.o \
- drm_trace_points.o drm_global.o drm_prime.o \
+ drm_encoder_slave.o \
+ drm_trace_points.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
drm_framebuffer.o drm_connector.o drm_blend.o \
@@ -32,11 +32,12 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
- drm_scdc_helper.o drm_gem_framebuffer_helper.o
+ drm_scdc_helper.o drm_gem_framebuffer_helper.o \
+ drm_atomic_state_helper.o drm_damage_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 138cb787d27e..f76bcb9c45e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -53,7 +53,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
- amdgpu_gmc.o amdgpu_xgmi.o
+ amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -105,6 +105,7 @@ amdgpu-y += \
# add GFX block
amdgpu-y += \
amdgpu_gfx.o \
+ amdgpu_rlc.o \
gfx_v8_0.o \
gfx_v9_0.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d0102cfc8efb..88db3c263e5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -75,11 +75,14 @@
#include "amdgpu_sdma.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
+#include "amdgpu_csa.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
+#include "amdgpu_doorbell.h"
+#include "amdgpu_amdkfd.h"
#define MAX_GPU_INSTANCE 16
@@ -151,6 +154,7 @@ extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
+extern uint amdgpu_dc_feature_mask;
extern struct amdgpu_mgpu_info mgpu_info;
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -160,6 +164,7 @@ extern int amdgpu_si_support;
extern int amdgpu_cik_support;
#endif
+#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
@@ -359,123 +364,6 @@ int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
/*
- * GPU doorbell structures, functions & helpers
- */
-typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
-{
- AMDGPU_DOORBELL_KIQ = 0x000,
- AMDGPU_DOORBELL_HIQ = 0x001,
- AMDGPU_DOORBELL_DIQ = 0x002,
- AMDGPU_DOORBELL_MEC_RING0 = 0x010,
- AMDGPU_DOORBELL_MEC_RING1 = 0x011,
- AMDGPU_DOORBELL_MEC_RING2 = 0x012,
- AMDGPU_DOORBELL_MEC_RING3 = 0x013,
- AMDGPU_DOORBELL_MEC_RING4 = 0x014,
- AMDGPU_DOORBELL_MEC_RING5 = 0x015,
- AMDGPU_DOORBELL_MEC_RING6 = 0x016,
- AMDGPU_DOORBELL_MEC_RING7 = 0x017,
- AMDGPU_DOORBELL_GFX_RING0 = 0x020,
- AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
- AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
- AMDGPU_DOORBELL_IH = 0x1E8,
- AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
- AMDGPU_DOORBELL_INVALID = 0xFFFF
-} AMDGPU_DOORBELL_ASSIGNMENT;
-
-struct amdgpu_doorbell {
- /* doorbell mmio */
- resource_size_t base;
- resource_size_t size;
- u32 __iomem *ptr;
- u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
-};
-
-/*
- * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space
- */
-typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
-{
- /*
- * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in
- * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
- * Compute related doorbells are allocated from 0x00 to 0x8a
- */
-
-
- /* kernel scheduling */
- AMDGPU_DOORBELL64_KIQ = 0x00,
-
- /* HSA interface queue and debug queue */
- AMDGPU_DOORBELL64_HIQ = 0x01,
- AMDGPU_DOORBELL64_DIQ = 0x02,
-
- /* Compute engines */
- AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
- AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
- AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
- AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
- AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
- AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
- AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
- AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
-
- /* User queue doorbell range (128 doorbells) */
- AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
- AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
-
- /* Graphics engine */
- AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
-
- /*
- * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
- * Graphics voltage island aperture 1
- * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
- */
-
- /* sDMA engines reserved from 0xe0 -oxef */
- AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1,
- AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xE9,
-
- /* For vega10 sriov, the sdma doorbell must be fixed as follow
- * to keep the same setting with host driver, or it will
- * happen conflicts
- */
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 = 0xF0,
- AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 = 0xF2,
- AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
-
- /* Interrupt handler */
- AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
- AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
- AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
-
- /* VCN engine use 32 bits doorbell */
- AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */
- AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
- AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
- AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
-
- /* overlap the doorbell assignment with VCN as they are mutually exclusive
- * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD
- */
- AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
- AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
- AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
- AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
-
- AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
- AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
- AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
- AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
-
- AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
- AMDGPU_DOORBELL64_INVALID = 0xFFFF
-} AMDGPU_DOORBELL64_ASSIGNMENT;
-
-/*
* IRQS.
*/
@@ -652,6 +540,8 @@ struct amdgpu_asic_funcs {
struct amdgpu_ring *ring);
/* check if the asic needs a full reset or if soft reset will work */
bool (*need_full_reset)(struct amdgpu_device *adev);
+ /* initialize doorbell layout for specific asic */
+ void (*init_doorbell_index)(struct amdgpu_device *adev);
};
/*
@@ -830,7 +720,6 @@ struct amdgpu_device {
bool need_dma32;
bool need_swiotlb;
bool accel_working;
- struct work_struct reset_work;
struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
@@ -975,6 +864,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* KFD */
+ struct amdgpu_kfd_dev kfd;
+
/* display related functionality */
struct amdgpu_display_manager dm;
@@ -988,9 +880,6 @@ struct amdgpu_device {
atomic64_t visible_pin_size;
atomic64_t gart_pin_size;
- /* amdkfd interface */
- struct kfd_dev *kfd;
-
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
@@ -1022,6 +911,10 @@ struct amdgpu_device {
unsigned long last_mm_index;
bool in_gpu_reset;
struct mutex lock_reset;
+ struct amdgpu_doorbell_index doorbell_index;
+
+ int asic_reset_res;
+ struct work_struct xgmi_reset_work;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1046,11 +939,6 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
-u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
-u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
-
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
@@ -1112,11 +1000,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
-#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
-#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
-#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
-#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
-
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
@@ -1158,6 +1041,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
+#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
/* Common functions */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@@ -1218,12 +1102,6 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-
-/*
- * functions used by amdgpu_xgmi.c
- */
-int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
-
/*
* functions used by amdgpu_encoder.c
*/
@@ -1251,6 +1129,9 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+ struct amdgpu_dm_backlight_caps *caps);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 7f0afc526419..4376b17ca594 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -41,28 +41,21 @@ struct amdgpu_atif_notification_cfg {
};
struct amdgpu_atif_notifications {
- bool display_switch;
- bool expansion_mode_change;
bool thermal_state;
bool forced_power_state;
bool system_power_state;
- bool display_conf_change;
- bool px_gfx_switch;
bool brightness_change;
bool dgpu_display_event;
+ bool gpu_package_power_limit;
};
struct amdgpu_atif_functions {
bool system_params;
bool sbios_requests;
- bool select_active_disp;
- bool lid_state;
- bool get_tv_standard;
- bool set_tv_standard;
- bool get_panel_expansion_mode;
- bool set_panel_expansion_mode;
bool temperature_change;
- bool graphics_device_types;
+ bool query_backlight_transfer_characteristics;
+ bool ready_to_undock;
+ bool external_gpu_information;
};
struct amdgpu_atif {
@@ -72,6 +65,7 @@ struct amdgpu_atif {
struct amdgpu_atif_functions functions;
struct amdgpu_atif_notification_cfg notification_cfg;
struct amdgpu_encoder *encoder_for_bl;
+ struct amdgpu_dm_backlight_caps backlight_caps;
};
/* Call the ATIF method
@@ -137,15 +131,12 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
*/
static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
{
- n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
- n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
- n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
- n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+ n->gpu_package_power_limit = mask & ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED;
}
/**
@@ -162,14 +153,11 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
{
f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
- f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
- f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
- f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
- f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
- f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
- f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
- f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+ f->query_backlight_transfer_characteristics =
+ mask & ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED;
+ f->ready_to_undock = mask & ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED;
+ f->external_gpu_information = mask & ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED;
}
/**
@@ -311,6 +299,65 @@ out:
}
/**
+ * amdgpu_atif_query_backlight_caps - get min and max backlight input signal
+ *
+ * @atif: amdgpu atif handle

+ *
+ * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function
+ * to determine the acceptable range of backlight values
+ *
+ * backlight_caps.caps_valid will be set to true if the query is successful.
+ *
+ * The input signals are in range 0-255
+ *
+ * This function assumes the display with backlight is the first LCD
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
+{
+ union acpi_object *info;
+ struct atif_qbtc_output characteristics;
+ struct atif_qbtc_arguments arguments;
+ struct acpi_buffer params;
+ size_t size;
+ int err = 0;
+
+ arguments.size = sizeof(arguments);
+ arguments.requested_display = ATIF_QBTC_REQUEST_LCD1;
+
+ params.length = sizeof(arguments);
+ params.pointer = (void *)&arguments;
+
+ info = amdgpu_atif_call(atif,
+ ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS,
+ &params);
+ if (!info) {
+ err = -EIO;
+ goto out;
+ }
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(&characteristics, 0, sizeof(characteristics));
+ size = min(sizeof(characteristics), size);
+ memcpy(&characteristics, info->buffer.pointer, size);
+
+ atif->backlight_caps.caps_valid = true;
+ atif->backlight_caps.min_input_signal =
+ characteristics.min_input_signal;
+ atif->backlight_caps.max_input_signal =
+ characteristics.max_input_signal;
+out:
+ kfree(info);
+ return err;
+}
+
+/**
* amdgpu_atif_get_sbios_requests - get requested sbios event
*
* @handle: acpi handle
@@ -799,6 +846,17 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
}
+ if (atif->functions.query_backlight_transfer_characteristics) {
+ ret = amdgpu_atif_query_backlight_caps(atif);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS failed: %d\n",
+ ret);
+ atif->backlight_caps.caps_valid = false;
+ }
+ } else {
+ atif->backlight_caps.caps_valid = false;
+ }
+
out:
adev->acpi_nb.notifier_call = amdgpu_acpi_event;
register_acpi_notifier(&adev->acpi_nb);
@@ -806,6 +864,18 @@ out:
return ret;
}
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+ struct amdgpu_dm_backlight_caps *caps)
+{
+ if (!adev->atif) {
+ caps->caps_valid = false;
+ return;
+ }
+ caps->caps_valid = adev->atif->backlight_caps.caps_valid;
+ caps->min_input_signal = adev->atif->backlight_caps.min_input_signal;
+ caps->max_input_signal = adev->atif->backlight_caps.max_input_signal;
+}
+
/**
* amdgpu_acpi_fini - tear down driver acpi support
*
@@ -816,6 +886,5 @@ out:
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
unregister_acpi_notifier(&adev->acpi_nb);
- if (adev->atif)
- kfree(adev->atif);
+ kfree(adev->atif);
}
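A consumer of the new helper typically maps a user-visible brightness level onto the ATIF-reported signal range; a hedged sketch follows (the linear mapping is illustrative, not necessarily the display manager's actual curve):

    static u32 example_map_level(struct amdgpu_device *adev, u32 level /* 0-255 */)
    {
            struct amdgpu_dm_backlight_caps caps;

            amdgpu_acpi_get_backlight_caps(adev, &caps);
            if (!caps.caps_valid)
                    return level;   /* no ATIF data: pass through */

            return caps.min_input_signal +
                   level * (caps.max_input_signal - caps.min_input_signal) / 255;
    }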
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index c31a8849e9f8..2dfaf158ef07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -26,15 +26,26 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>
+#include <linux/dma-buf.h>
const struct kgd2kfd_calls *kgd2kfd;
static const unsigned int compute_vmid_bitmap = 0xFF00;
+/* Total memory size in system memory and all GPU VRAM. Used to
+ * estimate the worst-case amount of memory to reserve for page tables.
+ */
+uint64_t amdgpu_amdkfd_total_mem_size;
+
int amdgpu_amdkfd_init(void)
{
+ struct sysinfo si;
int ret;
+ si_meminfo(&si);
+ amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
+ amdgpu_amdkfd_total_mem_size *= si.mem_unit;
+
#ifdef CONFIG_HSA_AMD
ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
if (ret)
@@ -73,9 +84,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
@@ -85,8 +98,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
return;
}
- adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev,
+ adev->pdev, kfd2kgd);
+
+ if (adev->kfd.dev)
+ amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}
/**
@@ -126,7 +142,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i, n;
int last_valid_bit;
- if (adev->kfd) {
+
+ if (adev->kfd.dev) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = compute_vmid_bitmap,
.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
@@ -144,7 +161,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
KGD_MAX_QUEUES);
/* remove the KIQ bit as well */
- if (adev->gfx.kiq.ring.ready)
+ if (adev->gfx.kiq.ring.sched.ready)
clear_bit(amdgpu_gfx_queue_to_bit(adev,
adev->gfx.kiq.ring.me - 1,
adev->gfx.kiq.ring.pipe,
@@ -165,7 +182,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
&gpu_resources.doorbell_start_offset);
if (adev->asic_type < CHIP_VEGA10) {
- kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
return;
}
@@ -179,25 +196,14 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
* process in case of 64-bit doorbells so we
* can use each doorbell assignment twice.
*/
- if (adev->asic_type == CHIP_VEGA10) {
- gpu_resources.sdma_doorbell[0][i] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
- gpu_resources.sdma_doorbell[0][i+1] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i+1] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
- } else {
- gpu_resources.sdma_doorbell[0][i] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
- gpu_resources.sdma_doorbell[0][i+1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i+1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
- }
+ gpu_resources.sdma_doorbell[0][i] =
+ adev->doorbell_index.sdma_engine0 + (i >> 1);
+ gpu_resources.sdma_doorbell[0][i+1] =
+ adev->doorbell_index.sdma_engine0 + 0x200 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i] =
+ adev->doorbell_index.sdma_engine1 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i+1] =
+ adev->doorbell_index.sdma_engine1 + 0x200 + (i >> 1);
}
/* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
* SDMA, IH and VCN. So don't use them for the CP.
@@ -205,37 +211,37 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
gpu_resources.reserved_doorbell_mask = 0x1e0;
gpu_resources.reserved_doorbell_val = 0x0e0;
- kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
}
}
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
- if (adev->kfd) {
- kgd2kfd->device_exit(adev->kfd);
- adev->kfd = NULL;
+ if (adev->kfd.dev) {
+ kgd2kfd->device_exit(adev->kfd.dev);
+ adev->kfd.dev = NULL;
}
}
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry)
{
- if (adev->kfd)
- kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
+ if (adev->kfd.dev)
+ kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry);
}
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
- if (adev->kfd)
- kgd2kfd->suspend(adev->kfd);
+ if (adev->kfd.dev)
+ kgd2kfd->suspend(adev->kfd.dev);
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->resume(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->resume(adev->kfd.dev);
return r;
}
@@ -244,8 +250,8 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->pre_reset(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->pre_reset(adev->kfd.dev);
return r;
}
@@ -254,8 +260,8 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->post_reset(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->post_reset(adev->kfd.dev);
return r;
}
@@ -268,9 +274,9 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
amdgpu_device_gpu_recover(adev, NULL);
}
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr, bool mqd_gfx9)
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr, bool mqd_gfx9)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
@@ -340,7 +346,7 @@ allocate_mem_reserve_bo_failed:
return r;
}
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
@@ -351,8 +357,8 @@ void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
amdgpu_bo_unref(&(bo));
}
-void get_local_mem_info(struct kgd_dev *kgd,
- struct kfd_local_mem_info *mem_info)
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
@@ -383,7 +389,7 @@ void get_local_mem_info(struct kgd_dev *kgd,
mem_info->mem_clk_max = 100;
}
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -392,7 +398,7 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
return 0;
}
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -405,7 +411,7 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
return 100;
}
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
@@ -428,6 +434,62 @@ void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
cu_info->lds_size = acu_info.lds_size;
}
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dma_buf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint64_t metadata_flags;
+ int r = -EINVAL;
+
+ dma_buf = dma_buf_get(dma_buf_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ goto out_put;
+
+ obj = dma_buf->priv;
+ if (obj->dev->driver != adev->ddev->driver)
+ /* Can't handle buffers from different drivers */
+ goto out_put;
+
+ adev = obj->dev->dev_private;
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ goto out_put;
+
+ r = 0;
+ if (dma_buf_kgd)
+ *dma_buf_kgd = (struct kgd_dev *)adev;
+ if (bo_size)
+ *bo_size = amdgpu_bo_size(bo);
+ if (metadata_size)
+ *metadata_size = bo->metadata_size;
+ if (metadata_buffer)
+ r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
+ metadata_size, &metadata_flags);
+ if (flags) {
+ *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ *flags |= ALLOC_MEM_FLAGS_PUBLIC;
+ }
+
+out_put:
+ dma_buf_put(dma_buf);
+ return r;
+}
+
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -501,13 +563,16 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- amdgpu_dpm_switch_power_profile(adev,
- PP_SMC_POWER_PROFILE_COMPUTE, !idle);
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->switch_power_profile)
+ amdgpu_dpm_switch_power_profile(adev,
+ PP_SMC_POWER_PROFILE_COMPUTE,
+ !idle);
}
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
- if (adev->kfd) {
+ if (adev->kfd.dev) {
if ((1 << vmid) & compute_vmid_bitmap)
return true;
}
@@ -521,7 +586,7 @@ bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
return false;
}
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}
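Callers that need only a subset of the information can pass NULL for the rest, since every output parameter is checked individually. A hedged sketch querying size and flags for a dma-buf fd (kgd and fd are placeholders):

    uint64_t size;
    uint32_t flags;
    int r;

    r = amdgpu_amdkfd_get_dmabuf_info(kgd, fd, NULL, &size,
                                      NULL, 0, NULL, &flags);
    if (!r)
            pr_debug("dma-buf: %llu bytes, flags 0x%x\n",
                     (unsigned long long)size, flags);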
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 8e0d4f7196b4..70429f7aa9a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -35,6 +34,7 @@
#include "amdgpu_vm.h"
extern const struct kgd2kfd_calls *kgd2kfd;
+extern uint64_t amdgpu_amdkfd_total_mem_size;
struct amdgpu_device;
@@ -77,6 +77,11 @@ struct amdgpu_amdkfd_fence {
char timeline_name[TASK_COMM_LEN];
};
+struct amdgpu_kfd_dev {
+ struct kfd_dev *dev;
+ uint64_t vram_used;
+};
+
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
@@ -134,16 +139,21 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
/* Shared API */
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr, bool mqd_gfx9);
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-void get_local_mem_info(struct kgd_dev *kgd,
- struct kfd_local_mem_info *mem_info);
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
-
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr, bool mqd_gfx9);
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info);
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
+
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dmabuf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
@@ -195,7 +205,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
struct kfd_vm_fault_info *info);
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dmabuf,
+ uint64_t va, void *vm,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset);
+
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 244d9834a381..ff7fac7df34b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -23,6 +23,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
@@ -173,13 +174,6 @@ static int get_tile_config(struct kgd_dev *kgd,
}
static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_local_mem_info = get_local_mem_info,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_pasid_alloc,
- .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_interrupts = kgd_init_interrupts,
@@ -200,28 +194,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
- .get_cu_info = get_cu_info,
- .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
- .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
- .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
- .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
- .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
- .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
- .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
- .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
- .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
- .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
- .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
- .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .submit_ib = amdgpu_amdkfd_submit_ib,
- .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
- .gpu_recover = amdgpu_amdkfd_gpu_reset,
- .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 9f149914ad6c..56ea929f524b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -24,6 +24,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
@@ -128,13 +129,6 @@ static int get_tile_config(struct kgd_dev *kgd,
}
static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_local_mem_info = get_local_mem_info,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_pasid_alloc,
- .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_interrupts = kgd_init_interrupts,
@@ -157,27 +151,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
- .get_cu_info = get_cu_info,
- .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
- .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
- .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
- .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
- .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
- .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
- .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
- .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
- .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
- .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
- .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
- .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .submit_ib = amdgpu_amdkfd_submit_ib,
- .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
- .gpu_recover = amdgpu_amdkfd_gpu_reset,
- .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 42cb4c4e0929..5c51d4910650 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -26,6 +26,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
@@ -46,38 +47,9 @@
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
+#include "mmhub_v1_0.h"
+#include "gfxhub_v1_0.h"
-/* HACK: MMHUB and GC both have VM-related register with the same
- * names but different offsets. Define the MMHUB register we need here
- * with a prefix. A proper solution would be to move the functions
- * programming these registers into gfx_v9_0.c and mmhub_v1_0.c
- * respectively.
- */
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ 0x06f3
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ_BASE_IDX 0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK 0x0705
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX 0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x072b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x072c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x074b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x074c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x076b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x076c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x0727
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x0728
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0
#define V9_PIPE_PER_MEC (4)
#define V9_QUEUES_PER_PIPE_MEC (8)
@@ -167,13 +139,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
}
static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_local_mem_info = get_local_mem_info,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_pasid_alloc,
- .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_interrupts = kgd_init_interrupts,
@@ -196,26 +161,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = amdgpu_amdkfd_get_tile_config,
- .get_cu_info = get_cu_info,
- .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
- .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
- .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
- .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
- .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
- .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
- .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
- .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
- .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
- .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
- .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
- .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .submit_ib = amdgpu_amdkfd_submit_ib,
- .gpu_recover = amdgpu_amdkfd_gpu_reset,
- .set_compute_idle = amdgpu_amdkfd_set_compute_idle,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
@@ -785,15 +733,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- uint32_t req = (1 << vmid) |
- (0 << VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT) | /* legacy */
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK |
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK |
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK |
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK |
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK;
-
- mutex_lock(&adev->srbm_mutex);
/* Use legacy mode tlb invalidation.
*
@@ -810,34 +749,7 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
* TODO 2: support range-based invalidation, requires kfd2kgd
* interface change
*/
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
- 0xffffffff);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
- 0x0000001f);
-
- WREG32(SOC15_REG_OFFSET(MMHUB, 0,
- mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
- 0xffffffff);
- WREG32(SOC15_REG_OFFSET(MMHUB, 0,
- mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
- 0x0000001f);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req);
-
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ),
- req);
-
- while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ACK)) &
- (1 << vmid)))
- cpu_relax();
-
- while (!(RREG32(SOC15_REG_OFFSET(MMHUB, 0,
- mmMMHUB_VM_INVALIDATE_ENG16_ACK)) &
- (1 << vmid)))
- cpu_relax();
-
- mutex_unlock(&adev->srbm_mutex);
-
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
}
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
@@ -876,7 +788,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
if (adev->in_gpu_reset)
return -EIO;
- if (ring->ready)
+ if (ring->sched.ready)
return invalidate_tlbs_with_kiq(adev, pasid);
for (vmid = 0; vmid < 16; vmid++) {
@@ -1016,7 +928,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint64_t base = page_table_base | AMDGPU_PTE_VALID;
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
@@ -1028,25 +939,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
* now, all processes share the same address space size, like
* on GFX8 and older.
*/
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
- upper_32_bits(adev->vm_manager.max_pfn - 1));
-
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
- upper_32_bits(adev->vm_manager.max_pfn - 1));
+ mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+ gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index df0a059565f9..be1ab43473c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
+#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
@@ -46,9 +47,9 @@
/* Impose limit on how much memory KFD can use */
static struct {
uint64_t max_system_mem_limit;
- uint64_t max_userptr_mem_limit;
+ uint64_t max_ttm_mem_limit;
int64_t system_mem_used;
- int64_t userptr_mem_used;
+ int64_t ttm_mem_used;
spinlock_t mem_limit_lock;
} kfd_mem_limit;
@@ -90,8 +91,8 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
}
/* Set memory usage limits. Current, limits are
- * System (kernel) memory - 3/8th System RAM
- * Userptr memory - 3/4th System RAM
+ * System (TTM + userptr) memory - 3/4th System RAM
+ * TTM memory - 3/8th System RAM
*/
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
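The shift arithmetic in the next hunk implements exactly the fractions named in the comment above:

    (mem >> 1) + (mem >> 2) = mem/2 + mem/4 = 3/4 of system RAM  (system limit)
    (mem >> 1) - (mem >> 3) = mem/2 - mem/8 = 3/8 of system RAM  (TTM limit)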
@@ -103,48 +104,61 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
mem *= si.mem_unit;
spin_lock_init(&kfd_mem_limit.mem_limit_lock);
- kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
- kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
- pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
+ kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
+ kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
+ pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
(kfd_mem_limit.max_system_mem_limit >> 20),
- (kfd_mem_limit.max_userptr_mem_limit >> 20));
+ (kfd_mem_limit.max_ttm_mem_limit >> 20));
}
-static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain)
+static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 domain, bool sg)
{
- size_t acc_size;
+ size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
+ uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
int ret = 0;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
sizeof(struct amdgpu_bo));
- spin_lock(&kfd_mem_limit.mem_limit_lock);
+ vram_needed = 0;
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- if (kfd_mem_limit.system_mem_used + (acc_size + size) >
- kfd_mem_limit.max_system_mem_limit) {
- ret = -ENOMEM;
- goto err_no_mem;
- }
- kfd_mem_limit.system_mem_used += (acc_size + size);
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
- if ((kfd_mem_limit.system_mem_used + acc_size >
- kfd_mem_limit.max_system_mem_limit) ||
- (kfd_mem_limit.userptr_mem_used + (size + acc_size) >
- kfd_mem_limit.max_userptr_mem_limit)) {
- ret = -ENOMEM;
- goto err_no_mem;
- }
- kfd_mem_limit.system_mem_used += acc_size;
- kfd_mem_limit.userptr_mem_used += size;
+ /* TTM GTT memory */
+ system_mem_needed = acc_size + size;
+ ttm_mem_needed = acc_size + size;
+ } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+ /* Userptr */
+ system_mem_needed = acc_size + size;
+ ttm_mem_needed = acc_size;
+ } else {
+ /* VRAM and SG */
+ system_mem_needed = acc_size;
+ ttm_mem_needed = acc_size;
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+ vram_needed = size;
+ }
+
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+
+ if ((kfd_mem_limit.system_mem_used + system_mem_needed >
+ kfd_mem_limit.max_system_mem_limit) ||
+ (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+ kfd_mem_limit.max_ttm_mem_limit) ||
+ (adev->kfd.vram_used + vram_needed >
+ adev->gmc.real_vram_size - reserved_for_pt)) {
+ ret = -ENOMEM;
+ } else {
+ kfd_mem_limit.system_mem_used += system_mem_needed;
+ kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
+ adev->kfd.vram_used += vram_needed;
}
-err_no_mem:
+
spin_unlock(&kfd_mem_limit.mem_limit_lock);
return ret;
}
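
The per-domain accounting above collapses to the table below; reserved_for_pt (total memory >> 9, roughly 0.2%) is additionally carved out of VRAM for page tables (summary sketch, not part of the patch):

	/* domain (sg flag)        system_mem_used    ttm_mem_used     vram_used
	 * GTT                     acc_size + size    acc_size + size  -
	 * CPU, !sg (userptr)      acc_size + size    acc_size         -
	 * VRAM                    acc_size           acc_size         size
	 * CPU, sg (doorbell)      acc_size           acc_size         -
	 */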
-static void unreserve_system_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain)
+static void unreserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 domain, bool sg)
{
size_t acc_size;
@@ -154,35 +168,39 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
kfd_mem_limit.system_mem_used -= (acc_size + size);
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
+ kfd_mem_limit.ttm_mem_used -= (acc_size + size);
+ } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+ kfd_mem_limit.system_mem_used -= (acc_size + size);
+ kfd_mem_limit.ttm_mem_used -= acc_size;
+ } else {
kfd_mem_limit.system_mem_used -= acc_size;
- kfd_mem_limit.userptr_mem_used -= size;
+ kfd_mem_limit.ttm_mem_used -= acc_size;
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ adev->kfd.vram_used -= size;
+ WARN_ONCE(adev->kfd.vram_used < 0,
+ "kfd VRAM memory accounting unbalanced");
+ }
}
WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
"kfd system memory accounting unbalanced");
- WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
- "kfd userptr memory accounting unbalanced");
+ WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
+ "kfd TTM memory accounting unbalanced");
spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
- spin_lock(&kfd_mem_limit.mem_limit_lock);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ u32 domain = bo->preferred_domains;
+ bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
- kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
- kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
- } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
- kfd_mem_limit.system_mem_used -=
- (bo->tbo.acc_size + amdgpu_bo_size(bo));
+ domain = AMDGPU_GEM_DOMAIN_CPU;
+ sg = false;
}
- WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
- "kfd system memory accounting unbalanced");
- WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
- "kfd userptr memory accounting unbalanced");
- spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}
@@ -395,23 +413,6 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
return 0;
}
-static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f)
-{
- int ret = amdgpu_sync_fence(adev, sync, f, false);
-
- /* Sync objects can't handle multiple GPUs (contexts) updating
- * sync->last_vm_update. Fortunately we don't need it for
- * KFD's purposes, so we can just drop that fence.
- */
- if (sync->last_vm_update) {
- dma_fence_put(sync->last_vm_update);
- sync->last_vm_update = NULL;
- }
-
- return ret;
-}
-
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
struct amdgpu_bo *pd = vm->root.base.bo;
@@ -422,7 +423,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
if (ret)
return ret;
- return sync_vm_fence(adev, sync, vm->last_update);
+ return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}
/* add_bo_to_vm - Add a BO to a VM
@@ -536,7 +537,7 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
struct amdgpu_bo *bo = mem->bo;
INIT_LIST_HEAD(&entry->head);
- entry->shared = true;
+ entry->num_shared = 1;
entry->bo = &bo->tbo;
mutex_lock(&process_info->lock);
if (userptr)
@@ -677,7 +678,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.tv.num_shared = 1;
ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
@@ -741,7 +742,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.tv.num_shared = 1;
ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
@@ -826,7 +827,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
/* Add the eviction fence back */
amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
- sync_vm_fence(adev, sync, bo_va->last_pt_update);
+ amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
return 0;
}
@@ -851,7 +852,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
return ret;
}
- return sync_vm_fence(adev, sync, bo_va->last_pt_update);
+ return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
}
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
@@ -886,6 +887,24 @@ update_gpuvm_pte_failed:
return ret;
}
+static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
+{
+ struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+
+ if (!sg)
+ return NULL;
+ if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
+ kfree(sg);
+ return NULL;
+ }
+ sg->sgl->dma_address = addr;
+ sg->sgl->length = size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->sgl->dma_length = size;
+#endif
+ return sg;
+}
+
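
A minimal usage sketch for the helper above (the address variable is illustrative; the real caller is the ALLOC_MEM_FLAGS_DOORBELL path further down):

	struct sg_table *sg = create_doorbell_sg(doorbell_dma_addr, PAGE_SIZE);

	if (!sg)
		return -ENOMEM;
	/* later attached to a ttm_bo_type_sg BO via bo->tbo.sg */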
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
struct amdgpu_vm *peer_vm;
@@ -901,6 +920,26 @@ static int process_validate_vms(struct amdkfd_process_info *process_info)
return 0;
}
+static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
+ struct amdgpu_sync *sync)
+{
+ struct amdgpu_vm *peer_vm;
+ int ret;
+
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ struct amdgpu_bo *pd = peer_vm->root.base.bo;
+
+ ret = amdgpu_sync_resv(NULL,
+ sync, pd->tbo.resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int process_update_pds(struct amdkfd_process_info *process_info,
struct amdgpu_sync *sync)
{
@@ -1149,6 +1188,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
@@ -1177,13 +1218,25 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (!offset || !*offset)
return -EINVAL;
user_addr = *offset;
+ } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+ bo_type = ttm_bo_type_sg;
+ alloc_flags = 0;
+ if (size > UINT_MAX)
+ return -EINVAL;
+ sg = create_doorbell_sg(*offset, size);
+ if (!sg)
+ return -ENOMEM;
} else {
return -EINVAL;
}
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if (!*mem)
- return -ENOMEM;
+ if (!*mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
@@ -1199,7 +1252,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
byte_align = (adev->family == AMDGPU_FAMILY_VI &&
adev->asic_type != CHIP_FIJI &&
adev->asic_type != CHIP_POLARIS10 &&
- adev->asic_type != CHIP_POLARIS11) ?
+ adev->asic_type != CHIP_POLARIS11 &&
+ adev->asic_type != CHIP_POLARIS12) ?
VI_BO_SIZE_ALIGN : 1;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
@@ -1215,10 +1269,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
amdgpu_sync_create(&(*mem)->sync);
- ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
if (ret) {
pr_debug("Insufficient system memory\n");
- goto err_reserve_system_mem;
+ goto err_reserve_limit;
}
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
@@ -1229,7 +1283,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
bp.byte_align = byte_align;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
- bp.type = ttm_bo_type_device;
+ bp.type = bo_type;
bp.resv = NULL;
ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret) {
@@ -1237,6 +1291,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
domain_string(alloc_domain), ret);
goto err_bo_create;
}
+ if (bo_type == ttm_bo_type_sg) {
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
+ }
bo->kfd_bo = *mem;
(*mem)->bo = bo;
if (user_addr)
@@ -1266,12 +1324,17 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
allocate_init_user_pages_failed:
amdgpu_bo_unref(&bo);
/* Don't unreserve system mem limit twice */
- goto err_reserve_system_mem;
+ goto err_reserve_limit;
err_bo_create:
- unreserve_system_mem_limit(adev, size, alloc_domain);
-err_reserve_system_mem:
+ unreserve_mem_limit(adev, size, alloc_domain, !!sg);
+err_reserve_limit:
mutex_destroy(&(*mem)->lock);
kfree(*mem);
+err:
+ if (sg) {
+ sg_free_table(sg);
+ kfree(sg);
+ }
return ret;
}
@@ -1341,6 +1404,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
/* Free the sync object */
amdgpu_sync_free(&mem->sync);
+ /* If the SG is not NULL, it is one we created for a doorbell
+ * BO, so we need to free it here.
+ */
+ if (mem->bo->tbo.sg) {
+ sg_free_table(mem->bo->tbo.sg);
+ kfree(mem->bo->tbo.sg);
+ }
+
/* Free the BO*/
amdgpu_bo_unref(&mem->bo);
mutex_destroy(&mem->lock);
@@ -1405,7 +1476,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* the queues are still stopped and we can leave mapping for
* the next restore worker
*/
- if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
+ bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
is_invalid_userptr = true;
if (check_if_add_bo_to_vm(avm, mem)) {
@@ -1642,6 +1714,60 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
return 0;
}
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dma_buf,
+ uint64_t va, void *vm,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ return -EINVAL;
+
+ obj = dma_buf->priv;
+ if (obj->dev->dev_private != adev)
+ /* Can't handle buffers from other devices */
+ return -EINVAL;
+
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ return -EINVAL;
+
+ *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if (!*mem)
+ return -ENOMEM;
+
+ if (size)
+ *size = amdgpu_bo_size(bo);
+
+ if (mmap_offset)
+ *mmap_offset = amdgpu_bo_mmap_offset(bo);
+
+ INIT_LIST_HEAD(&(*mem)->bo_va_list);
+ mutex_init(&(*mem)->lock);
+ (*mem)->mapping_flags =
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+
+ (*mem)->bo = amdgpu_bo_ref(bo);
+ (*mem)->va = va;
+ (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+ (*mem)->mapped_to_gpu_memory = 0;
+ (*mem)->process_info = avm->process_info;
+ add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
+ amdgpu_sync_create(&(*mem)->sync);
+
+ return 0;
+}
+
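
A hypothetical caller sketch for the new import path (fd, va, kgd and vm are illustrative; error handling abbreviated):

	struct dma_buf *buf = dma_buf_get(fd);
	struct kgd_mem *mem;
	uint64_t size, mmap_off;
	int r;

	if (IS_ERR(buf))
		return PTR_ERR(buf);
	r = amdgpu_amdkfd_gpuvm_import_dmabuf(kgd, buf, va, vm,
					      &mem, &size, &mmap_off);
	dma_buf_put(buf); /* the import holds its own BO reference */
	return r;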
/* Evict a userptr BO by stopping the queues if necessary
*
* Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1808,7 +1934,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
validate_list.head) {
list_add_tail(&mem->resv_list.head, &resv_list);
mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.shared = mem->validate_list.shared;
+ mem->resv_list.num_shared = mem->validate_list.num_shared;
}
/* Reserve all BOs and page tables for validation */
@@ -2027,7 +2153,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
list_add_tail(&mem->resv_list.head, &ctx.list);
mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.shared = mem->validate_list.shared;
+ mem->resv_list.num_shared = mem->validate_list.num_shared;
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
@@ -2044,13 +2170,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (ret)
goto validate_map_fail;
- /* Wait for PD/PTs validate to finish */
- /* FIXME: I think this isn't needed */
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node) {
- struct amdgpu_bo *bo = peer_vm->root.base.bo;
-
- ttm_bo_wait(&bo->tbo, false, false);
+ ret = process_sync_pds_resv(process_info, &sync_obj);
+ if (ret) {
+ pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
+ goto validate_map_fail;
}
/* Validate BOs and map them to GPUVM (update VM page tables). */
@@ -2066,7 +2189,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
pr_debug("Memory eviction: Validate BOs failed. Try again\n");
goto validate_map_fail;
}
-
+ ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
+ if (ret) {
+ pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+ goto validate_map_fail;
+ }
list_for_each_entry(bo_va_entry, &mem->bo_va_list,
bo_list) {
ret = update_gpuvm_pte((struct amdgpu_device *)
@@ -2087,6 +2214,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
goto validate_map_fail;
}
+ /* Wait for validate and PT updates to finish */
amdgpu_sync_wait(&sync_obj, false);
/* Release old eviction fence and create new one, because fence only
@@ -2105,10 +2233,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
process_info->eviction_fence = new_fence;
*ef = dma_fence_get(&new_fence->base);
- /* Wait for validate to finish and attach new eviction fence */
- list_for_each_entry(mem, &process_info->kfd_bo_list,
- validate_list.head)
- ttm_bo_wait(&mem->bo->tbo, false, false);
+ /* Attach new eviction fence to all BOs */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list.head)
amdgpu_bo_fence(mem->bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 14d2982a47cc..5c79da8e1150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -118,7 +118,6 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &bo->tbo;
- entry->tv.shared = !bo->prime_shared_count;
if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
list->gds_obj = bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 8816c697b205..ceadeeadfa56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
- ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+ ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
} else
@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (type == CGS_UCODE_ID_SMU) {
if (((adev->pdev->device == 0x67ef) &&
((adev->pdev->revision == 0xe0) ||
- (adev->pdev->revision == 0xe2) ||
(adev->pdev->revision == 0xe5))) ||
((adev->pdev->device == 0x67ff) &&
((adev->pdev->revision == 0xcf) ||
@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
- } else
+ } else if ((adev->pdev->device == 0x67ef) &&
+ (adev->pdev->revision == 0xe2)) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+ } else {
strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+ }
} else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
@@ -378,14 +384,31 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xef))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
- } else
+ } else if ((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe1) ||
+ (adev->pdev->revision == 0xf7))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+ } else {
strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+ }
} else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
- strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ if (((adev->pdev->device == 0x6987) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc3))) ||
+ ((adev->pdev->device == 0x6981) &&
+ ((adev->pdev->revision == 0x00) ||
+ (adev->pdev->revision == 0x01) ||
+ (adev->pdev->revision == 0x10)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+ } else {
+ strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ }
break;
case CHIP_VEGAM:
strcpy(fw_name, "amdgpu/vegam_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 663043c8f0f5..5dc3ee372e2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -50,7 +50,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &bo->tbo;
- p->uf_entry.tv.shared = true;
+ /* One for TTM and one for the CS job */
+ p->uf_entry.tv.num_shared = 2;
p->uf_entry.user_pages = NULL;
drm_gem_object_put_unlocked(gobj);
@@ -598,6 +599,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return r;
}
+ /* One for TTM and one for the CS job */
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->tv.num_shared = 2;
+
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
if (p->bo_list->first_userptr != p->bo_list->num_entries)
p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
@@ -717,8 +722,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
gws = p->bo_list->gws_obj;
oa = p->bo_list->oa_obj;
- amdgpu_bo_list_for_each_entry(e, p->bo_list)
- e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+ /* Make sure we use the exclusive slot for shared BOs */
+ if (bo->prime_shared_count)
+ e->tv.num_shared = 0;
+ e->bo_va = amdgpu_vm_bo_find(vm, bo);
+ }
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
@@ -955,10 +966,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
- if (r)
- return r;
-
p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
if (amdgpu_vm_debug) {
@@ -1104,7 +1111,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
{
int r;
struct dma_fence *fence;
- r = drm_syncobj_find_fence(p->filp, handle, 0, &fence);
+ r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
if (r)
return r;
@@ -1193,7 +1200,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
int i;
for (i = 0; i < p->num_post_dep_syncobjs; ++i)
- drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence);
+ drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
@@ -1260,8 +1267,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
return 0;
error_abort:
- dma_fence_put(&job->base.s_fence->finished);
- job->base.s_fence = NULL;
+ drm_sched_job_cleanup(&job->base);
amdgpu_mn_unlock(p->mn);
error_unlock:
@@ -1285,7 +1291,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, data);
if (r) {
- DRM_ERROR("Failed to initialize parser !\n");
+ DRM_ERROR("Failed to initialize parser %d!\n", r);
goto out;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
new file mode 100644
index 000000000000..7e22be7ca68a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#include "amdgpu.h"
+
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+ uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+ addr -= AMDGPU_VA_RESERVED_SIZE;
+ addr = amdgpu_gmc_sign_extend(addr);
+
+ return addr;
+}
+
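
A worked example of the address computation above (illustrative, assuming a 48-bit VA space and a 1 MiB AMDGPU_VA_RESERVED_SIZE):

	/* max_pfn << AMDGPU_GPU_PAGE_SHIFT = 1ULL << 48 = 0x1000000000000
	 * addr -= 1 MiB                   -> 0x0000FFFFFFF00000
	 * amdgpu_gmc_sign_extend(addr)    -> 0xFFFFFFFFFFF00000
	 * i.e. the CSA sits at the very top of the canonical VA range.
	 */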
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+ u32 domain, uint32_t size)
+{
+ int r;
+ void *ptr;
+
+ r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ domain, bo,
+ NULL, &ptr);
+ if (!*bo)
+ return -ENOMEM;
+
+ memset(ptr, 0, size);
+ return 0;
+}
+
+void amdgpu_free_static_csa(struct amdgpu_bo **bo)
+{
+ amdgpu_bo_free_kernel(bo, NULL, NULL);
+}
+
+/*
+ * amdgpu_map_static_csa should be called during amdgpu_vm_init.
+ * It maps the virtual address amdgpu_csa_vaddr() into this VM, and each
+ * GFX command submission should use this virtual address within its
+ * META_DATA init package to support SRIOV gfx preemption.
+ */
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+ uint64_t csa_addr, uint32_t size)
+{
+ struct ww_acquire_ctx ticket;
+ struct list_head list;
+ struct amdgpu_bo_list_entry pd;
+ struct ttm_validate_buffer csa_tv;
+ int r;
+
+ INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&csa_tv.head);
+ csa_tv.bo = &bo->tbo;
+ csa_tv.num_shared = 1;
+
+ list_add(&csa_tv.head, &list);
+ amdgpu_vm_get_pd_bo(vm, &list, &pd);
+
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ if (r) {
+ DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+ return r;
+ }
+
+ *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+ if (!*bo_va) {
+ ttm_eu_backoff_reservation(&ticket, &list);
+ DRM_ERROR("failed to create bo_va for static CSA\n");
+ return -ENOMEM;
+ }
+
+ r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
+ size);
+ if (r) {
+ DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
+ amdgpu_vm_bo_rmv(adev, *bo_va);
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return r;
+ }
+
+ r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
+ AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+ AMDGPU_PTE_EXECUTABLE);
+
+ if (r) {
+ DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+ amdgpu_vm_bo_rmv(adev, *bo_va);
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return r;
+ }
+
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return 0;
+}
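
A hypothetical call-site sketch for SRIOV VM setup (csa_va is illustrative, and a real caller may first mask the sign-extended address into the low VA range):

	struct amdgpu_bo_va *csa_va;
	int r;

	r = amdgpu_map_static_csa(adev, vm, adev->virt.csa_obj, &csa_va,
				  amdgpu_csa_vaddr(adev), AMDGPU_CSA_SIZE);
	if (r)
		DRM_ERROR("failed to map static CSA: %d\n", r);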
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
new file mode 100644
index 000000000000..524b4437a021
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#ifndef AMDGPU_CSA_MANAGER_H
+#define AMDGPU_CSA_MANAGER_H
+
+#define AMDGPU_CSA_SIZE (128 * 1024)
+
+uint32_t amdgpu_get_total_csa_size(struct amdgpu_device *adev);
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+ u32 domain, uint32_t size);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+ uint64_t csa_addr, uint32_t size);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f9b54236102d..d85184b5b35c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_UVD_ENC] = 1,
[AMDGPU_HW_IP_VCN_DEC] = 1,
[AMDGPU_HW_IP_VCN_ENC] = 1,
+ [AMDGPU_HW_IP_VCN_JPEG] = 1,
};
static int amdgput_ctx_total_num_entities(void)
@@ -247,7 +248,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
return -ENOMEM;
mutex_lock(&mgr->lock);
- r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+ r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
if (r < 0) {
mutex_unlock(&mgr->lock);
kfree(ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 30bc345d6fdf..b60afeade50a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -59,6 +59,8 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
+#include "amdgpu_xgmi.h"
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -513,6 +515,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
*/
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
+
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
adev->doorbell.base = 0;
@@ -525,15 +528,26 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
return -EINVAL;
+ amdgpu_asic_init_doorbell_index(adev);
+
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
- AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
+ adev->doorbell_index.max_assignment+1);
if (adev->doorbell.num_doorbells == 0)
return -EINVAL;
+ /* For Vega, reserve and map two pages on the doorbell BAR since the
+ * SDMA paging queue doorbell uses the second page. The
+ * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
+ * doorbells are in the first page. So with the paging queue enabled,
+ * max num_doorbells must be increased by one page (0x400 in dwords).
+ */
+ if (adev->asic_type >= CHIP_VEGA10)
+ adev->doorbell.num_doorbells += 0x400;
+
adev->doorbell.ptr = ioremap(adev->doorbell.base,
adev->doorbell.num_doorbells *
sizeof(u32));
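
The 0x400 increment above is exactly one page worth of 32-bit doorbells:

	/* PAGE_SIZE / sizeof(u32) = 4096 / 4 = 1024 = 0x400 doorbells per page */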
@@ -1656,7 +1670,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* right after GMC hw init, we create CSA */
if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_allocate_static_csa(adev);
+ r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_CSA_SIZE);
if (r) {
DRM_ERROR("allocate CSA failed %d\n", r);
return r;
@@ -1681,7 +1697,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
return r;
- amdgpu_xgmi_add_device(adev);
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
if (amdgpu_sriov_vf(adev))
@@ -1848,6 +1865,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_remove_device(adev);
+
amdgpu_amdkfd_device_fini(adev);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
@@ -1890,7 +1910,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_ucode_free_bo(adev);
- amdgpu_free_static_csa(adev);
+ amdgpu_free_static_csa(&adev->virt.csa_obj);
amdgpu_device_wb_fini(adev);
amdgpu_device_vram_scratch_fini(adev);
}
@@ -2337,6 +2357,19 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
+
+static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
+{
+ struct amdgpu_device *adev =
+ container_of(__work, struct amdgpu_device, xgmi_reset_work);
+
+ adev->asic_reset_res = amdgpu_asic_reset(adev);
+ if (adev->asic_reset_res)
+ DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
+ adev->asic_reset_res, adev->ddev->unique);
+}
+
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -2435,6 +2468,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
amdgpu_device_delay_enable_gfx_off);
+ INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+
adev->gfx.gfx_off_req_count = 1;
adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
@@ -2455,9 +2490,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- /* doorbell bar mapping */
- amdgpu_device_doorbell_init(adev);
-
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
@@ -2476,6 +2508,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* doorbell bar mapping and doorbell index init */
+ amdgpu_device_doorbell_init(adev);
+
/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
@@ -3148,86 +3183,6 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
return 0;
}
-/**
- * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
- *
- * @adev: amdgpu device pointer
- *
- * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means succeeded otherwise failed
- */
-static int amdgpu_device_reset(struct amdgpu_device *adev)
-{
- bool need_full_reset, vram_lost = 0;
- int r;
-
- need_full_reset = amdgpu_device_ip_need_full_reset(adev);
-
- if (!need_full_reset) {
- amdgpu_device_ip_pre_soft_reset(adev);
- r = amdgpu_device_ip_soft_reset(adev);
- amdgpu_device_ip_post_soft_reset(adev);
- if (r || amdgpu_device_ip_check_soft_reset(adev)) {
- DRM_INFO("soft reset failed, will fallback to full reset!\n");
- need_full_reset = true;
- }
- }
-
- if (need_full_reset) {
- r = amdgpu_device_ip_suspend(adev);
-
-retry:
- r = amdgpu_asic_reset(adev);
- /* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
-
- if (!r) {
- dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_device_ip_resume_phase1(adev);
- if (r)
- goto out;
-
- vram_lost = amdgpu_device_check_vram_lost(adev);
- if (vram_lost) {
- DRM_ERROR("VRAM is lost!\n");
- atomic_inc(&adev->vram_lost_counter);
- }
-
- r = amdgpu_gtt_mgr_recover(
- &adev->mman.bdev.man[TTM_PL_TT]);
- if (r)
- goto out;
-
- r = amdgpu_device_fw_loading(adev);
- if (r)
- return r;
-
- r = amdgpu_device_ip_resume_phase2(adev);
- if (r)
- goto out;
-
- if (vram_lost)
- amdgpu_device_fill_reset_magic(adev);
- }
- }
-
-out:
- if (!r) {
- amdgpu_irq_gpu_reset_resume_helper(adev);
- r = amdgpu_ib_ring_tests(adev);
- if (r) {
- dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_device_ip_suspend(adev);
- need_full_reset = true;
- goto retry;
- }
- }
-
- if (!r)
- r = amdgpu_device_recover_vram(adev);
-
- return r;
-}
/**
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
@@ -3295,40 +3250,46 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
return false;
}
- if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1 &&
- !amdgpu_sriov_vf(adev))) {
- DRM_INFO("GPU recovery disabled.\n");
- return false;
- }
+ if (amdgpu_gpu_recovery == 0)
+ goto disabled;
- return true;
-}
+ if (amdgpu_sriov_vf(adev))
+ return true;
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu device pointer
- * @job: which job trigger hang
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
- struct amdgpu_job *job)
-{
- int i, r, resched;
+ if (amdgpu_gpu_recovery == -1) {
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_VEGA20:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ break;
+ default:
+ goto disabled;
+ }
+ }
- dev_info(adev->dev, "GPU reset begin!\n");
+ return true;
- mutex_lock(&adev->lock_reset);
- atomic_inc(&adev->gpu_reset_counter);
- adev->in_gpu_reset = 1;
+disabled:
+ DRM_INFO("GPU recovery disabled.\n");
+ return false;
+}
- /* Block kfd */
- amdgpu_amdkfd_pre_reset(adev);
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ bool *need_full_reset_arg)
+{
+ int i, r = 0;
+ bool need_full_reset = *need_full_reset_arg;
/* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3348,10 +3309,144 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_fence_driver_force_completion(ring);
}
- if (amdgpu_sriov_vf(adev))
- r = amdgpu_device_reset_sriov(adev, job ? false : true);
- else
- r = amdgpu_device_reset(adev);
+
+
+ if (!amdgpu_sriov_vf(adev)) {
+
+ if (!need_full_reset)
+ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
+
+ if (!need_full_reset) {
+ amdgpu_device_ip_pre_soft_reset(adev);
+ r = amdgpu_device_ip_soft_reset(adev);
+ amdgpu_device_ip_post_soft_reset(adev);
+ if (r || amdgpu_device_ip_check_soft_reset(adev)) {
+ DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ need_full_reset = true;
+ }
+ }
+
+ if (need_full_reset)
+ r = amdgpu_device_ip_suspend(adev);
+
+ *need_full_reset_arg = need_full_reset;
+ }
+
+ return r;
+}
+
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
+ struct list_head *device_list_handle,
+ bool *need_full_reset_arg)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+ int r = 0;
+
+ /*
+ * ASIC reset has to be done on all XGMI hive nodes ASAP
+ * to allow proper link negotiation in FW (within 1 sec)
+ */
+ if (need_full_reset) {
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ } else
+ r = amdgpu_asic_reset(tmp_adev);
+
+ if (r) {
+ DRM_ERROR("ASIC reset failed with err r, %d for drm dev, %s",
+ r, tmp_adev->ddev->unique);
+ break;
+ }
+ }
+
+ /* For XGMI wait for all PSP resets to complete before proceed */
+ if (!r) {
+ list_for_each_entry(tmp_adev, device_list_handle,
+ gmc.xgmi.head) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
+ }
+
+
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (need_full_reset) {
+ /* post card */
+ if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
+ DRM_WARN("asic atom init failed!");
+
+ if (!r) {
+ dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_device_ip_resume_phase1(tmp_adev);
+ if (r)
+ goto out;
+
+ vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
+ if (vram_lost) {
+ DRM_ERROR("VRAM is lost!\n");
+ atomic_inc(&tmp_adev->vram_lost_counter);
+ }
+
+ r = amdgpu_gtt_mgr_recover(
+ &tmp_adev->mman.bdev.man[TTM_PL_TT]);
+ if (r)
+ goto out;
+
+ r = amdgpu_device_fw_loading(tmp_adev);
+ if (r)
+ return r;
+
+ r = amdgpu_device_ip_resume_phase2(tmp_adev);
+ if (r)
+ goto out;
+
+ if (vram_lost)
+ amdgpu_device_fill_reset_magic(tmp_adev);
+
+ /* Update PSP FW topology after reset */
+ if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ }
+ }
+
+
+out:
+ if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
+ r = amdgpu_device_ip_suspend(tmp_adev);
+ need_full_reset = true;
+ r = -EAGAIN;
+ goto end;
+ }
+ }
+
+ if (!r)
+ r = amdgpu_device_recover_vram(tmp_adev);
+ else
+ tmp_adev->asic_reset_res = r;
+ }
+
+end:
+ *need_full_reset_arg = need_full_reset;
+ return r;
+}
+
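
The XGMI branch above is the standard fan-out/fan-in workqueue pattern, condensed (sketch; xgmi_reset_work is set up with INIT_WORK in the amdgpu_device_init() hunk earlier in this file):

	/* fan out: start every node's reset in parallel */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
		queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work);
	/* fan in: wait for every node's reset to complete */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
		flush_work(&tmp_adev->xgmi_reset_work);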
+static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -3363,7 +3458,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* or all rings (in the case @job is NULL)
* after above amdgpu_reset accomplished
*/
- if ((!job || job->base.sched == &ring->sched) && !r)
+ if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res)
drm_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
@@ -3373,21 +3468,142 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
drm_helper_resume_force_mode(adev->ddev);
}
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+ adev->asic_reset_res = 0;
+}
- if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
- } else {
- dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
- }
+static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
+{
+ mutex_lock(&adev->lock_reset);
+ atomic_inc(&adev->gpu_reset_counter);
+ adev->in_gpu_reset = 1;
+ /* Block kfd */
+ amdgpu_amdkfd_pre_reset(adev);
+}
+static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
+{
/*unlock kfd */
amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
+}
+
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job trigger hang
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Attempt to do soft-reset or full-reset and reinitialize Asic
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int r;
+ struct amdgpu_hive_info *hive = NULL;
+ bool need_full_reset = false;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct list_head device_list, *device_list_handle = NULL;
+
+ INIT_LIST_HEAD(&device_list);
+
+ dev_info(adev->dev, "GPU reset begin!\n");
+
+ /*
+ * In case of an XGMI hive, disallow concurrent resets triggered by
+ * different nodes. There is also no point, since the node already
+ * executing the reset will reset all the other nodes in the hive.
+ */
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
+ !mutex_trylock(&hive->hive_lock))
+ return 0;
+
+ /* Start with adev pre asic reset first for soft reset check.*/
+ amdgpu_device_lock_adev(adev);
+ r = amdgpu_device_pre_asic_reset(adev,
+ job,
+ &need_full_reset);
+ if (r) {
+ /* TODO: Should we stop? */
+ DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
+ r, adev->ddev->unique);
+ adev->asic_reset_res = r;
+ }
+
+ /* Build list of devices to reset */
+ if (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!hive) {
+ amdgpu_device_unlock_adev(adev);
+ return -ENODEV;
+ }
+
+ /*
+ * In XGMI hive mode, device reset is done for all nodes in the hive
+ * to retrain all XGMI links, hence the reset sequence is executed
+ * in a loop on all nodes.
+ */
+ device_list_handle = &hive->device_list;
+ } else {
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
+
+retry: /* Pre-ASIC reset for the remaining adevs in the XGMI hive. */
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
+ if (tmp_adev == adev)
+ continue;
+
+ amdgpu_device_lock_adev(tmp_adev);
+ r = amdgpu_device_pre_asic_reset(tmp_adev,
+ NULL,
+ &need_full_reset);
+ /* TODO: Should we stop? */
+ if (r) {
+ DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
+ r, tmp_adev->ddev->unique);
+ tmp_adev->asic_reset_res = r;
+ }
+ }
+
+ /* Actual ASIC resets if needed.*/
+ /* TODO Implement XGMI hive reset logic for SRIOV */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_reset_sriov(adev, job ? false : true);
+ if (r)
+ adev->asic_reset_res = r;
+ } else {
+ r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+ if (r && r == -EAGAIN)
+ goto retry;
+ }
+
+ /* Post ASIC reset for all devs .*/
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
+
+ if (r) {
+ /* bad news, how to tell it to userspace ? */
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ } else {
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+ }
+
+ amdgpu_device_unlock_adev(tmp_adev);
+ }
+
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ mutex_unlock(&hive->hive_lock);
+
+ if (r)
+ dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6748cd7fc129..15ce7e681d67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -626,6 +626,18 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
"dither",
amdgpu_dither_enum_list, sz);
+ if (amdgpu_device_has_dc_support(adev)) {
+ adev->mode_info.max_bpc_property =
+ drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
+ if (!adev->mode_info.max_bpc_property)
+ return -ENOMEM;
+ adev->mode_info.abm_level_property =
+ drm_property_create_range(adev->ddev, 0,
+ "abm level", 0, 4);
+ if (!adev->mode_info.abm_level_property)
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -850,7 +862,12 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
/* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) {
vtotal = mode->crtc_vtotal;
- *vpos = *vpos - vtotal;
+
+ /* With variable refresh rate displays the vpos can exceed
+ * the vtotal value. Clamp to 0 to return -vbl_end instead
+ * of guessing the remaining number of lines until scanout.
+ */
+ *vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
}
/* Correct for shifted end of vbl at vbl_end. */
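
A quick numeric check of the clamp above (illustrative numbers):

	/* vtotal = 1125, VRR panel idling in vblank, vpos reads 1500:
	 *   old: *vpos = 1500 - 1125 = 375  (bogus positive offset)
	 *   new: 1500 >= vtotal, so *vpos = 0 and the caller gets -vbl_end
	 */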
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
new file mode 100644
index 000000000000..be620b29f4aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * GPU doorbell structures, functions & helpers
+ */
+struct amdgpu_doorbell {
+ /* doorbell mmio */
+ resource_size_t base;
+ resource_size_t size;
+ u32 __iomem *ptr;
+ u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
+};
+
+/* Reserved doorbells for amdgpu (including multimedia).
+ * KFD can use all the rest in the 2M doorbell bar.
+ * For ASICs before vega10, doorbells are 32-bit, so the
+ * index/offset is in dwords. For vega10 and later, doorbells
+ * can be 64-bit, so the index is defined in qwords.
+ */
+struct amdgpu_doorbell_index {
+ uint32_t kiq;
+ uint32_t mec_ring0;
+ uint32_t mec_ring1;
+ uint32_t mec_ring2;
+ uint32_t mec_ring3;
+ uint32_t mec_ring4;
+ uint32_t mec_ring5;
+ uint32_t mec_ring6;
+ uint32_t mec_ring7;
+ uint32_t userqueue_start;
+ uint32_t userqueue_end;
+ uint32_t gfx_ring0;
+ uint32_t sdma_engine0;
+ uint32_t sdma_engine1;
+ uint32_t sdma_engine2;
+ uint32_t sdma_engine3;
+ uint32_t sdma_engine4;
+ uint32_t sdma_engine5;
+ uint32_t sdma_engine6;
+ uint32_t sdma_engine7;
+ uint32_t ih;
+ union {
+ struct {
+ uint32_t vcn_ring0_1;
+ uint32_t vcn_ring2_3;
+ uint32_t vcn_ring4_5;
+ uint32_t vcn_ring6_7;
+ } vcn;
+ struct {
+ uint32_t uvd_ring0_1;
+ uint32_t uvd_ring2_3;
+ uint32_t uvd_ring4_5;
+ uint32_t uvd_ring6_7;
+ uint32_t vce_ring0_1;
+ uint32_t vce_ring2_3;
+ uint32_t vce_ring4_5;
+ uint32_t vce_ring6_7;
+ } uvd_vce;
+ };
+ uint32_t max_assignment;
+};
+
+typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
+{
+ AMDGPU_DOORBELL_KIQ = 0x000,
+ AMDGPU_DOORBELL_HIQ = 0x001,
+ AMDGPU_DOORBELL_DIQ = 0x002,
+ AMDGPU_DOORBELL_MEC_RING0 = 0x010,
+ AMDGPU_DOORBELL_MEC_RING1 = 0x011,
+ AMDGPU_DOORBELL_MEC_RING2 = 0x012,
+ AMDGPU_DOORBELL_MEC_RING3 = 0x013,
+ AMDGPU_DOORBELL_MEC_RING4 = 0x014,
+ AMDGPU_DOORBELL_MEC_RING5 = 0x015,
+ AMDGPU_DOORBELL_MEC_RING6 = 0x016,
+ AMDGPU_DOORBELL_MEC_RING7 = 0x017,
+ AMDGPU_DOORBELL_GFX_RING0 = 0x020,
+ AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
+ AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
+ AMDGPU_DOORBELL_IH = 0x1E8,
+ AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
+ AMDGPU_DOORBELL_INVALID = 0xFFFF
+} AMDGPU_DOORBELL_ASSIGNMENT;
+
+typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
+{
+ /* Compute + GFX: 0~255 */
+ AMDGPU_VEGA20_DOORBELL_KIQ = 0x000,
+ AMDGPU_VEGA20_DOORBELL_HIQ = 0x001,
+ AMDGPU_VEGA20_DOORBELL_DIQ = 0x002,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING0 = 0x003,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING1 = 0x004,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING2 = 0x005,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING3 = 0x006,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING4 = 0x007,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING5 = 0x008,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING6 = 0x009,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING7 = 0x00A,
+ AMDGPU_VEGA20_DOORBELL_USERQUEUE_START = 0x00B,
+ AMDGPU_VEGA20_DOORBELL_USERQUEUE_END = 0x08A,
+ AMDGPU_VEGA20_DOORBELL_GFX_RING0 = 0x08B,
+ /* SDMA:256~335*/
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0 = 0x100,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1 = 0x10A,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2 = 0x114,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3 = 0x11E,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4 = 0x128,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5 = 0x132,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6 = 0x13C,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7 = 0x146,
+ /* IH: 376~391 */
+ AMDGPU_VEGA20_DOORBELL_IH = 0x178,
+ /* MMSCH: 392~407
+ * the doorbell assignment overlaps with VCN as they are mutually exclusive;
+ * the VCE engine's doorbell is 32 bit and two VCE rings share one QWORD
+ */
+ AMDGPU_VEGA20_DOORBELL64_VCN0_1 = 0x188, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+ AMDGPU_VEGA20_DOORBELL64_VCN2_3 = 0x189,
+ AMDGPU_VEGA20_DOORBELL64_VCN4_5 = 0x18A,
+ AMDGPU_VEGA20_DOORBELL64_VCN6_7 = 0x18B,
+
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1 = 0x188,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3 = 0x189,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5 = 0x18A,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7 = 0x18B,
+
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1 = 0x18C,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3 = 0x18D,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5 = 0x18E,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7 = 0x18F,
+ AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F,
+ AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF
+} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
+
+/*
+ * 64-bit doorbells; offsets are in QWORDs and occupy 2KB of doorbell space
+ */
+typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
+{
+ /*
+ * All compute-related doorbells (kiq, hiq, diq, traditional compute
+ * queues, user queues) should be located in a contiguous range so that
+ * programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover the whole range.
+ * Compute-related doorbells are allocated from 0x00 to 0x8a.
+ */
+
+
+ /* kernel scheduling */
+ AMDGPU_DOORBELL64_KIQ = 0x00,
+
+ /* HSA interface queue and debug queue */
+ AMDGPU_DOORBELL64_HIQ = 0x01,
+ AMDGPU_DOORBELL64_DIQ = 0x02,
+
+ /* Compute engines */
+ AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
+ AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
+ AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
+ AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
+ AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
+ AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
+ AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
+ AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
+
+ /* User queue doorbell range (128 doorbells) */
+ AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
+ AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
+
+ /* Graphics engine */
+ AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
+
+ /*
+ * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
+ * Graphics voltage island aperture 1
+ * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
+ */
+
+ /* For vega10 SRIOV, the SDMA doorbells must be fixed as follows
+ * to keep the same settings as the host driver, or conflicts
+ * will occur
+ */
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
+
+ /* Interrupt handler */
+ AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
+ AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
+ AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
+
+ /* VCN engines use 32-bit doorbells */
+ AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+ AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
+ AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
+ AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
+
+ /* the doorbell assignment overlaps with VCN as they are mutually exclusive;
+ * the VCE engine's doorbell is 32 bit and two VCE rings share one QWORD
+ */
+ AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
+ AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
+ AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
+ AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
+
+ AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
+ AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
+ AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
+ AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
+
+ AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
+ AMDGPU_DOORBELL64_INVALID = 0xFFFF
+} AMDGPU_DOORBELL64_ASSIGNMENT;
+
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
+u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+
+#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
+#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
+#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
+#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
+
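
A minimal usage sketch for the accessors above (assumes an adev variable in scope, which the macros reference implicitly; the ring fields follow amdgpu's usual naming):

	if (ring->use_doorbell)
		WDOORBELL64(ring->doorbell_index, ring->wptr);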
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 943dbf3c5da1..90f474f98b6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
+/* FBC (bit 0) disabled by default */
+uint amdgpu_dc_feature_mask = 0;
+
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
@@ -451,9 +454,10 @@ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
/**
* DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Pramater Cache per Shader Engine in Byte. The default is 0 (depending on gfx).
+ * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
+ * The default is 0 (depending on gfx).
*/
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
+MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
/**
@@ -631,6 +635,14 @@ module_param(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
#endif
+/**
+ * DOC: dcfeaturemask (uint)
+ * Override the enabled display features. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable display features.
+ */
+MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default)");
+module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
+
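
Usage note: per the "FBC (bit 0)" comment near the top of this file, loading with bit 0 set enables FBC, e.g. "modprobe amdgpu dcfeaturemask=0x1", or amdgpu.dcfeaturemask=0x1 on the kernel command line.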
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1209,9 +1221,6 @@ static struct drm_driver kms_driver = {
.patchlevel = KMS_DRIVER_PATCHLEVEL,
};
-static struct drm_driver *driver;
-static struct pci_driver *pdriver;
-
static struct pci_driver amdgpu_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
@@ -1241,16 +1250,14 @@ static int __init amdgpu_init(void)
goto error_fence;
DRM_INFO("amdgpu kernel modesetting enabled.\n");
- driver = &kms_driver;
- pdriver = &amdgpu_kms_pci_driver;
- driver->num_ioctls = amdgpu_max_kms_ioctl;
+ kms_driver.num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
amdgpu_amdkfd_init();
/* let modprobe override vga console setting */
- return pci_register_driver(pdriver);
+ return pci_register_driver(&amdgpu_kms_pci_driver);
error_fence:
amdgpu_sync_fini();
@@ -1262,7 +1269,7 @@ error_sync:
static void __exit amdgpu_exit(void)
{
amdgpu_amdkfd_fini();
- pci_unregister_driver(pdriver);
+ pci_unregister_driver(&amdgpu_kms_pci_driver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
amdgpu_fence_slab_fini();
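The new dcfeaturemask parameter above is read-only after load (permissions 0444) and is meant to be tested against the DC_FEATURE_MASK bits. A hedged sketch of how display code might gate a feature on it; treating bit 0 as FBC follows the "FBC (bit 0)" comment in amdgpu_drv.c, and the DC_FBC_MASK name is an assumption based on the referenced enum in amd_shared.h:

    /* Sketch: gate an optional display feature on the module mask. */
    #define DC_FBC_MASK (1 << 0)    /* assumed; see enum DC_FEATURE_MASK */

    extern uint amdgpu_dc_feature_mask;

    static bool example_dc_fbc_requested(void)
    {
            return (amdgpu_dc_feature_mask & DC_FBC_MASK) != 0;
    }

Loading the module with dcfeaturemask=0x1 would then opt in to FBC under this assumption.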
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5448cf27654e..ee47c11e92ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -398,9 +398,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring->fence_drv.irq_type = irq_type;
ring->fence_drv.initialized = true;
- dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
- "cpu addr 0x%p\n", ring->idx,
- ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
+ DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr "
+ "0x%016llx, cpu addr 0x%p\n", ring->name,
+ ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 11fea28f8ad3..6d11e1721147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -248,7 +248,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
}
mb();
amdgpu_asic_flush_hdp(adev, NULL);
- amdgpu_gmc_flush_gpu_tlb(adev, 0);
+ amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
return 0;
}
@@ -259,6 +259,8 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
* @offset: offset into the GPU's gart aperture
* @pages: number of pages to bind
* @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ * @dst: CPU address of the gart table
*
* Map the dma_addresses into GART entries (all asics).
* Returns 0 for success, -EINVAL for failure.
@@ -331,7 +333,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
mb();
amdgpu_asic_flush_hdp(adev, NULL);
- amdgpu_gmc_flush_gpu_tlb(adev, 0);
+ amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 9ff62887e4e3..afa2e2877d87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -41,6 +41,7 @@ struct amdgpu_bo;
struct amdgpu_gart {
struct amdgpu_bo *bo;
+ /* CPU kmapped address of gart table */
void *ptr;
unsigned num_gpu_pages;
unsigned num_cpu_pages;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7b3d1ebda9df..f4f00217546e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -169,7 +169,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
- tv.shared = true;
+ tv.num_shared = 1;
list_add(&tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -604,7 +604,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
tv.bo = &abo->tbo;
- tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
+ if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+ tv.num_shared = 1;
+ else
+ tv.num_shared = 0;
list_add(&tv.head, &list);
} else {
gobj = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index d63daba9b17c..f1ddfc50bcc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -54,6 +54,8 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+extern const struct dma_buf_ops amdgpu_dmabuf_ops;
+
/*
* GEM objects.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1a656b8657f7..97a60da62004 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -25,6 +25,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -249,7 +250,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
ring->adev = NULL;
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
+ ring->doorbell_index = adev->doorbell_index.kiq;
r = amdgpu_gfx_kiq_acquire(adev, ring);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index b61b5c11aead..f790e15bcd08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -29,6 +29,7 @@
*/
#include "clearstate_defs.h"
#include "amdgpu_ring.h"
+#include "amdgpu_rlc.h"
/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
@@ -37,59 +38,6 @@
#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
-
-struct amdgpu_rlc_funcs {
- void (*enter_safe_mode)(struct amdgpu_device *adev);
- void (*exit_safe_mode)(struct amdgpu_device *adev);
-};
-
-struct amdgpu_rlc {
- /* for power gating */
- struct amdgpu_bo *save_restore_obj;
- uint64_t save_restore_gpu_addr;
- volatile uint32_t *sr_ptr;
- const u32 *reg_list;
- u32 reg_list_size;
- /* for clear state */
- struct amdgpu_bo *clear_state_obj;
- uint64_t clear_state_gpu_addr;
- volatile uint32_t *cs_ptr;
- const struct cs_section_def *cs_data;
- u32 clear_state_size;
- /* for cp tables */
- struct amdgpu_bo *cp_table_obj;
- uint64_t cp_table_gpu_addr;
- volatile uint32_t *cp_table_ptr;
- u32 cp_table_size;
-
- /* safe mode for updating CG/PG state */
- bool in_safe_mode;
- const struct amdgpu_rlc_funcs *funcs;
-
- /* for firmware data */
- u32 save_and_restore_offset;
- u32 clear_state_descriptor_offset;
- u32 avail_scratch_ram_locations;
- u32 reg_restore_list_size;
- u32 reg_list_format_start;
- u32 reg_list_format_separate_start;
- u32 starting_offsets_start;
- u32 reg_list_format_size_bytes;
- u32 reg_list_size_bytes;
- u32 reg_list_format_direct_reg_list_length;
- u32 save_restore_list_cntl_size_bytes;
- u32 save_restore_list_gpm_size_bytes;
- u32 save_restore_list_srm_size_bytes;
-
- u32 *register_list_format;
- u32 *register_restore;
- u8 *save_restore_list_cntl;
- u8 *save_restore_list_gpm;
- u8 *save_restore_list_srm;
-
- bool is_rlc_v2_1;
-};
-
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
struct amdgpu_mec {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 6fa7ef446e46..81e6070d255b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -64,7 +64,7 @@ struct amdgpu_vmhub {
struct amdgpu_gmc_funcs {
/* flush the vm tlb via mmio */
void (*flush_gpu_tlb)(struct amdgpu_device *adev,
- uint32_t vmid);
+ uint32_t vmid, uint32_t flush_type);
/* flush the vm tlb via ring */
uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
@@ -89,7 +89,7 @@ struct amdgpu_gmc_funcs {
struct amdgpu_xgmi {
/* from psp */
- u64 device_id;
+ u64 node_id;
u64 hive_id;
/* fixed per family */
u64 node_segment_size;
@@ -99,6 +99,7 @@ struct amdgpu_xgmi {
unsigned num_physical_nodes;
/* gpu list in the same hive */
struct list_head head;
+ bool supported;
};
struct amdgpu_gmc {
@@ -151,7 +152,7 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
};
-#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index b8963b725dfa..c48207b377bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -146,7 +146,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
fence_ctx = 0;
}
- if (!ring->ready) {
+ if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
return -EINVAL;
}
@@ -221,8 +221,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
- amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
- need_ctx_switch);
+ amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch);
need_ctx_switch = false;
}
@@ -347,19 +346,14 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
}
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ for (i = 0; i < adev->num_rings; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
long tmo;
- if (!ring || !ring->ready)
- continue;
-
- /* skip IB tests for KIQ in general for the below reasons:
- * 1. We never submit IBs to the KIQ
- * 2. KIQ doesn't use the EOP interrupts,
- * we use some other CP interrupt.
+ /* KIQ rings don't have an IB test because we never submit IBs
+ * to them and they have no interrupt support.
*/
- if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ if (!ring->sched.ready || !ring->funcs->test_ib)
continue;
/* MM engine need more time */
@@ -374,20 +368,23 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
tmo = tmo_gfx;
r = amdgpu_ring_test_ib(ring, tmo);
- if (r) {
- ring->ready = false;
-
- if (ring == &adev->gfx.gfx_ring[0]) {
- /* oh, oh, that's really bad */
- DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
- adev->accel_working = false;
- return r;
-
- } else {
- /* still not good, but we can live with it */
- DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
- ret = r;
- }
+ if (!r) {
+ DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
+ ring->name);
+ continue;
+ }
+
+ ring->sched.ready = false;
+ DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
+ ring->name, r);
+
+ if (ring == &adev->gfx.gfx_ring[0]) {
+ /* oh, oh, that's really bad */
+ adev->accel_working = false;
+ return r;
+
+ } else {
+ ret = r;
}
}
return ret;
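The readiness flag now lives in the GPU scheduler embedded in each ring, so every submission path in this series checks ring->sched.ready instead of a driver-private bool. A minimal sketch of the gating pattern, mirroring the check at the top of amdgpu_ib_schedule() (the function name is illustrative):

    /* Sketch: refuse work on a ring whose scheduler is not ready. */
    static int example_submit(struct amdgpu_ring *ring)
    {
            if (!ring->sched.ready) {
                    dev_err(ring->adev->dev,
                            "ring <%s> not ready, rejecting submission\n",
                            ring->name);
                    return -EINVAL;
            }
            /* ... build and emit the IB here ... */
            return 0;
    }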
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 9ce8c93ec19b..f877bb78d10a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -51,14 +51,12 @@ struct amdgpu_ih_ring {
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev);
- bool (*prescreen_iv)(struct amdgpu_device *adev);
void (*decode_iv)(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
void (*set_rptr)(struct amdgpu_device *adev);
};
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
-#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 52c17f6219a7..b7968f426862 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -94,23 +94,6 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
}
/**
- * amdgpu_irq_reset_work_func - execute GPU reset
- *
- * @work: work struct pointer
- *
- * Execute scheduled GPU reset (Cayman+).
- * This function is called when the IRQ handler thinks we need a GPU reset.
- */
-static void amdgpu_irq_reset_work_func(struct work_struct *work)
-{
- struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
- reset_work);
-
- if (!amdgpu_sriov_vf(adev) && amdgpu_device_should_recover_gpu(adev))
- amdgpu_device_gpu_recover(adev, NULL);
-}
-
-/**
* amdgpu_irq_disable_all - disable *all* interrupts
*
* @adev: amdgpu device pointer
@@ -162,13 +145,6 @@ static void amdgpu_irq_callback(struct amdgpu_device *adev,
u32 ring_index = ih->rptr >> 2;
struct amdgpu_iv_entry entry;
- /* Prescreening of high-frequency interrupts */
- if (!amdgpu_ih_prescreen_iv(adev))
- return;
-
- /* Before dispatching irq to IP blocks, send it to amdkfd */
- amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
-
entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
amdgpu_ih_decode_iv(adev, &entry);
@@ -262,15 +238,12 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
amdgpu_hotplug_work_func);
}
- INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
-
adev->irq.installed = true;
r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
if (r) {
adev->irq.installed = false;
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
- cancel_work_sync(&adev->reset_work);
return r;
}
adev->ddev->max_vblank_count = 0x00ffffff;
@@ -299,7 +272,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
pci_disable_msi(adev->pdev);
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
- cancel_work_sync(&adev->reset_work);
}
for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
@@ -392,39 +364,38 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
unsigned client_id = entry->client_id;
unsigned src_id = entry->src_id;
struct amdgpu_irq_src *src;
+ bool handled = false;
int r;
trace_amdgpu_iv(entry);
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
- return;
- }
- if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
+ } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
- return;
- }
- if (adev->irq.virq[src_id]) {
+ } else if (adev->irq.virq[src_id]) {
generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
- } else {
- if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
- return;
- }
- src = adev->irq.client[client_id].sources[src_id];
- if (!src) {
- DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
- return;
- }
+ } else if (!adev->irq.client[client_id].sources) {
+ DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
+ } else if ((src = adev->irq.client[client_id].sources[src_id])) {
r = src->funcs->process(adev, src, entry);
- if (r)
+ if (r < 0)
DRM_ERROR("error processing interrupt (%d)\n", r);
+ else if (r)
+ handled = true;
+
+ } else {
+ DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
}
+
+ /* Send it to amdkfd as well if it isn't already handled */
+ if (!handled)
+ amdgpu_amdkfd_interrupt(adev, entry->iv_entry);
}
/**
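The rewritten amdgpu_irq_dispatch() above selects exactly one outcome in the if/else chain and forwards anything the IP blocks did not claim to amdkfd. Under the new convention a source's process() callback returns a negative value on error, a positive value if it consumed the entry, and 0 to let the entry fall through. A hedged sketch of a callback written to that convention (the condition is hypothetical):

    /* Sketch: an amdgpu_irq_src process() callback under the new
     * return convention.
     */
    static int example_irq_process(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
    {
            if (entry->src_data[0] == 0)    /* hypothetical condition */
                    return 0;               /* not ours; amdkfd may want it */

            /* ... handle the interrupt ... */
            return 1;                       /* consumed */
    }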
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 755f733bf0d9..e0af44fd6a0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -112,6 +112,8 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
+ drm_sched_job_cleanup(s_job);
+
amdgpu_ring_priority_put(ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 57cfe78a262b..e1b46a6703de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -33,6 +33,8 @@
#define to_amdgpu_job(sched_job) \
container_of((sched_job), struct amdgpu_job, base)
+#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
+
struct amdgpu_fence;
struct amdgpu_job {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 81732a84c2ab..bc62bf41b7e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -336,7 +336,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- if (adev->gfx.gfx_ring[i].ready)
+ if (adev->gfx.gfx_ring[i].sched.ready)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
@@ -344,7 +344,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- if (adev->gfx.compute_ring[i].ready)
+ if (adev->gfx.compute_ring[i].sched.ready)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
@@ -352,7 +352,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- if (adev->sdma.instance[i].ring.ready)
+ if (adev->sdma.instance[i].ring.sched.ready)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -363,7 +363,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->uvd.harvest_config & (1 << i))
continue;
- if (adev->uvd.inst[i].ring.ready)
+ if (adev->uvd.inst[i].ring.sched.ready)
++num_rings;
}
ib_start_alignment = 64;
@@ -372,7 +372,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- if (adev->vce.ring[i].ready)
+ if (adev->vce.ring[i].sched.ready)
++num_rings;
ib_start_alignment = 4;
ib_size_alignment = 1;
@@ -384,7 +384,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- if (adev->uvd.inst[i].ring_enc[j].ready)
+ if (adev->uvd.inst[i].ring_enc[j].sched.ready)
++num_rings;
}
ib_start_alignment = 64;
@@ -392,7 +392,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
break;
case AMDGPU_HW_IP_VCN_DEC:
type = AMD_IP_BLOCK_TYPE_VCN;
- if (adev->vcn.ring_dec.ready)
+ if (adev->vcn.ring_dec.sched.ready)
++num_rings;
ib_start_alignment = 16;
ib_size_alignment = 16;
@@ -400,14 +400,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCN_ENC:
type = AMD_IP_BLOCK_TYPE_VCN;
for (i = 0; i < adev->vcn.num_enc_rings; i++)
- if (adev->vcn.ring_enc[i].ready)
+ if (adev->vcn.ring_enc[i].sched.ready)
++num_rings;
ib_start_alignment = 64;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_VCN_JPEG:
type = AMD_IP_BLOCK_TYPE_VCN;
- if (adev->vcn.ring_jpeg.ready)
+ if (adev->vcn.ring_jpeg.sched.ready)
++num_rings;
ib_start_alignment = 16;
ib_size_alignment = 16;
@@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;
- /* Ensure IB tests are run on ring */
- flush_delayed_work(&adev->late_init_work);
-
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
ui32 = adev->accel_working;
@@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
struct amdgpu_fpriv *fpriv;
int r, pasid;
+ /* Ensure IB tests are run on ring */
+ flush_delayed_work(&adev->late_init_work);
+
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
@@ -978,7 +978,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
}
if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
+ uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+ r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+ &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
if (r)
goto error_vm;
}
@@ -1048,8 +1051,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
pasid = fpriv->vm.pasid;
pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
- amdgpu_vm_fini(adev, &fpriv->vm);
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+ amdgpu_vm_fini(adev, &fpriv->vm);
if (pasid)
amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index b9e9e8b02fb7..aadd0fa42e43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,7 +38,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
-#include <drm/drm_fb_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/hrtimer.h>
@@ -57,7 +56,6 @@ struct amdgpu_hpd;
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
-#define to_amdgpu_plane(x) container_of(x, struct amdgpu_plane, base)
#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base);
@@ -295,13 +293,6 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
- /* it is used to enter or exit into free sync mode */
- int (*notify_freesync)(struct drm_device *dev, void *data,
- struct drm_file *filp);
- /* it is used to allow enablement of freesync mode */
- int (*set_freesync_property)(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val);
};
@@ -325,7 +316,7 @@ struct amdgpu_mode_info {
struct card_info *atom_card_info;
bool mode_config_initialized;
struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
- struct amdgpu_plane *planes[AMDGPU_MAX_PLANES];
+ struct drm_plane *planes[AMDGPU_MAX_PLANES];
struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
@@ -339,6 +330,10 @@ struct amdgpu_mode_info {
struct drm_property *audio_property;
/* FMT dithering */
struct drm_property *dither_property;
+ /* maximum number of bits per channel for monitor color */
+ struct drm_property *max_bpc_property;
+ /* Adaptive Backlight Modulation (power feature) */
+ struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
@@ -434,11 +429,6 @@ struct amdgpu_crtc {
struct drm_pending_vblank_event *event;
};
-struct amdgpu_plane {
- struct drm_plane base;
- enum drm_plane_type plane_type;
-};
-
struct amdgpu_encoder_atom_dig {
bool linkb;
/* atom dig */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 904014dc5915..fd271f9746a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -81,7 +81,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_subtract_pin_size(bo);
if (bo->kfd_bo)
- amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+ amdgpu_amdkfd_unreserve_memory_limit(bo);
amdgpu_bo_kunmap(bo);
@@ -608,53 +608,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
}
/**
- * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
- * @adev: amdgpu device object
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: &amdgpu_bo buffer to be backed up
- * @resv: reservation object with embedded fence
- * @fence: dma_fence associated with the operation
- * @direct: whether to submit the job directly
- *
- * Copies an &amdgpu_bo buffer object to its shadow object.
- * Not used for now.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence,
- bool direct)
-
-{
- struct amdgpu_bo *shadow = bo->shadow;
- uint64_t bo_addr, shadow_addr;
- int r;
-
- if (!shadow)
- return -EINVAL;
-
- bo_addr = amdgpu_bo_gpu_offset(bo);
- shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
-
- r = reservation_object_reserve_shared(bo->tbo.resv);
- if (r)
- goto err;
-
- r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
- amdgpu_bo_size(bo), resv, fence,
- direct, false);
- if (!r)
- amdgpu_bo_fence(bo, *fence, true);
-
-err:
- return r;
-}
-
-/**
* amdgpu_bo_validate - validate an &amdgpu_bo buffer object
* @bo: pointer to the buffer object
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 7d3312d0da11..9291c2f837e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -267,11 +267,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 59cc678de8c1..1f61ed95727c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -33,6 +33,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
+#include "hwmgr.h"
+#define WIDTH_4K 3840
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
@@ -1642,6 +1644,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
+ /* Skip fan attributes on APU */
+ if ((adev->flags & AMD_IS_APU) &&
+ (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
+ return 0;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
@@ -1956,6 +1971,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
mutex_unlock(&adev->pm.mutex);
}
+ /* enable/disable Low Memory PState for UVD (4k videos) */
+ if (adev->asic_type == CHIP_STONEY &&
+ adev->uvd.decode_image_width >= WIDTH_4K) {
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+ if (hwmgr && hwmgr->hwmgr_func &&
+ hwmgr->hwmgr_func->update_nbdpm_pstate)
+ hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+ !enable,
+ true);
+ }
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
@@ -2129,7 +2155,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
+ if (ring && ring->sched.ready)
amdgpu_fence_wait_empty(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index e45e929aaab5..71913a18d142 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -39,8 +39,6 @@
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
-static const struct dma_buf_ops amdgpu_dmabuf_ops;
-
/**
* amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
* implementation
@@ -332,15 +330,13 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
-static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_gem_map_attach,
.detach = amdgpu_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_gem_begin_cpu_access,
- .map = drm_gem_dmabuf_kmap,
- .unmap = drm_gem_dmabuf_kunmap,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 25d2f3e757f1..6759d898b3ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -90,6 +90,8 @@ static int psp_sw_fini(void *handle)
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
return 0;
}
@@ -118,21 +120,25 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
static int
psp_cmd_submit_buf(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
- struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr,
- int index)
+ struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
int ret;
+ int index;
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
+ index = atomic_inc_return(&psp->fence_value);
ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
fence_mc_addr, index);
+ if (ret) {
+ atomic_dec(&psp->fence_value);
+ return ret;
+ }
- while (*((unsigned int *)psp->fence_buf) != index) {
+ while (*((unsigned int *)psp->fence_buf) != index)
msleep(1);
- }
/* the status field must be 0 after FW is loaded */
if (ucode && psp->cmd_buf_mem->resp.status) {
@@ -149,10 +155,22 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
-static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+bool psp_support_vmr_ring(struct psp_context *psp)
+{
+ if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
+ return true;
+ else
+ return false;
+}
+
+static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
+ struct psp_gfx_cmd_resp *cmd,
uint64_t tmr_mc, uint32_t size)
{
- cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
+ if (psp_support_vmr_ring(psp))
+ cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
+ else
+ cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_size = size;
@@ -186,12 +204,12 @@ static int psp_tmr_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
+ psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n",
PSP_TMR_SIZE, psp->tmr_mc_addr);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr, 1);
+ psp->fence_buf_mc_addr);
if (ret)
goto failed;
@@ -258,13 +276,194 @@ static int psp_asd_load(struct psp_context *psp)
psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr, 2);
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
+ uint32_t xgmi_ta_size, uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_xgmi_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16 KB of memory, 4 KB aligned, from the frame buffer (local
+ * physical) for XGMI TA <-> driver communication
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->xgmi_context.xgmi_shared_bo,
+ &psp->xgmi_context.xgmi_shared_mc_addr,
+ &psp->xgmi_context.xgmi_shared_buf);
+
+ return ret;
+}
+
+static int psp_xgmi_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
+
+ psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->xgmi_context.xgmi_shared_mc_addr,
+ psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->xgmi_context.initialized = 1;
+ psp->xgmi_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t xgmi_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id;
+}
+
+static int psp_xgmi_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
kfree(cmd);
return ret;
}
+static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t xgmi_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->xgmi_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_xgmi_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->xgmi_context.initialized)
+ return 0;
+
+ ret = psp_xgmi_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->xgmi_context.initialized = 0;
+
+ /* free xgmi shared memory */
+ amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
+ &psp->xgmi_context.xgmi_shared_mc_addr,
+ &psp->xgmi_context.xgmi_shared_buf);
+
+ return 0;
+}
+
+static int psp_xgmi_initialize(struct psp_context *psp)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ if (!psp->xgmi_context.initialized) {
+ ret = psp_xgmi_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ /* Load XGMI TA */
+ ret = psp_xgmi_load(psp);
+ if (ret)
+ return ret;
+
+ /* Initialize XGMI session */
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
+
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+
+ return ret;
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -292,6 +491,15 @@ static int psp_hw_start(struct psp_context *psp)
if (ret)
return ret;
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ ret = psp_xgmi_initialize(psp);
+ /* Warn on XGMI session initialization failure,
+ * but do not stop driver initialization.
+ */
+ if (ret)
+ dev_err(psp->adev->dev,
+ "XGMI: Failed to initialize XGMI session\n");
+ }
return 0;
}
@@ -321,7 +529,7 @@ static int psp_np_fw_load(struct psp_context *psp)
return ret;
ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
- psp->fence_buf_mc_addr, i + 3);
+ psp->fence_buf_mc_addr);
if (ret)
return ret;
@@ -340,8 +548,10 @@ static int psp_load_fw(struct amdgpu_device *adev)
int ret;
struct psp_context *psp = &adev->psp;
- if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
+ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
+ psp_ring_destroy(psp, PSP_RING_TYPE__KM);
goto skip_memalloc;
+ }
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd)
@@ -452,6 +662,10 @@ static int psp_hw_fini(void *handle)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
+ if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+ psp->xgmi_context.initialized == 1)
+ psp_xgmi_terminate(psp);
+
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
@@ -479,6 +693,15 @@ static int psp_suspend(void *handle)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
+ if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+ psp->xgmi_context.initialized == 1) {
+ ret = psp_xgmi_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate xgmi ta\n");
+ return ret;
+ }
+ }
+
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring stop failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8b8720e9c3f0..10decf70c9aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -27,14 +27,17 @@
#include "amdgpu.h"
#include "psp_gfx_if.h"
+#include "ta_xgmi_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
-#define PSP_ASD_SHARED_MEM_SIZE 0x4000
+#define PSP_ASD_SHARED_MEM_SIZE 0x4000
+#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE 0x400000
struct psp_context;
+struct psp_xgmi_node_info;
struct psp_xgmi_topology_info;
enum psp_ring_type
@@ -80,12 +83,20 @@ struct psp_funcs
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
- uint64_t (*xgmi_get_device_id)(struct psp_context *psp);
+ uint64_t (*xgmi_get_node_id)(struct psp_context *psp);
uint64_t (*xgmi_get_hive_id)(struct psp_context *psp);
int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
- struct psp_xgmi_topology_info *topology);
+ struct psp_xgmi_topology_info *topology);
int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
- struct psp_xgmi_topology_info *topology);
+ struct psp_xgmi_topology_info *topology);
+};
+
+struct psp_xgmi_context {
+ uint8_t initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *xgmi_shared_bo;
+ uint64_t xgmi_shared_mc_addr;
+ void *xgmi_shared_buf;
};
struct psp_context
@@ -96,7 +107,7 @@ struct psp_context
const struct psp_funcs *funcs;
- /* fence buffer */
+ /* firmware buffer */
struct amdgpu_bo *fw_pri_bo;
uint64_t fw_pri_mc_addr;
void *fw_pri_buf;
@@ -134,6 +145,16 @@ struct psp_context
struct amdgpu_bo *cmd_buf_bo;
uint64_t cmd_buf_mc_addr;
struct psp_gfx_cmd_resp *cmd_buf_mem;
+
+ /* fence value associated with cmd buffer */
+ atomic_t fence_value;
+
+ /* xgmi ta firmware and buffer */
+ const struct firmware *ta_fw;
+ uint32_t ta_xgmi_ucode_version;
+ uint32_t ta_xgmi_ucode_size;
+ uint8_t *ta_xgmi_start_addr;
+ struct psp_xgmi_context xgmi_context;
};
struct amdgpu_psp_funcs {
@@ -141,21 +162,17 @@ struct amdgpu_psp_funcs {
enum AMDGPU_UCODE_ID);
};
+#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
+struct psp_xgmi_node_info {
+ uint64_t node_id;
+ uint8_t num_hops;
+ uint8_t is_sharing_enabled;
+ enum ta_xgmi_assigned_sdma_engine sdma_engine;
+};
+
struct psp_xgmi_topology_info {
- /* Generated by PSP to identify the GPU instance within xgmi connection */
- uint64_t device_id;
- /*
- * If all bits set to 0 , driver indicates it wants to retrieve the xgmi
- * connection vector topology, but not access enable the connections
- * if some or all bits are set to 1, driver indicates it want to retrieve the
- * current xgmi topology and access enable the link to GPU[i] associated
- * with the bit position in the vector.
- * On return,: bits indicated which xgmi links are present/active depending
- * on the value passed in. The relative bit offset for the relative GPU index
- * within the hive is always marked active.
- */
- uint32_t connection_mask;
- uint32_t reserved; /* must be 0 */
+ uint32_t num_nodes;
+ struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
};
#define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type))
@@ -177,8 +194,8 @@ struct psp_xgmi_topology_info {
((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_device_id(psp) \
- ((psp)->funcs->xgmi_get_device_id ? (psp)->funcs->xgmi_get_device_id((psp)) : 0)
+#define psp_xgmi_get_node_id(psp) \
+ ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0)
#define psp_xgmi_get_hive_id(psp) \
((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0)
#define psp_xgmi_get_topology_info(psp, num_device, topology) \
@@ -199,6 +216,9 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
int psp_gpu_reset(struct amdgpu_device *adev);
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+bool psp_support_vmr_ring(struct psp_context *psp);
+
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
#endif
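Since psp_xgmi_topology_info is now a flat array of per-node records instead of a connection bitmask, consumers simply iterate num_nodes. A minimal sketch (the debug print is illustrative):

    /* Sketch: walk the reworked topology layout. */
    static void example_dump_topology(struct psp_xgmi_topology_info *top)
    {
            uint32_t i;

            for (i = 0; i < top->num_nodes; i++)
                    pr_debug("node 0x%llx: hops=%u sharing=%u\n",
                             top->nodes[i].node_id,
                             top->nodes[i].num_hops,
                             top->nodes[i].is_sharing_enabled);
    }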
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index b70e85ec147d..335a0edf114b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -338,7 +338,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
- ring->ready = false;
+ ring->sched.ready = false;
/* Not to finish a ring which is not initialized */
if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
@@ -397,7 +397,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
- if (!ring->funcs->soft_recovery)
+ if (!ring->funcs->soft_recovery || !fence)
return false;
atomic_inc(&ring->adev->gpu_reset_counter);
@@ -500,3 +500,29 @@ static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
debugfs_remove(ring->ent);
#endif
}
+
+/**
+ * amdgpu_ring_test_helper - test the ring and set its scheduler readiness
+ *
+ * @ring: ring to test
+ *
+ * Tests the ring and sets the scheduler's ready status accordingly.
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
+ ring->name, r);
+ else
+ DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
+ ring->name);
+
+ ring->sched.ready = !r;
+ return r;
+}
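amdgpu_ring_test_helper() exists so each IP block's hw_init path can run the ring test and update sched.ready in one call rather than open-coding both steps. A usage sketch (the surrounding function is illustrative of an IP block's init code):

    /* Sketch: typical call site during hardware init. On failure the
     * helper has already logged the error and cleared sched.ready.
     */
    static int example_hw_init_ring(struct amdgpu_ring *ring)
    {
            int r;

            r = amdgpu_ring_test_helper(ring);
            if (r)
                    return r;       /* ring unusable; sched.ready is false */

            /* ... remaining per-ring init ... */
            return 0;
    }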
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4caa301ce454..0beb01fef83f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -129,8 +129,9 @@ struct amdgpu_ring_funcs {
unsigned emit_ib_size;
/* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch);
+ bool ctx_switch);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -189,7 +190,6 @@ struct amdgpu_ring {
uint64_t gpu_addr;
uint64_t ptr_mask;
uint32_t buf_mask;
- bool ready;
u32 idx;
u32 me;
u32 pipe;
@@ -229,7 +229,7 @@ struct amdgpu_ring {
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
+#define amdgpu_ring_emit_ib(r, job, ib, c) ((r)->funcs->emit_ib((r), (job), (ib), (c)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
@@ -313,4 +313,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
ring->count_dw -= count_dw;
}
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+
#endif
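With the reworked emit_ib prototype the job pointer may be NULL for direct submissions, which is exactly what the AMDGPU_JOB_GET_VMID() macro added in amdgpu_job.h guards against. A hedged sketch of an implementation under the new signature (the packet-building body is elided):

    /* Sketch: emit_ib under the new prototype; job may be NULL, so the
     * vmid comes through the NULL-safe macro from amdgpu_job.h.
     */
    static void example_ring_emit_ib(struct amdgpu_ring *ring,
                                     struct amdgpu_job *job,
                                     struct amdgpu_ib *ib,
                                     bool ctx_switch)
    {
            unsigned int vmid = AMDGPU_JOB_GET_VMID(job);

            /* ... write the INDIRECT_BUFFER packet using ib->gpu_addr,
             * ib->length_dw and vmid ...
             */
    }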
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
new file mode 100644
index 000000000000..c8793e6cc3c5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
+
+/**
+ * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Put the RLC into safe mode if it is enabled and not already in safe mode.
+ */
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
+{
+ if (adev->gfx.rlc.in_safe_mode)
+ return;
+
+ /* if RLC is not enabled, do nothing */
+ if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+ return;
+
+ if (adev->cg_flags &
+ (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ adev->gfx.rlc.funcs->set_safe_mode(adev);
+ adev->gfx.rlc.in_safe_mode = true;
+ }
+}
+
+/**
+ * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Take the RLC out of safe mode if it is enabled and currently in safe mode.
+ */
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
+{
+ if (!(adev->gfx.rlc.in_safe_mode))
+ return;
+
+ /* if RLC is not enabled, do nothing */
+ if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+ return;
+
+ if (adev->cg_flags &
+ (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ adev->gfx.rlc.funcs->unset_safe_mode(adev);
+ adev->gfx.rlc.in_safe_mode = false;
+ }
+}
+
+/**
+ * amdgpu_gfx_rlc_init_sr - Init save restore block
+ *
+ * @adev: amdgpu_device pointer
+ * @dws: size of the save/restore block, in dwords
+ *
+ * Allocate and initialize the RLC save/restore block.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
+{
+ const u32 *src_ptr;
+ volatile u32 *dst_ptr;
+ u32 i;
+ int r;
+
+ /* allocate save restore block */
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+ /* write the sr buffer */
+ src_ptr = adev->gfx.rlc.reg_list;
+ dst_ptr = adev->gfx.rlc.sr_ptr;
+ for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+ dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+ amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_csb - Init clear state block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and initialize the RLC clear state block.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
+{
+ volatile u32 *dst_ptr;
+ u32 dws;
+ int r;
+
+ /* allocate clear state block */
+ adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
+ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+ /* set up the cs buffer */
+ dst_ptr = adev->gfx.rlc.cs_ptr;
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
+ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_cpt - Init cp table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and initialize the RLC CP table.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
+ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+ /* set up the cp table */
+ amdgpu_gfx_rlc_setup_cp_table(adev);
+ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_setup_cp_table - set up the buffer of the CP table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Write CP firmware data into the CP table.
+ */
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
+{
+ const __le32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me;
+ u32 bo_offset = 0;
+ u32 table_offset, table_size;
+
+ max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
+
+ /* write the cp table buffer */
+ dst_ptr = adev->gfx.rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 4) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec2_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+
+ bo_offset += table_size;
+ }
+}
+
+/**
+ * amdgpu_gfx_rlc_fini - free the BOs used by the RLC
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the three BOs used for the RLC save/restore block, the clear state
+ * block and the jump table block.
+ */
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
+{
+ /* save restore block */
+ if (adev->gfx.rlc.save_restore_obj) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
+ }
+
+ /* clear state block */
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+
+ /* jump table block */
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
new file mode 100644
index 000000000000..49a8ab52113b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RLC_H__
+#define __AMDGPU_RLC_H__
+
+#include "clearstate_defs.h"
+
+struct amdgpu_rlc_funcs {
+ bool (*is_rlc_enabled)(struct amdgpu_device *adev);
+ void (*set_safe_mode)(struct amdgpu_device *adev);
+ void (*unset_safe_mode)(struct amdgpu_device *adev);
+ int (*init)(struct amdgpu_device *adev);
+ u32 (*get_csb_size)(struct amdgpu_device *adev);
+ void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+ int (*get_cp_table_num)(struct amdgpu_device *adev);
+ int (*resume)(struct amdgpu_device *adev);
+ void (*stop)(struct amdgpu_device *adev);
+ void (*reset)(struct amdgpu_device *adev);
+ void (*start)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_rlc {
+ /* for power gating */
+ struct amdgpu_bo *save_restore_obj;
+ uint64_t save_restore_gpu_addr;
+ volatile uint32_t *sr_ptr;
+ const u32 *reg_list;
+ u32 reg_list_size;
+ /* for clear state */
+ struct amdgpu_bo *clear_state_obj;
+ uint64_t clear_state_gpu_addr;
+ volatile uint32_t *cs_ptr;
+ const struct cs_section_def *cs_data;
+ u32 clear_state_size;
+ /* for cp tables */
+ struct amdgpu_bo *cp_table_obj;
+ uint64_t cp_table_gpu_addr;
+ volatile uint32_t *cp_table_ptr;
+ u32 cp_table_size;
+
+ /* safe mode for updating CG/PG state */
+ bool in_safe_mode;
+ const struct amdgpu_rlc_funcs *funcs;
+
+ /* for firmware data */
+ u32 save_and_restore_offset;
+ u32 clear_state_descriptor_offset;
+ u32 avail_scratch_ram_locations;
+ u32 reg_restore_list_size;
+ u32 reg_list_format_start;
+ u32 reg_list_format_separate_start;
+ u32 starting_offsets_start;
+ u32 reg_list_format_size_bytes;
+ u32 reg_list_size_bytes;
+ u32 reg_list_format_direct_reg_list_length;
+ u32 save_restore_list_cntl_size_bytes;
+ u32 save_restore_list_gpm_size_bytes;
+ u32 save_restore_list_srm_size_bytes;
+
+ u32 *register_list_format;
+ u32 *register_restore;
+ u8 *save_restore_list_cntl;
+ u8 *save_restore_list_gpm;
+ u8 *save_restore_list_srm;
+
+ bool is_rlc_v2_1;
+};
+
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
+
+#endif
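
Each GFX generation fills in the funcs table above and defers to the shared helpers; the gfx_v6_0.c hunks later in this patch show the real wiring. As a compact sketch (the gfx_vN names are placeholders, only the amdgpu_rlc_funcs members and the amdgpu_gfx_rlc_* entry points come from this patch):

static const struct amdgpu_rlc_funcs gfx_vN_rlc_funcs = {
	.init   = gfx_vN_rlc_init,	/* allocate SR/CSB/CP-table BOs */
	.resume = gfx_vN_rlc_resume,	/* load ucode and start the RLC */
	.stop   = gfx_vN_rlc_stop,
	.reset  = gfx_vN_rlc_reset,
	.start  = gfx_vN_rlc_start,
};

/* in the IP block's early_init callback */
adev->gfx.rlc.funcs = &gfx_vN_rlc_funcs;

/* CG/PG register updates are then bracketed by the common wrappers */
amdgpu_gfx_rlc_enter_safe_mode(adev);
/* ... program CG/PG state ... */
amdgpu_gfx_rlc_exit_safe_mode(adev);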
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index bc9244b429ef..115bb0c99b0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -28,17 +28,31 @@
* GPU SDMA IP block helper functions.
*/
-struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
- if (&adev->sdma.instance[i].ring == ring)
- break;
+ if (ring == &adev->sdma.instance[i].ring ||
+ ring == &adev->sdma.instance[i].page)
+ return &adev->sdma.instance[i];
- if (i < AMDGPU_MAX_SDMA_INSTANCES)
- return &adev->sdma.instance[i];
- else
- return NULL;
+ return NULL;
+}
+
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring ||
+ ring == &adev->sdma.instance[i].page) {
+ *index = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
}
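
Both lookups accept either the classic SDMA ring or the new per-instance page ring. A sketch of the intended call pattern (the surrounding function is hypothetical):

static void sdma_vN_use_instance(struct amdgpu_ring *ring)
{
	struct amdgpu_sdma_instance *sdma =
		amdgpu_sdma_get_instance_from_ring(ring);
	uint32_t instance;

	if (!sdma)
		return;	/* not an SDMA ring at all */

	/* the index variant reports -EINVAL instead of returning NULL */
	if (amdgpu_sdma_get_index_from_ring(ring, &instance))
		return;

	/* ... use sdma->burst_nop and instance here ... */
}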
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 500113ec65ca..16b1a6ae5ba6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -41,6 +41,7 @@ struct amdgpu_sdma_instance {
uint32_t feature_version;
struct amdgpu_ring ring;
+ struct amdgpu_ring page;
bool burst_nop;
};
@@ -50,6 +51,7 @@ struct amdgpu_sdma {
struct amdgpu_irq_src illegal_inst_irq;
int num_instances;
uint32_t srbm_soft_reset;
+ bool has_page_queue;
};
/*
@@ -92,6 +94,7 @@ struct amdgpu_buffer_funcs {
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
struct amdgpu_sdma_instance *
-amdgpu_get_sdma_instance(struct amdgpu_ring *ring);
+amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index e9bf70e2ac51..626abca770a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -218,6 +218,6 @@ TRACE_EVENT(amdgpu_vm_grab_id,
TP_ARGS(vm, ring, job),
TP_STRUCT__entry(
__field(u32, pasid)
- __field(u32, ring)
+ __string(ring, ring->name)
__field(u32, vmid)
__field(u32, vm_hub)
@@ -227,14 +228,14 @@ TRACE_EVENT(amdgpu_vm_grab_id,
TP_fast_assign(
__entry->pasid = vm->pasid;
- __entry->ring = ring->idx;
+ __assign_str(ring, ring->name);
__entry->vmid = job->vmid;
__entry->vm_hub = ring->funcs->vmhub,
__entry->pd_addr = job->vm_pd_addr;
__entry->needs_flush = job->vm_needs_flush;
),
- TP_printk("pasid=%d, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
- __entry->pasid, __entry->ring, __entry->vmid,
+ TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
+ __entry->pasid, __get_str(ring), __entry->vmid,
__entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
);
@@ -366,20 +367,20 @@ TRACE_EVENT(amdgpu_vm_flush,
uint64_t pd_addr),
TP_ARGS(ring, vmid, pd_addr),
TP_STRUCT__entry(
- __field(u32, ring)
+ __string(ring, ring->name)
__field(u32, vmid)
__field(u32, vm_hub)
__field(u64, pd_addr)
),
TP_fast_assign(
- __entry->ring = ring->idx;
+ __assign_str(ring, ring->name);
__entry->vmid = vmid;
__entry->vm_hub = ring->funcs->vmhub;
__entry->pd_addr = pd_addr;
),
- TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
- __entry->ring, __entry->vmid,
+ TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
+ __get_str(ring), __entry->vmid,
__entry->vm_hub,__entry->pd_addr)
);
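
Both events move from the numeric ring->idx to the ring name via the standard ftrace string triple: __string() reserves variable-length storage in TP_STRUCT__entry, __assign_str() copies the name in TP_fast_assign, and __get_str() reads it back in TP_printk. A minimal event using the pattern (the event name is made up):

TRACE_EVENT(example_ring_name,
	    TP_PROTO(struct amdgpu_ring *ring),
	    TP_ARGS(ring),
	    TP_STRUCT__entry(
			     __string(ring, ring->name)
			     ),
	    TP_fast_assign(
			   __assign_str(ring, ring->name);
			   ),
	    TP_printk("ring=%s", __get_str(ring))
);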
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a44fc12ae1f9..c91ec3101d00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -61,100 +61,6 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-/*
- * Global memory.
- */
-
-/**
- * amdgpu_ttm_mem_global_init - Initialize and acquire reference to
- * memory object
- *
- * @ref: Object for initialization.
- *
- * This is called by drm_global_item_ref() when an object is being
- * initialized.
- */
-static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-/**
- * amdgpu_ttm_mem_global_release - Drop reference to a memory object
- *
- * @ref: Object being removed
- *
- * This is called by drm_global_item_unref() when an object is being
- * released.
- */
-static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-/**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
- *
- * @adev: AMDGPU device for which the global structures need to be registered.
- *
- * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
- * during bring up.
- */
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- /* ensure reference is false in case init fails */
- adev->mman.mem_global_referenced = false;
-
- global_ref = &adev->mman.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &amdgpu_ttm_mem_global_init;
- global_ref->release = &amdgpu_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- goto error_mem;
- }
-
- adev->mman.bo_global_ref.mem_glob =
- adev->mman.mem_global_ref.object;
- global_ref = &adev->mman.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- goto error_bo;
- }
-
- mutex_init(&adev->mman.gtt_window_lock);
-
- adev->mman.mem_global_referenced = true;
-
- return 0;
-
-error_bo:
- drm_global_item_unref(&adev->mman.mem_global_ref);
-error_mem:
- return r;
-}
-
-static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
-{
- if (adev->mman.mem_global_referenced) {
- mutex_destroy(&adev->mman.gtt_window_lock);
- drm_global_item_unref(&adev->mman.bo_global_ref.ref);
- drm_global_item_unref(&adev->mman.mem_global_ref);
- adev->mman.mem_global_referenced = false;
- }
-}
-
static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -1758,14 +1664,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
int r;
u64 vis_vram_limit;
- /* initialize global references for vram/gtt */
- r = amdgpu_ttm_global_init(adev);
- if (r) {
- return r;
- }
+ mutex_init(&adev->mman.gtt_window_lock);
+
/* No other users of address space so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
- adev->mman.bo_global_ref.ref.object,
&amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -1922,7 +1824,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_bo_device_release(&adev->mman.bdev);
- amdgpu_ttm_global_fini(adev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
}
@@ -2069,7 +1970,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
unsigned i;
int r;
- if (direct_submit && !ring->ready) {
+ if (direct_submit && !ring->sched.ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index fe8f276e9811..b5b2d101f7db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -39,8 +39,6 @@
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
struct amdgpu_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index aa6641b944a0..7ac25a1c7853 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -58,6 +58,17 @@ struct psp_firmware_header_v1_0 {
};
/* version_major=1, version_minor=0 */
+struct ta_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ta_xgmi_ucode_version;
+ uint32_t ta_xgmi_offset_bytes;
+ uint32_t ta_xgmi_size_bytes;
+ uint32_t ta_ras_ucode_version;
+ uint32_t ta_ras_offset_bytes;
+ uint32_t ta_ras_size_bytes;
+};
+
+/* version_major=1, version_minor=0 */
struct gfx_firmware_header_v1_0 {
struct common_firmware_header header;
uint32_t ucode_feature_version;
@@ -170,6 +181,7 @@ union amdgpu_firmware_header {
struct mc_firmware_header_v1_0 mc;
struct smc_firmware_header_v1_0 smc;
struct psp_firmware_header_v1_0 psp;
+ struct ta_firmware_header_v1_0 ta;
struct gfx_firmware_header_v1_0 gfx;
struct rlc_firmware_header_v1_0 rlc;
struct rlc_firmware_header_v2_0 rlc_v2_0;
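
The PSP code that consumes the new header can locate the XGMI and RAS TA binaries inside the combined TA firmware image along these lines (a sketch only; the psp.ta_fw field name and the offset arithmetic are assumptions modeled on how the other firmware headers are parsed):

const struct ta_firmware_header_v1_0 *ta_hdr =
	(const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;

/* the ucode array starts at the common offset; each TA adds its own */
const uint8_t *xgmi_start = (const uint8_t *)ta_hdr +
	le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes) +
	le32_to_cpu(ta_hdr->ta_xgmi_offset_bytes);
uint32_t xgmi_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);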
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e5a6db6beab7..4e5d13e41f6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -692,6 +692,8 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
buf_sizes[0x1] = dpb_size;
buf_sizes[0x2] = image_size;
buf_sizes[0x4] = min_ctx_size;
+ /* store image width to adjust nb memory pstate */
+ adev->uvd.decode_image_width = width;
return 0;
}
@@ -1243,30 +1245,20 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence;
long r;
- uint32_t ip_instance = ring->me;
r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
+ if (r)
goto error;
- }
r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
- } else {
- DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
+ else if (r > 0)
r = 0;
- }
dma_fence_put(fence);
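
The same simplification repeats in the VCE, VCN and SDMA tests below: dma_fence_wait_timeout() returns the remaining timeout (positive) on success, 0 on timeout and a negative errno on failure, and each call site now folds that into the usual 0/-errno convention instead of logging. The shared shape, condensed:

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)		/* the wait ran out */
		r = -ETIMEDOUT;
	else if (r > 0)		/* fence signaled in time */
		r = 0;
	/* r < 0: propagate the wait error unchanged */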
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index a3ab1a41060f..5eb63288d157 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -65,6 +65,8 @@ struct amdgpu_uvd {
struct drm_sched_entity entity;
struct delayed_work idle_work;
unsigned harvest_config;
+ /* store image width to adjust nb memory pstate */
+ unsigned decode_image_width;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 5f3f54073818..98a1b2ce2b9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1032,8 +1032,10 @@ out:
* @ib: the IB to execute
*
*/
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
amdgpu_ring_write(ring, VCE_CMD_IB);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1079,11 +1081,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
return 0;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring, VCE_CMD_END);
amdgpu_ring_commit(ring);
@@ -1093,14 +1093,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed\n",
- ring->idx);
+ if (i >= timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -1121,27 +1115,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
return 0;
r = amdgpu_vce_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index a1f209eed4c4..50293652af14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -65,8 +65,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ struct amdgpu_ib *ib, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 27da13df2f11..e2e42e3fbcf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -425,11 +425,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -441,14 +439,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -570,30 +563,20 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
dma_fence_put(fence);
-
error:
return r;
}
@@ -606,11 +589,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring, VCN_ENC_CMD_END);
amdgpu_ring_commit(ring);
@@ -620,14 +601,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed\n",
- ring->idx);
+ if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -742,27 +717,19 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
@@ -778,11 +745,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
@@ -796,14 +760,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
return r;
}
@@ -856,21 +814,18 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r = 0;
r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
goto error;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto error;
- } else
+ } else {
r = 0;
+ }
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
@@ -879,15 +834,10 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout)
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
- else {
- DRM_ERROR("ib test failed (0x%08X)\n", tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
dma_fence_put(fence);
-
error:
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f2f358aa0597..462a04e0f5e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -23,16 +23,6 @@
#include "amdgpu.h"
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
-{
- uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
-
- addr -= AMDGPU_VA_RESERVED_SIZE;
- addr = amdgpu_gmc_sign_extend(addr);
-
- return addr;
-}
-
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
/* By now all MMIO pages except mailbox are blocked */
@@ -41,88 +31,6 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
-{
- int r;
- void *ptr;
-
- r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
- &adev->virt.csa_vmid0_addr, &ptr);
- if (r)
- return r;
-
- memset(ptr, 0, AMDGPU_CSA_SIZE);
- return 0;
-}
-
-void amdgpu_free_static_csa(struct amdgpu_device *adev) {
- amdgpu_bo_free_kernel(&adev->virt.csa_obj,
- &adev->virt.csa_vmid0_addr,
- NULL);
-}
-
-/*
- * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
- * submission of GFX should use this virtual address within META_DATA init
- * package to support SRIOV gfx preemption.
- */
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo_va **bo_va)
-{
- uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
- struct ww_acquire_ctx ticket;
- struct list_head list;
- struct amdgpu_bo_list_entry pd;
- struct ttm_validate_buffer csa_tv;
- int r;
-
- INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&csa_tv.head);
- csa_tv.bo = &adev->virt.csa_obj->tbo;
- csa_tv.shared = true;
-
- list_add(&csa_tv.head, &list);
- amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
- if (r) {
- DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
- return r;
- }
-
- *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
- if (!*bo_va) {
- ttm_eu_backoff_reservation(&ticket, &list);
- DRM_ERROR("failed to create bo_va for static CSA\n");
- return -ENOMEM;
- }
-
- r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
- AMDGPU_CSA_SIZE);
- if (r) {
- DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, *bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
- }
-
- r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r) {
- DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, *bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
- }
-
- ttm_eu_backoff_reservation(&ticket, &list);
- return 0;
-}
-
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
/* enable virtual display */
@@ -162,9 +70,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
goto failed_kiq_read;
- if (in_interrupt())
- might_sleep();
-
+ might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -210,9 +116,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
goto failed_kiq_write;
- if (in_interrupt())
- might_sleep();
-
+ might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
@@ -228,6 +132,46 @@ failed_kiq_write:
pr_err("failed to write reg:%x\n", reg);
}
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+ ref, mask);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* don't wait anymore for IRQ context */
+ if (r < 1 && in_interrupt())
+ goto failed_kiq;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq;
+
+ return;
+
+failed_kiq:
+ pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
/**
* amdgpu_virt_request_full_gpu() - request full gpu access
* @adev: amdgpu device.
@@ -390,7 +334,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
if (adev->fw_vram_usage.va != NULL) {
adev->virt.fw_reserve.p_pf2vf =
- (struct amdgim_pf2vf_info_header *)(
+ (struct amd_sriov_msg_pf2vf_info_header *)(
adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
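
A natural caller of the new write-and-wait helper is the SR-IOV TLB invalidation path, which needs the request write and the acknowledge poll to go through the KIQ as one submission. A sketch (req_reg, ack_reg and inv_req stand in for the per-hub invalidation registers and request value, which this diff does not show):

	if (adev->gfx.kiq.ring.sched.ready && !adev->in_gpu_reset)
		/* write inv_req to req_reg, then poll ack_reg
		 * until the vmid bit shows up
		 */
		amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg,
						   inv_req, 1 << vmid);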
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 880ac113a3a9..722deefc0a7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -63,8 +63,8 @@ struct amdgpu_virt_ops {
* Firmware Reserve Frame buffer
*/
struct amdgpu_virt_fw_reserve {
- struct amdgim_pf2vf_info_header *p_pf2vf;
- struct amdgim_vf2pf_info_header *p_vf2pf;
+ struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
+ struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
unsigned int checksum_key;
};
/*
@@ -85,15 +85,17 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
};
-struct amdgim_pf2vf_info_header {
+struct amd_sriov_msg_pf2vf_info_header {
/* the total structure size in byte. */
uint32_t size;
/* version of this structure, written by the GIM */
uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
} __aligned(4);
struct amdgim_pf2vf_info_v1 {
/* header contains size and version */
- struct amdgim_pf2vf_info_header header;
+ struct amd_sriov_msg_pf2vf_info_header header;
/* max_width * max_height */
unsigned int uvd_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
@@ -112,7 +114,7 @@ struct amdgim_pf2vf_info_v1 {
struct amdgim_pf2vf_info_v2 {
/* header contains size and version */
- struct amdgim_pf2vf_info_header header;
+ struct amd_sriov_msg_pf2vf_info_header header;
/* use private key from mailbox 2 to create checksum */
uint32_t checksum;
/* The features flags of the GIM driver supports. */
@@ -137,20 +139,22 @@ struct amdgim_pf2vf_info_v2 {
uint64_t vcefw_kboffset;
/* VCE FW size in KB */
uint32_t vcefw_ksize;
- uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)];
} __aligned(4);
-struct amdgim_vf2pf_info_header {
+struct amd_sriov_msg_vf2pf_info_header {
/* the total structure size in byte. */
uint32_t size;
/* version of this structure, written by the guest */
uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
} __aligned(4);
struct amdgim_vf2pf_info_v1 {
/* header contains size and version */
- struct amdgim_vf2pf_info_header header;
+ struct amd_sriov_msg_vf2pf_info_header header;
/* driver version */
char driver_version[64];
/* driver certification, 1=WHQL, 0=None */
@@ -180,7 +184,7 @@ struct amdgim_vf2pf_info_v1 {
struct amdgim_vf2pf_info_v2 {
/* header contains size and version */
- struct amdgim_vf2pf_info_header header;
+ struct amd_sriov_msg_vf2pf_info_header header;
uint32_t checksum;
/* driver version */
uint8_t driver_version[64];
@@ -206,7 +210,7 @@ struct amdgim_vf2pf_info_v2 {
uint32_t uvd_enc_usage;
/* guest uvd engine usage percentage. 0xffff means N/A. */
uint32_t uvd_enc_health;
- uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);
#define AMDGPU_FW_VRAM_VF2PF_VER 2
@@ -238,7 +242,6 @@ typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
struct amdgpu_virt {
uint32_t caps;
struct amdgpu_bo *csa_obj;
- uint64_t csa_vmid0_addr;
bool chained_ib_support;
uint32_t reg_val_offs;
struct amdgpu_irq_src ack_irq;
@@ -251,8 +254,6 @@ struct amdgpu_virt {
uint32_t gim_feature;
};
-#define AMDGPU_CSA_SIZE (8 * 1024)
-
#define amdgpu_sriov_enabled(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
@@ -277,17 +278,13 @@ static inline bool is_virtual_machine(void)
#endif
}
-struct amdgpu_vm;
-
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo_va **bo_va);
-void amdgpu_free_static_csa(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 352b30409060..fc91f3e54a87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -617,7 +617,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{
entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo;
- entry->tv.shared = true;
+ /* One for the VM updates, one for TTM and one for the CS job */
+ entry->tv.num_shared = 3;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
@@ -773,10 +774,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
- r = reservation_object_reserve_shared(bo->tbo.resv);
- if (r)
- return r;
-
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto error;
@@ -1632,13 +1629,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
continue;
}
- /* First check if the entry is already handled */
- if (cursor.pfn < frag_start) {
- cursor.entry->huge = true;
- amdgpu_vm_pt_next(adev, &cursor);
- continue;
- }
-
/* If it isn't already handled it can't be a huge page */
if (cursor.entry->huge) {
/* Add the entry to the relocated list to update it. */
@@ -1701,8 +1691,17 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
}
} while (frag_start < entry_end);
- if (frag >= shift)
+ if (amdgpu_vm_pt_descendant(adev, &cursor)) {
+ /* Mark all child entries as huge */
+ while (cursor.pfn < frag_start) {
+ cursor.entry->huge = true;
+ amdgpu_vm_pt_next(adev, &cursor);
+ }
+
+ } else if (frag >= shift) {
+ /* or just move on to the next on the same level. */
amdgpu_vm_pt_next(adev, &cursor);
+ }
}
return 0;
@@ -1840,10 +1839,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
- if (r)
- goto error_free;
-
r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
if (r)
goto error_free;
@@ -3024,6 +3019,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
goto error_free_root;
+ r = reservation_object_reserve_shared(root->tbo.resv, 1);
+ if (r)
+ goto error_unreserve;
+
r = amdgpu_vm_clear_bo(adev, vm, root,
adev->vm_manager.root_level,
vm->pte_support_ats);
@@ -3053,7 +3052,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
INIT_KFIFO(vm->faults);
- vm->fault_credit = 16;
return 0;
@@ -3266,42 +3264,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
/**
- * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
- *
- * @adev: amdgpu_device pointer
- * @pasid: PASID do identify the VM
- *
- * This function is expected to be called in interrupt context.
- *
- * Returns:
- * True if there was fault credit, false otherwise
- */
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
- unsigned int pasid)
-{
- struct amdgpu_vm *vm;
-
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- if (!vm) {
- /* VM not found, can't track fault credit */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- }
-
- /* No lock needed. only accessed by IRQ handler */
- if (!vm->fault_credit) {
- /* Too many faults in this VM */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return false;
- }
-
- vm->fault_credit--;
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
-}
-
-/**
* amdgpu_vm_manager_init - init the VM manager
*
* @adev: amdgpu_device pointer
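
The reservation changes in this file belong together: the per-update reservation_object_reserve_shared() calls are dropped, one shared slot is reserved for the VM's lifetime in amdgpu_vm_init(), and tv.num_shared tells the execbuf helpers how many shared-fence slots each submission still needs. In caller terms (a sketch restating the two sites touched above):

	/* validation list entry for the page directory */
	entry->tv.bo = &vm->root.base.bo->tbo;
	entry->tv.num_shared = 3;	/* VM updates + TTM + the CS job */

	/* and once, at VM creation, instead of before every update */
	r = reservation_object_reserve_shared(root->tbo.resv, 1);
	if (r)
		goto error_unreserve;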
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2a8898d19c8b..e8dcfd59fc93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -229,9 +229,6 @@ struct amdgpu_vm {
/* Up to 128 pending retry page faults */
DECLARE_KFIFO(faults, u64, 128);
- /* Limit non-retry fault storms */
- unsigned int fault_credit;
-
/* Points to the KFD process VM info */
struct amdkfd_process_info *process_info;
@@ -299,8 +296,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
- unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 897afbb348c1..0b263a9857c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -23,7 +23,7 @@
*/
#include <linux/list.h>
#include "amdgpu.h"
-#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -31,15 +31,16 @@ static DEFINE_MUTEX(xgmi_mutex);
#define AMDGPU_MAX_XGMI_HIVE 8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
-struct amdgpu_hive_info {
- uint64_t hive_id;
- struct list_head device_list;
-};
-
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
-static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
+
+void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
+{
+ return &hive->device_list;
+}
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
int i;
struct amdgpu_hive_info *tmp;
@@ -58,62 +59,99 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
tmp = &xgmi_hives[hive_count++];
tmp->hive_id = adev->gmc.xgmi.hive_id;
INIT_LIST_HEAD(&tmp->device_list);
+ mutex_init(&tmp->hive_lock);
+
return tmp;
}
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
+{
+ int ret = -EINVAL;
+
+ /* Each psp needs to set the latest topology */
+ ret = psp_xgmi_set_topology_info(&adev->psp,
+ hive->number_devices,
+ &hive->topology_info);
+ if (ret)
+ dev_err(adev->dev,
+ "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
+ adev->gmc.xgmi.node_id,
+ adev->gmc.xgmi.hive_id, ret);
+ else
+ dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n",
+ adev->gmc.xgmi.physical_node_id,
+ adev->gmc.xgmi.hive_id);
+
+ return ret;
+}
+
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
- struct psp_xgmi_topology_info tmp_topology[AMDGPU_MAX_XGMI_DEVICE_PER_HIVE];
+ struct psp_xgmi_topology_info *hive_topology;
struct amdgpu_hive_info *hive;
struct amdgpu_xgmi *entry;
- struct amdgpu_device *tmp_adev;
+ struct amdgpu_device *tmp_adev = NULL;
int count = 0, ret = -EINVAL;
- if ((adev->asic_type < CHIP_VEGA20) ||
- (adev->flags & AMD_IS_APU) )
+ if (!adev->gmc.xgmi.supported)
return 0;
- adev->gmc.xgmi.device_id = psp_xgmi_get_device_id(&adev->psp);
+
+ adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp);
adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
- memset(&tmp_topology[0], 0, sizeof(tmp_topology));
mutex_lock(&xgmi_mutex);
hive = amdgpu_get_xgmi_hive(adev);
if (!hive)
goto exit;
+ hive_topology = &hive->topology_info;
+
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
list_for_each_entry(entry, &hive->device_list, head)
- tmp_topology[count++].device_id = entry->device_id;
+ hive_topology->nodes[count++].node_id = entry->node_id;
+ hive->number_devices = count;
- ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology);
- if (ret) {
- dev_err(adev->dev,
- "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
- adev->gmc.xgmi.device_id,
- adev->gmc.xgmi.hive_id, ret);
- goto exit;
- }
- /* Each psp need to set the latest topology */
+ /* Each psp needs to get the latest topology */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology);
+ ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology);
if (ret) {
dev_err(tmp_adev->dev,
- "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
- tmp_adev->gmc.xgmi.device_id,
+ "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
+ tmp_adev->gmc.xgmi.node_id,
tmp_adev->gmc.xgmi.hive_id, ret);
- /* To do : continue with some node failed or disable the whole hive */
+ /* To do : continue with some node failed or disable the whole hive */
break;
}
}
- if (!ret)
- dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n",
- adev->gmc.xgmi.physical_node_id,
- adev->gmc.xgmi.hive_id);
+
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ if (ret)
+ break;
+ }
exit:
mutex_unlock(&xgmi_mutex);
return ret;
}
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive;
+ if (!adev->gmc.xgmi.supported)
+ return;
+
+ mutex_lock(&xgmi_mutex);
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (!hive)
+ goto exit;
+
+ if (!(hive->number_devices--))
+ mutex_destroy(&hive->hive_lock);
+
+exit:
+ mutex_unlock(&xgmi_mutex);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
new file mode 100644
index 000000000000..6151eb9c8ad3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_XGMI_H__
+#define __AMDGPU_XGMI_H__
+
+#include "amdgpu_psp.h"
+
+struct amdgpu_hive_info {
+ uint64_t hive_id;
+ struct list_head device_list;
+ struct psp_xgmi_topology_info topology_info;
+ int number_devices;
+ struct mutex hive_lock;
+};
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
+int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+
+#endif
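
Bring-up and teardown are expected to bracket hive membership with the two device entry points above (a caller-side sketch; the actual wiring lives in amdgpu_device.c, outside this diff):

	/* late init: join the hive and push topology to every member */
	r = amdgpu_xgmi_add_device(adev);
	if (r)
		dev_warn(adev->dev, "XGMI init failed (%d)\n", r);

	/* fini: leave the hive; the last device out destroys hive_lock */
	amdgpu_xgmi_remove_device(adev);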
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 79220a91abe3..86e14c754dd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
if (pi->caps_sq_ramping || pi->caps_db_ramping ||
pi->caps_td_ramping || pi->caps_tcp_ramping) {
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (enable) {
ret = ci_program_pt_config_registers(adev, didt_config_ci);
if (ret) {
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return ret;
}
}
ci_do_enable_didt(adev, enable);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index f41f5f57e9f3..71c50d8900e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1755,6 +1755,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.flush_hdp = &cik_flush_hdp,
.invalidate_hdp = &cik_invalidate_hdp,
.need_full_reset = &cik_need_full_reset,
+ .init_doorbell_index = &legacy_doorbell_index_init,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index e49c6f15a0a0..54c625a2e570 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -30,4 +30,5 @@ void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index b5775c6a857b..8a8b4967a101 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -228,34 +228,6 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* [127:96] - reserved
*/
-/**
- * cik_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
/**
* cik_ih_decode_iv - decode an interrupt vector
*
@@ -461,7 +433,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cik_ih_funcs = {
.get_wptr = cik_ih_get_wptr,
- .prescreen_iv = cik_ih_prescreen_iv,
.decode_iv = cik_ih_decode_iv,
.set_rptr = cik_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index b918c8886b75..45795191de1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -198,7 +198,7 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -218,9 +218,11 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (CIK).
*/
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 extra_bits = vmid & 0xf;
/* IB packet must end on a 8 DW boundary */
@@ -316,8 +318,8 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -494,18 +496,16 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- ring->ready = true;
+ ring->sched.ready = true;
}
cik_sdma_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -618,21 +618,17 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
+
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
@@ -647,15 +643,11 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -678,20 +670,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -706,21 +694,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -822,7 +805,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
*/
static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
@@ -1214,8 +1197,11 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ u8 instance_id;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+ instance_id = (entry->ring_id & 0x3) >> 0;
+ drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
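
As in the GFX handlers, the illegal-instruction interrupt no longer schedules a device-wide reset; drm_sched_fault() instead makes the scheduler of the offending ring time out the guilty job, so recovery stays local to that ring. Condensed, the new handler shape is:

	static int sdma_illegal_inst_irq(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 struct amdgpu_iv_entry *entry)
	{
		u8 instance_id = entry->ring_id & 0x3;

		/* hand the fault to the per-ring scheduler */
		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
		return 0;
	}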
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index df5ac4d85a00..9d3ea298e116 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -208,34 +208,6 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * cz_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* cz_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -442,7 +414,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cz_ih_funcs = {
.get_wptr = cz_ih_get_wptr,
- .prescreen_iv = cz_ih_prescreen_iv,
.decode_iv = cz_ih_decode_iv,
.set_rptr = cz_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index d76eb27945dc..1dc3013ea1d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1775,18 +1775,15 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
+ if (r)
+ goto error_free_scratch;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1798,13 +1795,11 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_scratch:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -1845,9 +1840,11 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
}
static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
@@ -1892,17 +1889,15 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err1;
- }
+
ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
ib.ptr[2] = 0xDEADBEEF;
@@ -1914,22 +1909,16 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ else
r = -EINVAL;
- }
err2:
amdgpu_ib_free(adev, &ib, NULL);
@@ -1950,9 +1939,9 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
CP_ME_CNTL__CE_HALT_MASK));
WREG32(mmSCRATCH_UMSK, 0);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].ready = false;
+ adev->gfx.gfx_ring[i].sched.ready = false;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].ready = false;
+ adev->gfx.compute_ring[i].sched.ready = false;
}
udelay(50);
}
@@ -2124,12 +2113,9 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the rings */
gfx_v6_0_cp_gfx_start(adev);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
return 0;
}
@@ -2227,14 +2213,11 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
WREG32(mmCP_RB2_CNTL, tmp);
WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
- adev->gfx.compute_ring[0].ready = false;
- adev->gfx.compute_ring[1].ready = false;
for (i = 0; i < 2; i++) {
- r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]);
+ r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
if (r)
return r;
- adev->gfx.compute_ring[i].ready = true;
}
return 0;
@@ -2368,18 +2351,11 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{
const u32 *src_ptr;
volatile u32 *dst_ptr;
- u32 dws, i;
+ u32 dws;
u64 reg_list_mc_addr;
const struct cs_section_def *cs_data;
int r;
@@ -2394,26 +2370,10 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
cs_data = adev->gfx.rlc.cs_data;
if (src_ptr) {
- /* save restore block */
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.save_restore_obj,
- &adev->gfx.rlc.save_restore_gpu_addr,
- (void **)&adev->gfx.rlc.sr_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
- r);
- gfx_v6_0_rlc_fini(adev);
+ /* init save restore block */
+ r = amdgpu_gfx_rlc_init_sr(adev, dws);
+ if (r)
return r;
- }
-
- /* write the sr buffer */
- dst_ptr = adev->gfx.rlc.sr_ptr;
- for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
- dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-
- amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
}
if (cs_data) {
@@ -2428,7 +2388,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
(void **)&adev->gfx.rlc.cs_ptr);
if (r) {
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v6_0_rlc_fini(adev);
+ amdgpu_gfx_rlc_fini(adev);
return r;
}
@@ -2549,8 +2509,8 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
if (!adev->gfx.rlc_fw)
return -EINVAL;
- gfx_v6_0_rlc_stop(adev);
- gfx_v6_0_rlc_reset(adev);
+ adev->gfx.rlc.funcs->stop(adev);
+ adev->gfx.rlc.funcs->reset(adev);
gfx_v6_0_init_pg(adev);
gfx_v6_0_init_cg(adev);
@@ -2578,7 +2538,7 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
WREG32(mmRLC_UCODE_ADDR, 0);
gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
- gfx_v6_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
@@ -3075,6 +3035,14 @@ static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
.select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
};
+static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
+ .init = gfx_v6_0_rlc_init,
+ .resume = gfx_v6_0_rlc_resume,
+ .stop = gfx_v6_0_rlc_stop,
+ .reset = gfx_v6_0_rlc_reset,
+ .start = gfx_v6_0_rlc_start
+};
+
static int gfx_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3082,6 +3050,7 @@ static int gfx_v6_0_early_init(void *handle)
adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+ adev->gfx.rlc.funcs = &gfx_v6_0_rlc_funcs;
gfx_v6_0_set_ring_funcs(adev);
gfx_v6_0_set_irq_funcs(adev);
@@ -3114,7 +3083,7 @@ static int gfx_v6_0_sw_init(void *handle)
return r;
}
- r = gfx_v6_0_rlc_init(adev);
+ r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -3165,7 +3134,7 @@ static int gfx_v6_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- gfx_v6_0_rlc_fini(adev);
+ amdgpu_gfx_rlc_fini(adev);
return 0;
}
@@ -3177,7 +3146,7 @@ static int gfx_v6_0_hw_init(void *handle)
gfx_v6_0_constants_init(adev);
- r = gfx_v6_0_rlc_resume(adev);
+ r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -3195,7 +3164,7 @@ static int gfx_v6_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfx_v6_0_cp_enable(adev, false);
- gfx_v6_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
gfx_v6_0_fini_pg(adev);
return 0;
@@ -3393,12 +3362,31 @@ static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v6_0_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ struct amdgpu_ring *ring;
+
+ switch (entry->ring_id) {
+ case 0:
+ ring = &adev->gfx.gfx_ring[0];
+ break;
+ case 1:
+ case 2:
+ ring = &adev->gfx.compute_ring[entry->ring_id - 1];
+ break;
+ default:
+ return;
+ }
+ drm_sched_fault(&ring->sched);
+}
+
static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v6_0_fault(adev, entry);
return 0;
}
@@ -3407,7 +3395,7 @@ static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal instruction in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v6_0_fault(adev, entry);
return 0;
}
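
[ Note: the conversions in this file lean on amdgpu_ring_test_helper() to fold the
ready-flag bookkeeping into one place. A minimal sketch of the assumed helper is
below; the authoritative version lives in amdgpu_ring.c, and the log string here
is illustrative only. ]

	int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
	{
		struct amdgpu_device *adev = ring->adev;
		int r;

		r = amdgpu_ring_test_ring(ring);
		if (r)
			DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
				      ring->name, r);

		/* the scheduler only accepts jobs once the ring passed its test */
		ring->sched.ready = !r;

		return r;
	}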
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0e72bc09939a..3a9fb6018c16 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] =
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
@@ -2064,17 +2063,14 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
+ if (r)
+ goto error_free_scratch;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -2086,13 +2082,10 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_scratch:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -2233,9 +2226,11 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
* on the gfx ring for execution by the GPU.
*/
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
@@ -2262,9 +2257,11 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
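
[ Note: both emit_ib hooks now derive the vmid from the job pointer. The macro is
assumed to be the null-tolerant accessor from amdgpu_job.h, roughly: ]

	/* a job-less (internal) submission gets vmid 0 */
	#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)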
@@ -2316,17 +2313,15 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err1;
- }
+
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
ib.ptr[2] = 0xDEADBEEF;
@@ -2338,22 +2333,16 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ else
r = -EINVAL;
- }
err2:
amdgpu_ib_free(adev, &ib, NULL);
@@ -2403,7 +2392,7 @@ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
} else {
WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].ready = false;
+ adev->gfx.gfx_ring[i].sched.ready = false;
}
udelay(50);
}
@@ -2613,12 +2602,9 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
gfx_v7_0_cp_gfx_start(adev);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
return 0;
}
@@ -2675,7 +2661,7 @@ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].ready = false;
+ adev->gfx.compute_ring[i].sched.ready = false;
}
udelay(50);
}
@@ -2781,7 +2767,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
* GFX7_MEC_HPD_SIZE * 2;
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
@@ -3106,10 +3092,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r)
- ring->ready = false;
+ amdgpu_ring_test_helper(ring);
}
return 0;
@@ -3268,18 +3251,10 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
* The RLC is a multi-purpose microengine that handles a
* variety of functions.
*/
-static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
- u32 dws, i;
+ u32 dws;
const struct cs_section_def *cs_data;
int r;
@@ -3306,66 +3281,23 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
cs_data = adev->gfx.rlc.cs_data;
if (src_ptr) {
- /* save restore block */
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.save_restore_obj,
- &adev->gfx.rlc.save_restore_gpu_addr,
- (void **)&adev->gfx.rlc.sr_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
+ /* init save restore block */
+ r = amdgpu_gfx_rlc_init_sr(adev, dws);
+ if (r)
return r;
- }
-
- /* write the sr buffer */
- dst_ptr = adev->gfx.rlc.sr_ptr;
- for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
- dst_ptr[i] = cpu_to_le32(src_ptr[i]);
- amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
}
if (cs_data) {
- /* clear state block */
- adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
-
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
+ /* init clear state block */
+ r = amdgpu_gfx_rlc_init_csb(adev);
+ if (r)
return r;
- }
-
- /* set up the cs buffer */
- dst_ptr = adev->gfx.rlc.cs_ptr;
- gfx_v7_0_get_csb_buffer(adev, dst_ptr);
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
if (adev->gfx.rlc.cp_table_size) {
-
- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
+ r = amdgpu_gfx_rlc_init_cpt(adev);
+ if (r)
return r;
- }
-
- gfx_v7_0_init_cp_pg_table(adev);
-
- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
}
return 0;
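
[ Note: the rlc_init path now delegates buffer setup to shared helpers. A sketch of
what amdgpu_gfx_rlc_init_cpt() is assumed to do, under the assumption that an
amdgpu_gfx_rlc_setup_cp_table() helper fills the jump tables for the microengine
count reported by get_cp_table_num(); the real code is in amdgpu_rlc.c: ]

	int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
	{
		int r;

		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.cp_table_obj,
					      &adev->gfx.rlc.cp_table_gpu_addr,
					      (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
			amdgpu_gfx_rlc_fini(adev);
			return r;
		}

		/* fill the per-microengine jump tables, then release the mapping */
		amdgpu_gfx_rlc_setup_cp_table(adev);
		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

		return 0;
	}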
@@ -3446,7 +3378,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
return orig;
}
-static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
+{
+ return true;
+}
+
+static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
{
u32 tmp, i, mask;
@@ -3468,7 +3405,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
{
u32 tmp;
@@ -3545,13 +3482,13 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
adev->gfx.rlc_feature_version = le32_to_cpu(
hdr->ucode_feature_version);
- gfx_v7_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
/* disable CG */
tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
- gfx_v7_0_rlc_reset(adev);
+ adev->gfx.rlc.funcs->reset(adev);
gfx_v7_0_init_pg(adev);
@@ -3582,7 +3519,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_BONAIRE)
WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
- gfx_v7_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
@@ -3784,72 +3721,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
WREG32(mmRLC_PG_CNTL, data);
}
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
+static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
{
- const __le32 *fw_data;
- volatile u32 *dst_ptr;
- int me, i, max_me = 4;
- u32 bo_offset = 0;
- u32 table_offset, table_size;
-
if (adev->asic_type == CHIP_KAVERI)
- max_me = 5;
-
- if (adev->gfx.rlc.cp_table_ptr == NULL)
- return;
-
- /* write the cp table buffer */
- dst_ptr = adev->gfx.rlc.cp_table_ptr;
- for (me = 0; me < max_me; me++) {
- if (me == 0) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.ce_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 1) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.pfp_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 2) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.me_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 3) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec2_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- }
-
- for (i = 0; i < table_size; i ++) {
- dst_ptr[bo_offset + i] =
- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
- }
-
- bo_offset += table_size;
- }
+ return 5;
+ else
+ return 4;
}
static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
@@ -4288,8 +4165,17 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
};
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
- .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
- .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+ .is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
+ .set_safe_mode = gfx_v7_0_set_safe_mode,
+ .unset_safe_mode = gfx_v7_0_unset_safe_mode,
+ .init = gfx_v7_0_rlc_init,
+ .get_csb_size = gfx_v7_0_get_csb_size,
+ .get_csb_buffer = gfx_v7_0_get_csb_buffer,
+ .get_cp_table_num = gfx_v7_0_cp_pg_table_num,
+ .resume = gfx_v7_0_rlc_resume,
+ .stop = gfx_v7_0_rlc_stop,
+ .reset = gfx_v7_0_rlc_reset,
+ .start = gfx_v7_0_rlc_start
};
static int gfx_v7_0_early_init(void *handle)
@@ -4477,7 +4363,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+ ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -4540,7 +4426,7 @@ static int gfx_v7_0_sw_init(void *handle)
return r;
}
- r = gfx_v7_0_rlc_init(adev);
+ r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -4604,7 +4490,7 @@ static int gfx_v7_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
gfx_v7_0_cp_compute_fini(adev);
- gfx_v7_0_rlc_fini(adev);
+ amdgpu_gfx_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
@@ -4627,7 +4513,7 @@ static int gfx_v7_0_hw_init(void *handle)
gfx_v7_0_constants_init(adev);
/* init rlc */
- r = gfx_v7_0_rlc_resume(adev);
+ r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -4645,7 +4531,7 @@ static int gfx_v7_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
gfx_v7_0_cp_enable(adev, false);
- gfx_v7_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
gfx_v7_0_fini_pg(adev);
return 0;
@@ -4730,7 +4616,7 @@ static int gfx_v7_0_soft_reset(void *handle)
gfx_v7_0_update_cg(adev, false);
/* stop the rlc */
- gfx_v7_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
/* Disable GFX parsing/prefetching */
WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
@@ -4959,12 +4845,36 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v7_0_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ struct amdgpu_ring *ring;
+ u8 me_id, pipe_id;
+ int i;
+
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ switch (me_id) {
+ case 0:
+ drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if ((ring->me == me_id) && (ring->pipe == pipe_id))
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ }
+}
+
static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v7_0_fault(adev, entry);
return 0;
}
@@ -4974,7 +4884,7 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
{
DRM_ERROR("Illegal instruction in command stream\n");
// XXX soft reset the gfx block only
- schedule_work(&adev->reset_work);
+ gfx_v7_0_fault(adev, entry);
return 0;
}
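
[ Note: for orientation, this is the amdgpu_rlc_funcs table the per-ASIC code now
fills in. The field list is inferred from the .init/.resume/... assignments in this
patch; the authoritative definition is in amdgpu_rlc.h. ]

	struct amdgpu_rlc_funcs {
		bool (*is_rlc_enabled)(struct amdgpu_device *adev);
		void (*set_safe_mode)(struct amdgpu_device *adev);
		void (*unset_safe_mode)(struct amdgpu_device *adev);
		int  (*init)(struct amdgpu_device *adev);
		u32  (*get_csb_size)(struct amdgpu_device *adev);
		void (*get_csb_buffer)(struct amdgpu_device *adev,
				       volatile u32 *buffer);
		int  (*get_cp_table_num)(struct amdgpu_device *adev);
		int  (*resume)(struct amdgpu_device *adev);
		void (*stop)(struct amdgpu_device *adev);
		void (*reset)(struct amdgpu_device *adev);
		void (*start)(struct amdgpu_device *adev);
	};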
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 617b0c8908a3..381f593b0cda 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -44,7 +44,6 @@
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
-#include "gca/gfx_8_0_enum.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
@@ -54,7 +53,7 @@
#include "ivsrcid/ivsrcid_vislands30.h"
#define GFX8_NUM_GFX_RINGS 1
-#define GFX8_MEC_HPD_SIZE 2048
+#define GFX8_MEC_HPD_SIZE 4096
#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
@@ -839,18 +838,14 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
+ if (r)
+ goto error_free_scratch;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -862,14 +857,11 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_scratch:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -886,19 +878,16 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err1;
- }
+
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
ib.ptr[2] = lower_32_bits(gpu_addr);
@@ -912,22 +901,17 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
tmp = adev->wb.wb[index];
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("ib test on ring %d failed\n", ring->idx);
+ else
r = -EINVAL;
- }
err2:
amdgpu_ib_free(adev, &ib, NULL);
@@ -1298,81 +1282,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(0);
}
-static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
{
- const __le32 *fw_data;
- volatile u32 *dst_ptr;
- int me, i, max_me = 4;
- u32 bo_offset = 0;
- u32 table_offset, table_size;
-
if (adev->asic_type == CHIP_CARRIZO)
- max_me = 5;
-
- /* write the cp table buffer */
- dst_ptr = adev->gfx.rlc.cp_table_ptr;
- for (me = 0; me < max_me; me++) {
- if (me == 0) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.ce_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 1) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.pfp_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 2) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.me_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 3) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 4) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec2_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- }
-
- for (i = 0; i < table_size; i ++) {
- dst_ptr[bo_offset + i] =
- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
- }
-
- bo_offset += table_size;
- }
-}
-
-static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
+ return 5;
+ else
+ return 4;
}
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
{
- volatile u32 *dst_ptr;
- u32 dws;
const struct cs_section_def *cs_data;
int r;
@@ -1381,44 +1300,18 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
cs_data = adev->gfx.rlc.cs_data;
if (cs_data) {
- /* clear state block */
- adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
-
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v8_0_rlc_fini(adev);
+ /* init clear state block */
+ r = amdgpu_gfx_rlc_init_csb(adev);
+ if (r)
return r;
- }
-
- /* set up the cs buffer */
- dst_ptr = adev->gfx.rlc.cs_ptr;
- gfx_v8_0_get_csb_buffer(adev, dst_ptr);
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
if ((adev->asic_type == CHIP_CARRIZO) ||
(adev->asic_type == CHIP_STONEY)) {
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+ r = amdgpu_gfx_rlc_init_cpt(adev);
+ if (r)
return r;
- }
-
- cz_init_cp_jump_table(adev);
-
- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
}
return 0;
@@ -1443,7 +1336,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
@@ -1629,7 +1522,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
return 0;
/* bail if the compute ring is not ready */
- if (!ring->ready)
+ if (!ring->sched.ready)
return 0;
tmp = RREG32(mmGB_EDC_MODE);
@@ -1997,7 +1890,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+ ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX8_MEC_HPD_SIZE);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -2088,7 +1981,7 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
}
- r = gfx_v8_0_rlc_init(adev);
+ r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -2108,7 +2001,7 @@ static int gfx_v8_0_sw_init(void *handle)
/* no gfx doorbells on iceland */
if (adev->asic_type != CHIP_TOPAZ) {
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
+ ring->doorbell_index = adev->doorbell_index.gfx_ring0;
}
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
@@ -2181,7 +2074,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_gfx_kiq_fini(adev);
gfx_v8_0_mec_fini(adev);
- gfx_v8_0_rlc_fini(adev);
+ amdgpu_gfx_rlc_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
@@ -4175,10 +4068,15 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
- gfx_v8_0_rlc_stop(adev);
- gfx_v8_0_rlc_reset(adev);
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v8_0_init_csb(adev);
+ return 0;
+ }
+
+ adev->gfx.rlc.funcs->stop(adev);
+ adev->gfx.rlc.funcs->reset(adev);
gfx_v8_0_init_pg(adev);
- gfx_v8_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
@@ -4197,7 +4095,7 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].ready = false;
+ adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32(mmCP_ME_CNTL, tmp);
udelay(50);
@@ -4322,7 +4220,7 @@ static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER,
- AMDGPU_DOORBELL_GFX_RING0);
+ adev->doorbell_index.gfx_ring0);
WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
@@ -4379,10 +4277,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
amdgpu_ring_clear_ring(ring);
gfx_v8_0_cp_gfx_start(adev);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r)
- ring->ready = false;
+ ring->sched.ready = true;
+ r = amdgpu_ring_test_helper(ring);
return r;
}
@@ -4396,8 +4292,8 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].ready = false;
- adev->gfx.kiq.ring.ready = false;
+ adev->gfx.compute_ring[i].sched.ready = false;
+ adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
}
@@ -4473,11 +4369,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r) {
+ r = amdgpu_ring_test_helper(kiq_ring);
+ if (r)
DRM_ERROR("KCQ enable failed\n");
- kiq_ring->ready = false;
- }
return r;
}
@@ -4755,8 +4649,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
{
if (adev->asic_type > CHIP_TONGA) {
- WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2);
- WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, AMDGPU_DOORBELL_MEC_RING7 << 2);
+ WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
+ WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
}
/* enable doorbells */
WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
@@ -4781,7 +4675,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj);
- ring->ready = true;
+ ring->sched.ready = true;
return 0;
}
@@ -4820,10 +4714,7 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
*/
for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
ring = &adev->gfx.compute_ring[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r)
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
}
done:
@@ -4867,7 +4758,7 @@ static int gfx_v8_0_hw_init(void *handle)
gfx_v8_0_init_golden_registers(adev);
gfx_v8_0_constants_init(adev);
- r = gfx_v8_0_rlc_resume(adev);
+ r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -4899,7 +4790,7 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
}
- r = amdgpu_ring_test_ring(kiq_ring);
+ r = amdgpu_ring_test_helper(kiq_ring);
if (r)
DRM_ERROR("KCQ disable failed\n");
@@ -4973,16 +4864,16 @@ static int gfx_v8_0_hw_fini(void *handle)
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (!gfx_v8_0_wait_for_idle(adev))
gfx_v8_0_cp_enable(adev, false);
else
pr_err("cp is busy, skip halt cp\n");
if (!gfx_v8_0_wait_for_rlc_idle(adev))
- gfx_v8_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
else
pr_err("rlc is busy, skip halt rlc\n");
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -5061,17 +4952,16 @@ static bool gfx_v8_0_check_soft_reset(void *handle)
static int gfx_v8_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
- srbm_soft_reset = adev->gfx.srbm_soft_reset;
/* stop the rlc */
- gfx_v8_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
@@ -5165,14 +5055,13 @@ static int gfx_v8_0_soft_reset(void *handle)
static int gfx_v8_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
- srbm_soft_reset = adev->gfx.srbm_soft_reset;
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
@@ -5197,7 +5086,7 @@ static int gfx_v8_0_post_soft_reset(void *handle)
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
gfx_v8_0_cp_gfx_resume(adev);
- gfx_v8_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
@@ -5445,7 +5334,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
@@ -5499,7 +5388,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -5593,57 +5482,53 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
-static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
{
- u32 data;
- unsigned i;
+ uint32_t rlc_setting;
- data = RREG32(mmRLC_CNTL);
- if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
- return;
+ rlc_setting = RREG32(mmRLC_CNTL);
+ if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+ return false;
- if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
- data |= RLC_SAFE_MODE__CMD_MASK;
- data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
- data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
- WREG32(mmRLC_SAFE_MODE, data);
+ return true;
+}
- for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_GPM_STAT) &
- (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
- RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
- (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
- RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
- break;
- udelay(1);
- }
+static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
+{
+ uint32_t data;
+ unsigned i;
+ data = RREG32(mmRLC_CNTL);
+ data |= RLC_SAFE_MODE__CMD_MASK;
+ data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+ WREG32(mmRLC_SAFE_MODE, data);
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
- break;
- udelay(1);
- }
- adev->gfx.rlc.in_safe_mode = true;
+ /* wait for RLC_SAFE_MODE */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmRLC_GPM_STAT) &
+ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+ RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+ RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+ break;
+ udelay(1);
+ }
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ break;
+ udelay(1);
}
}
-static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
{
- u32 data = 0;
+ uint32_t data;
unsigned i;
data = RREG32(mmRLC_CNTL);
- if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
- return;
-
- if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
- if (adev->gfx.rlc.in_safe_mode) {
- data |= RLC_SAFE_MODE__CMD_MASK;
- data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
- WREG32(mmRLC_SAFE_MODE, data);
- adev->gfx.rlc.in_safe_mode = false;
- }
- }
+ data |= RLC_SAFE_MODE__CMD_MASK;
+ data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+ WREG32(mmRLC_SAFE_MODE, data);
for (i = 0; i < adev->usec_timeout; i++) {
if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
@@ -5653,8 +5538,17 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
}
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
- .enter_safe_mode = iceland_enter_rlc_safe_mode,
- .exit_safe_mode = iceland_exit_rlc_safe_mode
+ .is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
+ .set_safe_mode = gfx_v8_0_set_safe_mode,
+ .unset_safe_mode = gfx_v8_0_unset_safe_mode,
+ .init = gfx_v8_0_rlc_init,
+ .get_csb_size = gfx_v8_0_get_csb_size,
+ .get_csb_buffer = gfx_v8_0_get_csb_buffer,
+ .get_cp_table_num = gfx_v8_0_cp_jump_table_num,
+ .resume = gfx_v8_0_rlc_resume,
+ .stop = gfx_v8_0_rlc_stop,
+ .reset = gfx_v8_0_rlc_reset,
+ .start = gfx_v8_0_rlc_start
};
static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -5662,7 +5556,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t temp, data;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
@@ -5758,7 +5652,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
gfx_v8_0_wait_for_rlc_serdes(adev);
}
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5768,7 +5662,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
@@ -5851,7 +5745,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
gfx_v8_0_wait_for_rlc_serdes(adev);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
@@ -6131,9 +6025,11 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
}
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -6161,9 +6057,11 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -6738,12 +6636,39 @@ static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v8_0_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ u8 me_id, pipe_id, queue_id;
+ struct amdgpu_ring *ring;
+ int i;
+
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ queue_id = (entry->ring_id & 0x70) >> 4;
+
+ switch (me_id) {
+ case 0:
+ drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ }
+}
+
static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v8_0_fault(adev, entry);
return 0;
}
@@ -6752,7 +6677,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal instruction in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v8_0_fault(adev, entry);
return 0;
}
@@ -6976,10 +6901,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
17 + /* gfx_v8_0_ring_emit_vm_flush */
7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
- .emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
.test_ring = gfx_v8_0_ring_test_ring,
- .test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v8_0_ring_emit_rreg,
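
[ Note: the per-ASIC enter/exit safe-mode helpers collapse into generic wrappers
that consult the new is_rlc_enabled/set_safe_mode/unset_safe_mode callbacks. A
sketch of the assumed wrapper pair, keeping the in_safe_mode bookkeeping in common
code; the real versions live in amdgpu_rlc.c: ]

	void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
	{
		if (adev->gfx.rlc.in_safe_mode)
			return;

		/* if RLC is not enabled, do nothing */
		if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
			return;

		if (adev->cg_flags &
		    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
		     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
			adev->gfx.rlc.funcs->set_safe_mode(adev);
			adev->gfx.rlc.in_safe_mode = true;
		}
	}

	void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
	{
		if (!adev->gfx.rlc.in_safe_mode)
			return;

		if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
			return;

		if (adev->cg_flags &
		    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
		     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
			adev->gfx.rlc.funcs->unset_safe_mode(adev);
			adev->gfx.rlc.in_safe_mode = false;
		}
	}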
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6d7baf59d6e1..f62d570a81a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -41,7 +41,7 @@
#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
#define GFX9_NUM_GFX_RINGS 1
-#define GFX9_MEC_HPD_SIZE 2048
+#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
@@ -86,6 +86,7 @@ MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
+MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
@@ -396,18 +397,14 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
+ if (r)
+ goto error_free_scratch;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -419,14 +416,11 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_scratch:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -443,19 +437,16 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err1;
- }
+
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
ib.ptr[2] = lower_32_bits(gpu_addr);
@@ -469,22 +460,17 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
- r = -ETIMEDOUT;
- goto err2;
+ r = -ETIMEDOUT;
+ goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- goto err2;
+ goto err2;
}
tmp = adev->wb.wb[index];
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
- r = 0;
- } else {
- DRM_ERROR("ib test on ring %d failed\n", ring->idx);
- r = -EINVAL;
- }
+ if (tmp == 0xDEADBEEF)
+ r = 0;
+ else
+ r = -EINVAL;
err2:
amdgpu_ib_free(adev, &ib, NULL);
@@ -660,7 +646,20 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+	/*
+	 * For Picasso on AM4 socket boards, picasso_rlc_am4.bin is used
+	 * instead of picasso_rlc.bin.
+	 * Judgment method:
+	 * PCO AM4: revision >= 0xC8 && revision <= 0xCF
+	 *     or revision >= 0xD8 && revision <= 0xDF
+	 * otherwise it is PCO FP5
+	 */
+ if (!strcmp(chip_name, "picasso") &&
+ (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
+ ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
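
[ Note: the revision windows above could equally be expressed as a standalone
predicate. The helper below is hypothetical, not part of the patch; it merely
restates the check (struct pci_dev comes from linux/pci.h): ]

	/* true for Picasso parts on AM4 socket boards, per the windows above */
	static bool picasso_is_am4(struct pci_dev *pdev)
	{
		u8 rev = pdev->revision;

		return (rev >= 0xC8 && rev <= 0xCF) ||
		       (rev >= 0xD8 && rev <= 0xDF);
	}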
@@ -1065,85 +1064,13 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}
-static void rv_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
- const __le32 *fw_data;
- volatile u32 *dst_ptr;
- int me, i, max_me = 5;
- u32 bo_offset = 0;
- u32 table_offset, table_size;
-
- /* write the cp table buffer */
- dst_ptr = adev->gfx.rlc.cp_table_ptr;
- for (me = 0; me < max_me; me++) {
- if (me == 0) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.ce_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 1) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.pfp_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 2) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.me_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 3) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 4) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec2_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- }
-
- for (i = 0; i < table_size; i ++) {
- dst_ptr[bo_offset + i] =
- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
- }
-
- bo_offset += table_size;
- }
-}
-
-static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
-{
- /* clear state block */
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
-
- /* jump table block */
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
+ return 5;
}
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
- volatile u32 *dst_ptr;
- u32 dws;
const struct cs_section_def *cs_data;
int r;
@@ -1152,45 +1079,18 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
cs_data = adev->gfx.rlc.cs_data;
if (cs_data) {
- /* clear state block */
- adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
- r);
- gfx_v9_0_rlc_fini(adev);
+ /* init clear state block */
+ r = amdgpu_gfx_rlc_init_csb(adev);
+ if (r)
return r;
- }
- /* set up the cs buffer */
- dst_ptr = adev->gfx.rlc.cs_ptr;
- gfx_v9_0_get_csb_buffer(adev, dst_ptr);
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
if (adev->asic_type == CHIP_RAVEN) {
/* TODO: double check the cp_table_size for RV */
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
- if (r) {
- dev_err(adev->dev,
- "(%d) failed to create cp table bo\n", r);
- gfx_v9_0_rlc_fini(adev);
+ r = amdgpu_gfx_rlc_init_cpt(adev);
+ if (r)
return r;
- }
-
- rv_init_cp_jump_table(adev);
- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
}
switch (adev->asic_type) {
@@ -1264,7 +1164,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
@@ -1635,8 +1535,8 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
/* Clear GDS reserved memory */
r = amdgpu_ring_alloc(ring, 17);
if (r) {
- DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
- ring->idx, r);
+ DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
+ ring->name, r);
return r;
}
@@ -1680,7 +1580,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
+ ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX9_MEC_HPD_SIZE);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -1748,7 +1648,7 @@ static int gfx_v9_0_sw_init(void *handle)
return r;
}
- r = gfx_v9_0_rlc_init(adev);
+ r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -1769,7 +1669,7 @@ static int gfx_v9_0_sw_init(void *handle)
else
sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
+ ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
@@ -2498,12 +2398,12 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
return 0;
}
- gfx_v9_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
/* disable CG */
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
- gfx_v9_0_rlc_reset(adev);
+ adev->gfx.rlc.funcs->reset(adev);
gfx_v9_0_init_pg(adev);
@@ -2514,15 +2414,24 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- if (adev->asic_type == CHIP_RAVEN ||
- adev->asic_type == CHIP_VEGA20) {
- if (amdgpu_lbpw != 0)
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ if (amdgpu_lbpw == 0)
+ gfx_v9_0_enable_lbpw(adev, false);
+ else
+ gfx_v9_0_enable_lbpw(adev, true);
+ break;
+ case CHIP_VEGA20:
+ if (amdgpu_lbpw > 0)
gfx_v9_0_enable_lbpw(adev, true);
else
gfx_v9_0_enable_lbpw(adev, false);
+ break;
+ default:
+ break;
}
- gfx_v9_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
@@ -2537,7 +2446,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
if (!enable) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].ready = false;
+ adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
udelay(50);
@@ -2727,7 +2636,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
gfx_v9_0_cp_gfx_start(adev);
- ring->ready = true;
+ ring->sched.ready = true;
return 0;
}
@@ -2742,8 +2651,8 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].ready = false;
- adev->gfx.kiq.ring.ready = false;
+ adev->gfx.compute_ring[i].sched.ready = false;
+ adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
}
@@ -2866,11 +2775,9 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r) {
+ r = amdgpu_ring_test_helper(kiq_ring);
+ if (r)
DRM_ERROR("KCQ enable failed\n");
- kiq_ring->ready = false;
- }
return r;
}
@@ -3088,9 +2995,9 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
/* enable the doorbell if requested */
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
- (AMDGPU_DOORBELL64_KIQ *2) << 2);
+ (adev->doorbell_index.kiq * 2) << 2);
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
- (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
+ (adev->doorbell_index.userqueue_end * 2) << 2);
}
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
@@ -3249,7 +3156,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj);
- ring->ready = true;
+ ring->sched.ready = true;
return 0;
}
@@ -3314,19 +3221,13 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
return r;
ring = &adev->gfx.gfx_ring[0];
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
-
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r)
- ring->ready = false;
+ amdgpu_ring_test_helper(ring);
}
gfx_v9_0_enable_gui_idle_interrupt(adev, true);
@@ -3353,7 +3254,7 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
- r = gfx_v9_0_rlc_resume(adev);
+ r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -3391,7 +3292,7 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
}
- r = amdgpu_ring_test_ring(kiq_ring);
+ r = amdgpu_ring_test_helper(kiq_ring);
if (r)
DRM_ERROR("KCQ disable failed\n");
@@ -3433,7 +3334,7 @@ static int gfx_v9_0_hw_fini(void *handle)
}
gfx_v9_0_cp_enable(adev, false);
- gfx_v9_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
gfx_v9_0_csb_vram_unpin(adev);
@@ -3508,7 +3409,7 @@ static int gfx_v9_0_soft_reset(void *handle)
if (grbm_soft_reset) {
/* stop the rlc */
- gfx_v9_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
/* Disable GFX parsing/prefetching */
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3607,64 +3508,47 @@ static int gfx_v9_0_late_init(void *handle)
return 0;
}
-static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
{
- uint32_t rlc_setting, data;
- unsigned i;
-
- if (adev->gfx.rlc.in_safe_mode)
- return;
+ uint32_t rlc_setting;
/* if RLC is not enabled, do nothing */
rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
- return;
-
- if (adev->cg_flags &
- (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
- AMD_CG_SUPPORT_GFX_3D_CGCG)) {
- data = RLC_SAFE_MODE__CMD_MASK;
- data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+ return false;
- /* wait for RLC_SAFE_MODE */
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
- break;
- udelay(1);
- }
- adev->gfx.rlc.in_safe_mode = true;
- }
+ return true;
}
-static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
{
- uint32_t rlc_setting, data;
+ uint32_t data;
+ unsigned i;
- if (!adev->gfx.rlc.in_safe_mode)
- return;
+ data = RLC_SAFE_MODE__CMD_MASK;
+ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
- /* if RLC is not enabled, do nothing */
- rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
- if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
- return;
-
- if (adev->cg_flags &
- (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
- /*
- * Try to exit safe mode only if it is already in safe
- * mode.
- */
- data = RLC_SAFE_MODE__CMD_MASK;
- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
- adev->gfx.rlc.in_safe_mode = false;
+ /* wait for RLC_SAFE_MODE */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ break;
+ udelay(1);
}
}
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ data = RLC_SAFE_MODE__CMD_MASK;
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+}
+
static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- gfx_v9_0_enter_rlc_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@@ -3675,7 +3559,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}
- gfx_v9_0_exit_rlc_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@@ -3773,7 +3657,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
{
uint32_t data, def;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
/* Enable 3D CGCG/CGLS */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
@@ -3813,7 +3697,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
}
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -3821,7 +3705,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t def, data;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
@@ -3861,7 +3745,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
}
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -3890,8 +3774,17 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
}
static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
- .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
- .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
+ .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
+ .set_safe_mode = gfx_v9_0_set_safe_mode,
+ .unset_safe_mode = gfx_v9_0_unset_safe_mode,
+ .init = gfx_v9_0_rlc_init,
+ .get_csb_size = gfx_v9_0_get_csb_size,
+ .get_csb_buffer = gfx_v9_0_get_csb_buffer,
+ .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
+ .resume = gfx_v9_0_rlc_resume,
+ .stop = gfx_v9_0_rlc_stop,
+ .reset = gfx_v9_0_rlc_reset,
+ .start = gfx_v9_0_rlc_start
};
static int gfx_v9_0_set_powergating_state(void *handle,
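
Editor's note: the per-ASIC enter/exit safe-mode pair is replaced above by three smaller callbacks (is_rlc_enabled/set_safe_mode/unset_safe_mode) plus the shared amdgpu_gfx_rlc_enter_safe_mode()/amdgpu_gfx_rlc_exit_safe_mode() wrappers. A minimal sketch of how the enter wrapper could compose the new hooks, assuming it mirrors the per-ASIC logic deleted earlier in this file (the in_safe_mode flag and the cg_flags test come from that removed code, not from the helper itself):

/* Hypothetical composition; mirrors the removed gfx_v9_0_enter_rlc_safe_mode(). */
void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
{
	if (adev->gfx.rlc.in_safe_mode)
		return;

	/* if the RLC is not running, there is nothing to enter */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG |
			      AMD_CG_SUPPORT_GFX_MGCG |
			      AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->set_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = true;
	}
}

The exit path would be symmetric: call unset_safe_mode() and clear in_safe_mode only when the flag was set.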
@@ -4072,9 +3965,11 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -4103,20 +3998,22 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
- amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
- amdgpu_ring_write(ring,
+ amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
- (2 << 0) |
+ (2 << 0) |
#endif
- lower_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, control);
+ lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, control);
}
static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
@@ -4695,12 +4592,39 @@ static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v9_0_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ u8 me_id, pipe_id, queue_id;
+ struct amdgpu_ring *ring;
+ int i;
+
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ queue_id = (entry->ring_id & 0x70) >> 4;
+
+ switch (me_id) {
+ case 0:
+ drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ }
+}
+
static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v9_0_fault(adev, entry);
return 0;
}
@@ -4709,7 +4633,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal instruction in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v9_0_fault(adev, entry);
return 0;
}
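
Editor's note: both privileged-fault handlers now hand the error to the scheduler of the ring that raised it via drm_sched_fault() instead of scheduling a device-wide reset. A standalone worked example of the ring_id decode performed by gfx_v9_0_fault() above (the sample value is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned ring_id = 0x65;                  /* example IV ring_id */
	unsigned me      = (ring_id & 0x0c) >> 2; /* 1: compute MEC     */
	unsigned pipe    = (ring_id & 0x03) >> 0; /* 1                  */
	unsigned queue   = (ring_id & 0x70) >> 4; /* 6                  */

	printf("me %u pipe %u queue %u\n", me, pipe, queue);
	return 0;
}

With me_id 1 or 2 the handler walks the compute rings and faults only the one whose me/pipe/queue triple matches.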
@@ -4836,10 +4760,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
- .emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
.test_ring = gfx_v9_0_ring_test_ring,
- .test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v9_0_ring_emit_rreg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index ceb7847b504f..f5edddf3b29d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -35,20 +35,25 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
}
-static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+	/* stride of two registers from mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */
+ int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- gfxhub_v1_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
@@ -72,7 +77,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
/*
@@ -82,11 +87,11 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* to get rid of the VM fault and hardware hang.
*/
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max((adev->gmc.vram_end >> 18) + 0x1,
+ max((adev->gmc.fb_end >> 18) + 0x1,
adev->gmc.agp_end >> 18));
else
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index 206e29cad753..92d3a70cd9b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -30,5 +30,7 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value);
void gfxhub_v1_0_init(struct amdgpu_device *adev);
u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
+void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
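
Editor's note: gfxhub_v1_0_setup_vm_pt_regs() generalizes the old GART-only helper to any VMID by exploiting the fixed register stride: the LO32/HI32 base-address pair of context N sits a constant number of dwords after context 0. A standalone check of that arithmetic, using made-up register offsets (the real values come from the generated register headers):

#include <stdio.h>

/* Illustrative offsets only, not the real vega10 values. */
#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x072b
#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x072d

int main(void)
{
	int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		     mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; /* 2: LO32 + HI32 */
	unsigned vmid;

	for (vmid = 0; vmid < 4; vmid++)
		printf("vmid %u -> LO32 at 0x%04x\n", vmid,
		       mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 + offset * vmid);
	return 0;
}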
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index e1c2b4e9c7b2..9fc3296592fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
MODULE_FIRMWARE("amdgpu/verde_mc.bin");
MODULE_FIRMWARE("amdgpu/oland_mc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
MODULE_FIRMWARE("amdgpu/si58_mc.bin");
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
@@ -358,7 +359,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
return 0;
}
-static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev,
+ uint32_t vmid, uint32_t flush_type)
{
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
@@ -580,7 +582,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
else
gmc_v6_0_set_fault_enable_default(adev, true);
- gmc_v6_0_flush_gpu_tlb(adev, 0);
+ gmc_v6_0_flush_gpu_tlb(adev, 0, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 910c4ce19cb3..761dcfb2fec0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -430,7 +430,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
*
* Flush the TLB for the requested page table (CIK).
*/
-static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev,
+ uint32_t vmid, uint32_t flush_type)
{
/* bits 0-15 are the VM contexts0-15 */
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -698,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmCHUB_CONTROL, tmp);
}
- gmc_v7_0_flush_gpu_tlb(adev, 0);
+ gmc_v7_0_flush_gpu_tlb(adev, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1d3265c97b70..531aaf377592 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -611,7 +611,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
* Flush the TLB for the requested page table (CIK).
*/
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid)
+ uint32_t vmid, uint32_t flush_type)
{
/* bits 0-15 are the VM contexts0-15 */
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -920,7 +920,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
else
gmc_v8_0_set_fault_enable_default(adev, true);
- gmc_v8_0_flush_gpu_tlb(adev, 0);
+ gmc_v8_0_flush_gpu_tlb(adev, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
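
Editor's note: gmc v6/v7/v8 gain a flush_type argument purely to keep the amdgpu_gmc_funcs callback signature uniform across generations; their VM_INVALIDATE_REQUEST interface has no flush-type field, so the value is ignored and callers pass 0. A minimal restatement of the shared shape (the typedef is an illustration; the real function table lives in the amdgpu GMC headers):

#include <stdint.h>

struct amdgpu_device;

/* Uniform callback shape assumed from the call sites in this patch. */
typedef void (*flush_gpu_tlb_t)(struct amdgpu_device *adev,
				uint32_t vmid, uint32_t flush_type);

/* Legacy ASICs ignore flush_type; gmc v9 (below) forwards it into the
 * FLUSH_TYPE field of VM_INVALIDATE_ENG0_REQ. Legacy callers pass 0,
 * e.g. gmc_v8_0_flush_gpu_tlb(adev, 0, 0).
 */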
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index f35d7a554ad5..ce150de723c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -244,6 +244,62 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+/**
+ * gmc_v9_0_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @entry: interrupt vector entry to prescreen
+ * @addr: faulting address decoded from @entry
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry,
+ uint64_t addr)
+{
+ struct amdgpu_vm *vm;
+ u64 key;
+ int r;
+
+ /* No PASID, can't identify faulting process */
+ if (!entry->pasid)
+ return true;
+
+ /* Not a retry fault */
+ if (!(entry->src_data[1] & 0x80))
+ return true;
+
+ /* Track retry faults in per-VM fault FIFO. */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
+ if (!vm) {
+ /* VM not found, process it normally */
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return true;
+ }
+
+ key = AMDGPU_VM_FAULT(entry->pasid, addr);
+ r = amdgpu_vm_add_fault(vm->fault_hash, key);
+
+	/* If the hash table is full or the fault is already being
+	 * processed, ignore further page faults.
+	 */
+ if (r != 0) {
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return false;
+ }
+ /* No locking required with single writer and single reader */
+ r = kfifo_put(&vm->faults, key);
+ if (!r) {
+ /* FIFO is full. Ignore it until there is space */
+ amdgpu_vm_clear_fault(vm->fault_hash, key);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return false;
+ }
+
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ /* It's the first fault for this address, process it normally */
+ return true;
+}
+
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
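
Editor's note: with the per-IH prescreen_iv hooks going away (see the iceland_ih.c hunk below), retry-fault filtering moves into the GMC: a (pasid, address) key is tracked in the per-VM fault hash and queued on the VM's kfifo, and only the first occurrence is processed. A simplified user-space analogue of that dedup idea (the kernel code uses amdgpu_vm_add_fault()/amdgpu_vm_clear_fault() and a kfifo, as shown above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAULT_SLOTS 8

static uint64_t seen[FAULT_SLOTS];

/* True the first time a key shows up, false for duplicates or overflow. */
static bool first_occurrence(uint64_t key)
{
	unsigned i;

	for (i = 0; i < FAULT_SLOTS; i++) {
		if (seen[i] == key)
			return false;  /* already being processed: drop */
		if (!seen[i]) {
			seen[i] = key; /* record it and process this one */
			return true;
		}
	}
	return false;                  /* table full: drop as well */
}

int main(void)
{
	uint64_t keys[] = { 0x10001000, 0x10001000, 0x20002000 };
	unsigned i;

	for (i = 0; i < 3; i++)
		printf("key 0x%llx -> %s\n", (unsigned long long)keys[i],
		       first_occurrence(keys[i]) ? "process" : "ignore");
	return 0;
}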
@@ -255,6 +311,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+ if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
+ return 1; /* This also prevents sending it to KFD */
+
if (!amdgpu_sriov_vf(adev)) {
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
@@ -293,14 +352,14 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
-static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
{
u32 req = 0;
- /* invalidate using legacy mode on vmid*/
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
PER_VMID_INVALIDATE_REQ, 1 << vmid);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
@@ -312,48 +371,6 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
return req;
}
-static signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
- uint32_t reg0, uint32_t reg1,
- uint32_t ref, uint32_t mask)
-{
- signed long r, cnt = 0;
- unsigned long flags;
- uint32_t seq;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *ring = &kiq->ring;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
- ref, mask);
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
- /* don't wait anymore for IRQ context */
- if (r < 1 && in_interrupt())
- goto failed_kiq;
-
- might_sleep();
-
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
- msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- }
-
- if (cnt > MAX_KIQ_REG_TRY)
- goto failed_kiq;
-
- return 0;
-
-failed_kiq:
- pr_err("failed to invalidate tlb with kiq\n");
- return r;
-}
-
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -362,64 +379,50 @@ failed_kiq:
*/
/**
- * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v9_0_flush_gpu_tlb - tlb flush with a given flush type
*
* @adev: amdgpu_device pointer
* @vmid: vm instance to flush
+ * @flush_type: the flush type
*
- * Flush the TLB for the requested page table.
+ * Flush the TLB for the requested page table using a given flush type.
*/
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid)
+ uint32_t vmid, uint32_t flush_type)
{
- /* Use register 17 for GART */
const unsigned eng = 17;
unsigned i, j;
- int r;
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &adev->vmhub[i];
- u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
+ u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
- if (adev->gfx.kiq.ring.ready &&
+ /* This is necessary for a HW workaround under SRIOV as well
+ * as GFXOFF under bare metal
+ */
+ if (adev->gfx.kiq.ring.sched.ready &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
!adev->in_gpu_reset) {
- r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
- hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
- if (!r)
- continue;
- }
-
- spin_lock(&adev->gmc.invalidate_lock);
-
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+ uint32_t req = hub->vm_inv_eng0_req + eng;
+ uint32_t ack = hub->vm_inv_eng0_ack + eng;
- /* Busy wait for ACK.*/
- for (j = 0; j < 100; j++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
- tmp &= 1 << vmid;
- if (tmp)
- break;
- cpu_relax();
- }
- if (j < 100) {
- spin_unlock(&adev->gmc.invalidate_lock);
+ amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+ 1 << vmid);
continue;
}
- /* Wait for ACK with a delay.*/
+ spin_lock(&adev->gmc.invalidate_lock);
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
for (j = 0; j < adev->usec_timeout; j++) {
tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
- tmp &= 1 << vmid;
- if (tmp)
+ if (tmp & (1 << vmid))
break;
udelay(1);
}
- if (j < adev->usec_timeout) {
- spin_unlock(&adev->gmc.invalidate_lock);
- continue;
- }
spin_unlock(&adev->gmc.invalidate_lock);
+ if (j < adev->usec_timeout)
+ continue;
+
DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
}
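
Editor's note: the rewritten flush now has exactly two paths per VM hub: a KIQ-mediated register write-and-wait when the KIQ scheduler is up (needed as a HW workaround under SR-IOV and for GFXOFF on bare metal), and a direct MMIO write with a bounded ack poll otherwise. A condensed restatement of that control flow (not a drop-in function; the SR-IOV runtime test from the hunk above is elided):

/* Condensed flow of gmc_v9_0_flush_gpu_tlb() for a single hub. */
static void flush_one_hub(struct amdgpu_device *adev,
			  struct amdgpu_vmhub *hub, u32 req,
			  unsigned int vmid, unsigned int eng)
{
	if (adev->gfx.kiq.ring.sched.ready && !adev->in_gpu_reset) {
		/* serialized, GFXOFF/SR-IOV safe path */
		amdgpu_virt_kiq_reg_write_reg_wait(adev,
				hub->vm_inv_eng0_req + eng,
				hub->vm_inv_eng0_ack + eng,
				req, 1 << vmid);
		return;
	}

	/* direct MMIO path, poll the per-VMID ack bit in udelay(1) steps */
	spin_lock(&adev->gmc.invalidate_lock);
	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, req);
	/* ... poll hub->vm_inv_eng0_ack + eng for (1 << vmid) ... */
	spin_unlock(&adev->gmc.invalidate_lock);
}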
@@ -429,7 +432,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
- uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
+ uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
@@ -739,9 +742,8 @@ static int gmc_v9_0_late_init(void *handle)
unsigned vmhub = ring->funcs->vmhub;
ring->vm_inv_eng = vm_inv_eng[vmhub]++;
- dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
- ring->idx, ring->name, ring->vm_inv_eng,
- ring->funcs->vmhub);
+ dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+ ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
}
/* Engine 16 is used for KFD and 17 for GART flushes */
@@ -959,6 +961,9 @@ static int gmc_v9_0_sw_init(void *handle)
/* This interrupt is VMC page fault.*/
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
+ if (r)
+ return r;
+
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
@@ -991,7 +996,7 @@ static int gmc_v9_0_sw_init(void *handle)
}
adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
- if (adev->asic_type == CHIP_VEGA20) {
+ if (adev->gmc.xgmi.supported) {
r = gfxhub_v1_1_get_xgmi_info(adev);
if (r)
return r;
@@ -1122,7 +1127,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
gfxhub_v1_0_set_fault_enable_default(adev, value);
mmhub_v1_0_set_fault_enable_default(adev, value);
- gmc_v9_0_flush_gpu_tlb(adev, 0);
+ gmc_v9_0_flush_gpu_tlb(adev, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index cf0fc61aebe6..a3984d10b604 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -208,34 +208,6 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * iceland_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* iceland_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -440,7 +412,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
.get_wptr = iceland_ih_get_wptr,
- .prescreen_iv = iceland_ih_prescreen_iv,
.decode_iv = iceland_ih_decode_iv,
.set_rptr = iceland_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index d0e478f43443..0c9a2c03504e 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
pi->caps_db_ramping ||
pi->caps_td_ramping ||
pi->caps_tcp_ramping) {
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (enable) {
ret = kv_program_pt_config_registers(adev, didt_config_kv);
if (ret) {
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return ret;
}
}
kv_do_enable_didt(adev, enable);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index fd23ba1226a5..d0d966d6080a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -52,20 +52,25 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
return base;
}
-static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+	/* stride of two registers from mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */
+ int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- mmhub_v1_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
@@ -90,7 +95,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
/*
@@ -100,11 +105,11 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* to get rid of the VM fault and hardware hang.
*/
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max((adev->gmc.vram_end >> 18) + 0x1,
+ max((adev->gmc.fb_end >> 18) + 0x1,
adev->gmc.agp_end >> 18));
else
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index bef3d0c0c117..0de0fdf98c00 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -34,5 +34,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
bool enable);
+void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 64e875d528dd..6a0fcd67662a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -37,7 +37,6 @@
#include "gmc/gmc_8_2_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
-#include "gca/gfx_8_0_sh_mask.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 882bd83a28c4..0de00fbe9233 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -43,6 +43,8 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
+	GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000,   /* notify PSP to consume the updated VF write pointer */
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
};
@@ -89,7 +91,8 @@ enum psp_gfx_cmd_id
GFX_CMD_ID_LOAD_IP_FW = 0x00000006, /* load HW IP FW */
GFX_CMD_ID_DESTROY_TMR = 0x00000007, /* destroy TMR region */
GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */
-
+ GFX_CMD_ID_SETUP_VMR = 0x00000009, /* setup VMR region */
+ GFX_CMD_ID_DESTROY_VMR = 0x0000000A, /* destroy VMR region */
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 295c2205485a..d78b4306a36f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -240,12 +240,9 @@ static int psp_v10_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- struct psp_ring *ring;
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- ring = &psp->km_ring;
-
/* Write the ring destroy command to C2PMSG_64 */
psp_ring_reg = 3 << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 3f3fac2d50cd..6c9a1b748ca7 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -34,6 +34,7 @@
#include "nbio/nbio_7_4_offset.h"
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -98,7 +99,8 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
const char *chip_name;
char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *hdr;
+ const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
@@ -119,16 +121,32 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
if (err)
goto out;
- hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
- adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version);
- adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes);
- adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) -
- le32_to_cpu(hdr->sos_size_bytes);
- adev->psp.sys_start_addr = (uint8_t *)hdr +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+ sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+ adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+ adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
+ adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
+ adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(sos_hdr->sos_size_bytes);
+ adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
+ le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(hdr->sos_offset_bytes);
+ le32_to_cpu(sos_hdr->sos_offset_bytes);
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
+ adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
+ adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
+ adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
return 0;
out:
if (err) {
@@ -153,8 +171,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
 * have already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
+ }
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -167,7 +188,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
/* Copy PSP System Driver binary to memory */
memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
- /* Provide the sys driver to bootrom */
+ /* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 1 << 16;
@@ -208,7 +229,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
/* Copy Secure OS binary to PSP memory */
memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
- /* Provide the PSP secure OS to bootrom */
+ /* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 2 << 16;
@@ -278,26 +299,47 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- /* Write low address of the ring to C2PMSG_69 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_70 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
- /* Write size of ring to C2PMSG_71 */
- psp_ring_reg = ring->ring_size;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
- /* Write the ring initialization command to C2PMSG_64 */
- psp_ring_reg = ring_type;
- psp_ring_reg = psp_ring_reg << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ if (psp_support_vmr_ring(psp)) {
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
+
+ /* Write the ring initialization command to C2PMSG_101 */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, false);
+
+ } else {
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+ }
return ret;
}
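
Editor's note: when psp_support_vmr_ring() is true (the SR-IOV case), every ring handshake moves from the C2PMSG_64/67/69/70/71 mailbox set to C2PMSG_101/102/103, with control commands such as GFX_CTRL_CMD_ID_CONSUME_CMD written to C2PMSG_101. The mapping as used by this patch, summarized in an illustrative table (only the registers touched here are covered):

/* C2PMSG register selection, illustrative summary of the hunks above. */
struct psp_mbox_regs {
	unsigned int cmd;     /* control command / response flag (bit 31) */
	unsigned int ring_lo; /* ring base address, low 32 bits           */
	unsigned int ring_hi; /* ring base address, high 32 bits          */
};

static const struct psp_mbox_regs mbox_default = {
	.cmd = 64, .ring_lo = 69, .ring_hi = 70,    /* ring size in _71   */
};

static const struct psp_mbox_regs mbox_vmr = {
	.cmd = 101, .ring_lo = 102, .ring_hi = 103, /* 102 doubles as wptr */
};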
@@ -308,15 +350,24 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
int ret = 0;
struct amdgpu_device *adev = psp->adev;
- /* Write the ring destroy command to C2PMSG_64 */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS);
+	/* Write the ring destroy command */
+ if (psp_support_vmr_ring(psp))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ /* Wait for response flag (bit 31) */
+ if (psp_support_vmr_ring(psp))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
return ret;
}
@@ -355,7 +406,10 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
/* KM (GPCOM) prepare write pointer */
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+ if (psp_support_vmr_ring(psp))
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
/* Update KM RB frame pointer to new frame */
/* write_frame ptr increments by size of rb_frame in bytes */
@@ -384,7 +438,11 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
+ if (psp_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
return 0;
}
@@ -529,7 +587,7 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
/*send the mode 1 reset command*/
WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
- mdelay(1000);
+ msleep(500);
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
@@ -552,24 +610,110 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
int number_devices, struct psp_xgmi_topology_info *topology)
{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
+ int i;
+ int ret;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ /* Fill in the shared memory with topology information as input */
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to get the topology information */
+ ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
+ if (ret)
+ return ret;
+
+ /* Read the output topology information from the shared memory */
+ topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
+ topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
+ for (i = 0; i < topology->num_nodes; i++) {
+ topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+ topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+ topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
+ topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+ }
+
return 0;
}
static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
int number_devices, struct psp_xgmi_topology_info *topology)
{
- return 0;
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ int i;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to set topology information */
+ return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}
static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
{
- u64 hive_id = 0;
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
- /* Remove me when we can get correct hive_id through PSP */
- if (psp->adev->gmc.xgmi.num_physical_nodes)
- hive_id = 0x123456789abcdef;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
- return hive_id;
+ /* Invoke xgmi ta to get hive id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return 0;
+ else
+ return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+}
+
+static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
+
+ /* Invoke xgmi ta to get the node id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return 0;
+ else
+ return xgmi_cmd->xgmi_out_message.get_node_id.node_id;
}
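
Editor's note: all three XGMI queries above follow the same trusted-application round trip: zero the TA shared buffer, fill the input message, set cmd_id, invoke the TA, then read the result from the output message in the same buffer. A skeleton of that pattern (structure and helper names are taken from the hunk; error handling and input population trimmed):

/* Skeleton of the TA round trip used by the XGMI helpers above. */
static int xgmi_query(struct psp_context *psp, int cmd_id)
{
	struct ta_xgmi_shared_memory *buf =
		(struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
	int ret;

	memset(buf, 0, sizeof(*buf));  /* 1. clear the shared buffer     */
	buf->cmd_id = cmd_id;          /* 2. select the TA command       */
	/* 3. fill buf->xgmi_in_message.<cmd> if the command takes input */

	ret = psp_xgmi_invoke(psp, cmd_id);  /* 4. run the TA */
	if (ret)
		return ret;

	/* 5. results are now in buf->xgmi_out_message.<cmd> */
	return 0;
}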
static const struct psp_funcs psp_v11_0_funcs = {
@@ -587,6 +731,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
.xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
+ .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index e1ebf770c303..7357fd56e614 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -194,7 +194,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
/* Copy PSP System Driver binary to memory */
memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
- /* Provide the sys driver to bootrom */
+ /* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 1 << 16;
@@ -254,7 +254,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* Copy Secure OS binary to PSP memory */
memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
- /* Provide the PSP secure OS to bootrom */
+ /* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 2 << 16;
@@ -356,12 +356,9 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- struct psp_ring *ring;
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- ring = &psp->km_ring;
-
/* Write the ring destroy command to C2PMSG_64 */
psp_ring_reg = 3 << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
@@ -593,9 +590,9 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
}
/*send the mode 1 reset command*/
- WREG32(offset, 0x70000);
+ WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
- mdelay(1000);
+ msleep(500);
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
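
Editor's note: both mode-1 reset paths also trade mdelay(1000) for msleep(500): mdelay() busy-waits with the CPU spinning for the whole interval, while msleep() actually sleeps and is the right primitive for long waits in process context. General kernel-API semantics, not something introduced by this patch:

mdelay(1000); /* busy-waits ~1s, CPU spins, usable in atomic context */
msleep(500);  /* sleeps ~500ms, yields the CPU, process context only */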
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 2d4770e173dd..9f3cb2aec7c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -225,7 +225,7 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -245,9 +245,12 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
	/* IB packet must end on an 8 DW boundary */
sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -349,8 +352,8 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -471,17 +474,15 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- ring->ready = true;
+ ring->sched.ready = true;
}
sdma_v2_4_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -550,21 +551,16 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -581,15 +577,11 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -612,20 +604,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -644,21 +632,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -760,7 +743,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
*/
static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
@@ -1105,8 +1088,14 @@ static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ u8 instance_id, queue_id;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+ instance_id = (entry->ring_id & 0x3) >> 0;
+ queue_id = (entry->ring_id & 0xc) >> 2;
+
+ if (instance_id <= 1 && queue_id == 0)
+ drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
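
Editor's note: as on the GFX side, the SDMA illegal-instruction interrupt now faults only the scheduler of the affected ring instead of scheduling a device reset, and the target is decoded from the low bits of ring_id. A standalone worked example (illustrative value):

#include <stdio.h>

int main(void)
{
	unsigned ring_id  = 0x5;                  /* example IV ring_id */
	unsigned instance = (ring_id & 0x3) >> 0; /* 1: SDMA1           */
	unsigned queue    = (ring_id & 0xc) >> 2; /* 1: not gfx queue 0 */

	/* the handler only faults instance 0/1, queue 0 */
	printf("%s\n", (instance <= 1 && queue == 0) ?
	       "fault delivered to ring scheduler" : "fault ignored");
	return 0;
}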
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 6fb3edaba0ec..1bccc5fe2d9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -399,7 +399,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -419,9 +419,12 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
	/* IB packet must end on an 8 DW boundary */
sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -523,8 +526,8 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -739,7 +742,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- ring->ready = true;
+ ring->sched.ready = true;
}
/* unhalt the MEs */
@@ -749,11 +752,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -822,21 +823,16 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -853,15 +849,11 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -884,20 +876,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -916,21 +904,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -1031,7 +1014,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
*/
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
@@ -1163,7 +1146,7 @@ static int sdma_v3_0_sw_init(void *handle)
if (!amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
ring->doorbell_index = (i == 0) ?
- AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.sdma_engine0 : adev->doorbell_index.sdma_engine1;
} else {
ring->use_pollmem = true;
}
@@ -1440,8 +1423,14 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ u8 instance_id, queue_id;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+ instance_id = (entry->ring_id & 0x3) >> 0;
+ queue_id = (entry->ring_id & 0xc) >> 2;
+
+ if (instance_id <= 1 && queue_id == 0)
+ drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
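
Editor's note: the recurring test-then-mark-ready sequence is folded into amdgpu_ring_test_helper() throughout this patch. Judging from the call sites, it presumably runs the ring test, logs a failure, and records the outcome in ring->sched.ready; a guess at its shape, not the actual implementation:

/* Plausible shape inferred from the call sites; not the real helper. */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	int r = amdgpu_ring_test_ring(ring);

	if (r)
		DRM_DEV_ERROR(ring->adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);

	ring->sched.ready = !r; /* scheduler readiness follows the result */
	return r;
}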
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7a8c9172d30a..4b6d3e5c821f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -54,6 +54,11 @@ MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
+#define WREG32_SDMA(instance, offset, value) \
+ WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)
+#define RREG32_SDMA(instance, offset) \
+ RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))
+
static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -367,16 +372,11 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
- u32 lowbit, highbit;
-
- lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
- highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
-
- DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
- ring->me, highbit, lowbit);
- wptr = highbit;
+ wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
wptr = wptr << 32;
- wptr |= lowbit;
+ wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
+ DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
+ ring->me, wptr);
}
return wptr >> 2;
@@ -417,14 +417,67 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
lower_32_bits(ring->wptr << 2),
ring->me,
upper_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR,
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI,
+ upper_32_bits(ring->wptr << 2));
+ }
+}
+
+/**
+ * sdma_v4_0_page_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (VEGA10+).
+ */
+static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u64 wptr;
+
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ } else {
+ wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
+ wptr = wptr << 32;
+ wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
+ }
+
+ return wptr >> 2;
+}
+
+/**
+ * sdma_v4_0_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware (VEGA10+).
+ */
+static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
+ /* XXX check if swapping is necessary on BE */
+ WRITE_ONCE(*wb, (ring->wptr << 2));
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ } else {
+ uint64_t wptr = ring->wptr << 2;
+
+ WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
+ lower_32_bits(wptr));
+ WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
+ upper_32_bits(wptr));
}
}
static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -444,9 +497,12 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VEGA10).
*/
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	/* IB packet must end on an 8 DW boundary */
sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -568,16 +624,16 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
- rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
+ ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -593,6 +649,39 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
}
/**
+ * sdma_v4_0_page_stop - stop the page async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the page async dma ring buffers (VEGA10).
+ */
+static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].page;
+ struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].page;
+ u32 rb_cntl, ib_cntl;
+ int i;
+
+ if ((adev->mman.buffer_funcs_ring == sdma0) ||
+ (adev->mman.buffer_funcs_ring == sdma1))
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
+ RB_ENABLE, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
+ ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
+ IB_ENABLE, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
+ }
+
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
+}
+
+/**
 * sdma_v4_0_ctx_switch_enable - enable/disable context switching for the async dma engines
*
* @adev: amdgpu_device pointer
@@ -630,18 +719,15 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, enable ? 1 : 0);
if (enable && amdgpu_sdma_phase_quantum) {
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
- phase_quantum);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
- phase_quantum);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
- phase_quantum);
+ WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum);
+ WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum);
+ WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
}
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+ WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
}
}
@@ -662,156 +748,215 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
if (enable == false) {
sdma_v4_0_gfx_stop(adev);
sdma_v4_0_rlc_stop(adev);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_0_page_stop(adev);
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+ WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl);
}
}
/**
+ * sdma_v4_0_rb_cntl - fill in the ring buffer control register value
+ */
+static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
+{
+ /* Set ring buffer size in dwords */
+ uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+#ifdef __BIG_ENDIAN
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
+#endif
+ return rb_cntl;
+}
+
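The RB_SIZE field programmed by this helper stores log2 of the ring size in dwords, hence the order_base_2(ring->ring_size / 4). A sketch of that encoding with a user-space stand-in for the kernel's order_base_2() (which, unlike this power-of-two-only version, rounds up for other values):

#include <stdint.h>
#include <assert.h>

static uint32_t order_base_2(uint32_t n)	/* assumes n is a power of 2 */
{
	return 31 - __builtin_clz(n);
}

int main(void)
{
	uint32_t ring_size = 1024 * 4;		/* ring size in bytes */
	uint32_t rb_bufsz = order_base_2(ring_size / 4);

	assert(rb_bufsz == 10);			/* 1024 dwords -> 2^10 */
	return 0;
}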
+/**
* sdma_v4_0_gfx_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
+ * @i: instance to resume
*
* Set up the gfx DMA ring buffers and enable them (VEGA10).
* Returns 0 for success, error for failure.
*/
-static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
+static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
{
- struct amdgpu_ring *ring;
+ struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
- u32 rb_bufsz;
u32 wb_offset;
u32 doorbell;
u32 doorbell_offset;
- u32 temp;
u64 wptr_gpu_addr;
- int i, r;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
- wb_offset = (ring->rptr_offs * 4);
+ wb_offset = (ring->rptr_offs * 4);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
+ rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
-#ifdef __BIG_ENDIAN
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
- RPTR_WRITEBACK_SWAP_ENABLE, 1);
-#endif
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0);
- /* Initialize the ring buffer's read and write pointers */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+ /* set the wb address whether it's enabled or not */
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI,
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO,
+ lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
- /* set the wb address whether it's enabled or not */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
- upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
- lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+ RPTR_WRITEBACK_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+ ring->wptr = 0;
- ring->wptr = 0;
+	/* before programming wptr to a smaller value, minor_ptr_update must be set first */
+ WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1);
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
-
- if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
- }
+ doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL);
+ doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET);
- doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
- doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
-
- if (ring->use_doorbell) {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE,
+ ring->use_doorbell);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset,
+ SDMA0_GFX_DOORBELL_OFFSET,
OFFSET, ring->doorbell_index);
- } else {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
- }
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index);
+ WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
+ WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);
+ adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index);
+
+ sdma_v4_0_ring_set_wptr(ring);
+
+	/* set minor_ptr_update to 0 after wptr is programmed */
+ WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0);
+
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
+
+ ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+ /* enable DMA IBs */
+ WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
- if (amdgpu_sriov_vf(adev))
- sdma_v4_0_ring_set_wptr(ring);
+ ring->sched.ready = true;
+}
- /* set minor_ptr_update to 0 after wptr programed */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+/**
+ * sdma_v4_0_page_resume - set up and start the page async dma engine
+ *
+ * @adev: amdgpu_device pointer
+ * @i: instance to resume
+ *
+ * Set up the page DMA ring buffers and enable them (VEGA10).
+ */
+static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
+{
+ struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
+ u32 wb_offset;
+ u32 doorbell;
+ u32 doorbell_offset;
+ u64 wptr_gpu_addr;
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+ wb_offset = (ring->rptr_offs * 4);
- if (!amdgpu_sriov_vf(adev)) {
- /* unhalt engine */
- temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
- }
+ rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
+ rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
- /* setup the wptr shadow polling */
- wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
- lower_32_bits(wptr_gpu_addr));
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
- upper_32_bits(wptr_gpu_addr));
- wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
- if (amdgpu_sriov_vf(adev))
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
- else
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0);
- /* enable DMA RB */
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ /* set the wb address whether it's enabled or not */
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI,
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO,
+ lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
- ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
-#ifdef __BIG_ENDIAN
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
-#endif
- /* enable DMA IBs */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
+ RPTR_WRITEBACK_ENABLE, 1);
- ring->ready = true;
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v4_0_ctx_switch_enable(adev, true);
- sdma_v4_0_enable(adev, true);
- }
+ ring->wptr = 0;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
- return r;
- }
+	/* before programming wptr to a smaller value, minor_ptr_update must be set first */
+ WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1);
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL);
+ doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET);
- }
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE,
+ ring->use_doorbell);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset,
+ SDMA0_PAGE_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
+ WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
+
+	/* the paging queue doorbell range is set up in sdma_v4_0_gfx_resume */
+ sdma_v4_0_page_ring_set_wptr(ring);
+
+	/* set minor_ptr_update to 0 after wptr is programmed */
+ WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
+
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_PAGE_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
+
+ ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+ /* enable DMA IBs */
+ WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
- return 0;
+ ring->sched.ready = true;
}
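Throughout both resume paths the patch collapses if/else pairs like the old F32_POLL_ENABLE handling into a single REG_SET_FIELD whose value is the boolean itself (amdgpu_sriov_vf(adev)); under SR-IOV the engine then polls the shadow wptr in memory instead of relying on direct register writes. A sketch of the read-modify-write idiom, with made-up mask/shift values:

#include <stdint.h>
#include <stdio.h>

#define F32_POLL_ENABLE_MASK	0x00000001u	/* illustrative only */
#define F32_POLL_ENABLE_SHIFT	0

static uint32_t reg_set_field(uint32_t reg, uint32_t mask,
			      uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	int is_vf = 1;			/* models amdgpu_sriov_vf(adev) */
	uint32_t wptr_poll_cntl = 0xabcd0000;

	/* enable == is_vf, no if/else needed */
	wptr_poll_cntl = reg_set_field(wptr_poll_cntl, F32_POLL_ENABLE_MASK,
				       F32_POLL_ENABLE_SHIFT, is_vf);
	printf("0x%08x\n", wptr_poll_cntl);
	return 0;
}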
static void
@@ -922,12 +1067,14 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
(adev->sdma.instance[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
+ WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0);
for (j = 0; j < fw_size; j++)
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
+ WREG32_SDMA(i, mmSDMA0_UCODE_DATA,
+ le32_to_cpup(fw_data++));
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
+ WREG32_SDMA(i, mmSDMA0_UCODE_ADDR,
+ adev->sdma.instance[i].fw_version);
}
return 0;
@@ -943,33 +1090,78 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
*/
static int sdma_v4_0_start(struct amdgpu_device *adev)
{
- int r = 0;
+ struct amdgpu_ring *ring;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
+ } else {
- /* set RB registers */
- r = sdma_v4_0_gfx_resume(adev);
- return r;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ r = sdma_v4_0_load_microcode(adev);
+ if (r)
+ return r;
+ }
+
+ /* unhalt the MEs */
+ sdma_v4_0_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v4_0_ctx_switch_enable(adev, true);
}
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- r = sdma_v4_0_load_microcode(adev);
+ /* start the gfx rings and rlc compute queues */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ uint32_t temp;
+
+ WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+ sdma_v4_0_gfx_resume(adev, i);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_0_page_resume(adev, i);
+
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32_SDMA(i, mmSDMA0_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+ WREG32_SDMA(i, mmSDMA0_CNTL, temp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ /* unhalt engine */
+ temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+ WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp);
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev)) {
+ sdma_v4_0_ctx_switch_enable(adev, true);
+ sdma_v4_0_enable(adev, true);
+ } else {
+ r = sdma_v4_0_rlc_resume(adev);
if (r)
return r;
}
- /* unhalt the MEs */
- sdma_v4_0_enable(adev, true);
- /* enable sdma ring preemption */
- sdma_v4_0_ctx_switch_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
- /* start the gfx rings and rlc compute queues */
- r = sdma_v4_0_gfx_resume(adev);
- if (r)
- return r;
- r = sdma_v4_0_rlc_resume(adev);
+ if (adev->sdma.has_page_queue) {
+ struct amdgpu_ring *page = &adev->sdma.instance[i].page;
+
+ r = amdgpu_ring_test_helper(page);
+ if (r)
+ return r;
+
+ if (adev->mman.buffer_funcs_ring == page)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
return r;
}
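sdma_v4_0_start() now funnels every queue through amdgpu_ring_test_helper() instead of open-coding the test plus ready-flag bookkeeping. The helper's body isn't shown in this patch; a plausible minimal shape, consistent with how the callers use it, is:

struct sched { int ready; };
struct ring { struct sched sched; };

static int ring_test(struct ring *ring)	/* stand-in for the HW test */
{
	(void)ring;
	return 0;			/* 0 == test passed */
}

/* plausible shape of amdgpu_ring_test_helper() (sketch, not the exact
 * kernel implementation): run the test and fold the result into the
 * scheduler's ready flag in one place */
static int ring_test_helper(struct ring *ring)
{
	int r = ring_test(ring);

	ring->sched.ready = (r == 0);
	return r;
}

int main(void)
{
	struct ring rg = { { 0 } };

	return ring_test_helper(&rg) ? 1 : !rg.sched.ready;
}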
@@ -993,21 +1185,16 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -1024,15 +1211,11 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
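The reworked ring test drops the per-branch logging and frees the writeback slot through a single error label, so allocation failure after the slot is taken, timeout, and success all share one exit path. The shape, reduced to a user-space sketch:

#include <errno.h>
#include <stdlib.h>

static int ring_test(void)
{
	int r = 0;
	void *wb = malloc(4);		/* models amdgpu_device_wb_get() */

	if (!wb)
		return -ENOMEM;

	if (/* ring allocation fails */ 0) {
		r = -EBUSY;
		goto error_free_wb;
	}

	/* ... submit packet, poll for 0xDEADBEEF, set -ETIMEDOUT ... */

error_free_wb:
	free(wb);			/* models amdgpu_device_wb_free() */
	return r;
}

int main(void) { return ring_test(); }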
@@ -1055,20 +1238,16 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -1087,21 +1266,17 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
+
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -1206,7 +1381,7 @@ static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
*/
static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
@@ -1272,15 +1447,46 @@ static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}
+static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
+{
+ uint fw_version = adev->sdma.instance[0].fw_version;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return fw_version >= 430;
+ case CHIP_VEGA12:
+ /*return fw_version >= 31;*/
+ return false;
+ case CHIP_VEGA20:
+ /*return fw_version >= 115;*/
+ return false;
+ default:
+ return false;
+ }
+}
+
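The helper above gates paging-queue support per ASIC on SDMA firmware version, with the VEGA12 and VEGA20 thresholds present but commented out, so only VEGA10 with firmware >= 430 qualifies for now. The same decision expressed as a data table (equivalent logic, illustrative only):

#include <stdbool.h>
#include <stdio.h>

enum asic { VEGA10, VEGA12, VEGA20, OTHER };

struct fw_gate { enum asic asic; unsigned min_fw; bool enabled; };

static const struct fw_gate gates[] = {
	{ VEGA10, 430, true  },
	{ VEGA12,  31, false },		/* kept disabled upstream for now */
	{ VEGA20, 115, false },
};

static bool fw_supports_paging_queue(enum asic a, unsigned fw)
{
	for (unsigned i = 0; i < sizeof(gates) / sizeof(gates[0]); i++)
		if (gates[i].asic == a)
			return gates[i].enabled && fw >= gates[i].min_fw;
	return false;
}

int main(void)
{
	printf("%d\n", fw_supports_paging_queue(VEGA10, 431));	/* 1 */
	return 0;
}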
static int sdma_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
if (adev->asic_type == CHIP_RAVEN)
adev->sdma.num_instances = 1;
else
adev->sdma.num_instances = 2;
+ r = sdma_v4_0_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load sdma firmware!\n");
+ return r;
+ }
+
+ /* TODO: Page queue breaks driver reload under SRIOV */
+	if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_vf(adev))
+ adev->sdma.has_page_queue = false;
+ else if (sdma_v4_0_fw_support_paging_queue(adev))
+ adev->sdma.has_page_queue = true;
+
sdma_v4_0_set_ring_funcs(adev);
sdma_v4_0_set_buffer_funcs(adev);
sdma_v4_0_set_vm_pte_funcs(adev);
@@ -1289,7 +1495,6 @@ static int sdma_v4_0_early_init(void *handle)
return 0;
}
-
static int sdma_v4_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
@@ -1308,12 +1513,6 @@ static int sdma_v4_0_sw_init(void *handle)
if (r)
return r;
- r = sdma_v4_0_init_microcode(adev);
- if (r) {
- DRM_ERROR("Failed to load sdma firmware!\n");
- return r;
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1322,15 +1521,10 @@ static int sdma_v4_0_sw_init(void *handle)
DRM_INFO("use_doorbell being set to: [%s]\n",
ring->use_doorbell?"true":"false");
- if (adev->asic_type == CHIP_VEGA10)
- ring->doorbell_index = (i == 0) ?
- (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
- : (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
- else
- ring->doorbell_index = (i == 0) ?
- (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
- : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
-
+ /* doorbell size is 2 dwords, get DWORD offset */
+ ring->doorbell_index = (i == 0) ?
+ (adev->doorbell_index.sdma_engine0 << 1)
+ : (adev->doorbell_index.sdma_engine1 << 1);
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1340,6 +1534,29 @@ static int sdma_v4_0_sw_init(void *handle)
AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
+
+ if (adev->sdma.has_page_queue) {
+ ring = &adev->sdma.instance[i].page;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+
+		/* the paging queue uses the same doorbell index/routing as
+		 * the gfx queue, with a 0x400 dword (4 KiB) offset onto the
+		 * second doorbell page
+		 */
+ ring->doorbell_index = (i == 0) ?
+ (adev->doorbell_index.sdma_engine0 << 1)
+ : (adev->doorbell_index.sdma_engine1 << 1);
+ ring->doorbell_index += 0x400;
+
+ sprintf(ring->name, "page%d", i);
+ r = amdgpu_ring_init(adev, ring, 1024,
+ &adev->sdma.trap_irq,
+ (i == 0) ?
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
+ if (r)
+ return r;
+ }
}
return r;
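The doorbell arithmetic in sw_init is worth spelling out: each 64-bit SDMA doorbell spans two dwords, so the dword index is the engine index shifted left by one, and the paging queue adds 0x400 dwords to land on the second doorbell page. With a hypothetical base index:

#include <stdio.h>

int main(void)
{
	unsigned sdma_engine0 = 0xE0;	/* hypothetical base index */

	unsigned gfx_db  = sdma_engine0 << 1;		 /* 2 dwords each */
	unsigned page_db = (sdma_engine0 << 1) + 0x400;	 /* next 4K page */

	printf("gfx=0x%x page=0x%x\n", gfx_db, page_db);
	return 0;
}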
@@ -1350,8 +1567,11 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ if (adev->sdma.has_page_queue)
+ amdgpu_ring_fini(&adev->sdma.instance[i].page);
+ }
for (i = 0; i < adev->sdma.num_instances; i++) {
release_firmware(adev->sdma.instance[i].fw);
@@ -1414,7 +1634,7 @@ static bool sdma_v4_0_is_idle(void *handle)
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- u32 tmp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
+ u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG);
if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
return false;
@@ -1430,8 +1650,8 @@ static int sdma_v4_0_wait_for_idle(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- sdma0 = RREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
- sdma1 = RREG32(sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
+ sdma0 = RREG32_SDMA(0, mmSDMA0_STATUS_REG);
+ sdma1 = RREG32_SDMA(1, mmSDMA0_STATUS_REG);
if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
return 0;
@@ -1452,16 +1672,13 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
+ unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1;
u32 sdma_cntl;
- u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
- sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
- sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
-
- sdma_cntl = RREG32(reg_offset);
+ sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32(reg_offset, sdma_cntl);
+ WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl);
return 0;
}
@@ -1470,39 +1687,32 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ uint32_t instance;
+
DRM_DEBUG("IH: SDMA trap\n");
switch (entry->client_id) {
case SOC15_IH_CLIENTID_SDMA0:
- switch (entry->ring_id) {
- case 0:
- amdgpu_fence_process(&adev->sdma.instance[0].ring);
- break;
- case 1:
- /* XXX compute */
- break;
- case 2:
- /* XXX compute */
- break;
- case 3:
- /* XXX page queue*/
- break;
- }
+ instance = 0;
break;
case SOC15_IH_CLIENTID_SDMA1:
- switch (entry->ring_id) {
- case 0:
- amdgpu_fence_process(&adev->sdma.instance[1].ring);
- break;
- case 1:
- /* XXX compute */
- break;
- case 2:
- /* XXX compute */
- break;
- case 3:
- /* XXX page queue*/
- break;
- }
+ instance = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (entry->ring_id) {
+ case 0:
+ amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+ break;
+ case 1:
+ /* XXX compute */
+ break;
+ case 2:
+ /* XXX compute */
+ break;
+ case 3:
+ amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
}
return 0;
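The trap handler now decodes in two stages: client id selects the SDMA instance, ring id selects the queue, which lets ring 3 (the page queue) get real fence processing instead of an XXX placeholder duplicated per client. A sketch of the dispatch with hypothetical client ids:

#include <stdio.h>

enum { CLIENT_SDMA0 = 10, CLIENT_SDMA1 = 11 };	/* hypothetical ids */

static void process_trap(unsigned client_id, unsigned ring_id)
{
	int instance;

	switch (client_id) {
	case CLIENT_SDMA0: instance = 0; break;
	case CLIENT_SDMA1: instance = 1; break;
	default:           return;	/* not an SDMA client */
	}

	switch (ring_id) {
	case 0: printf("fence: sdma%d gfx ring\n", instance);  break;
	case 3: printf("fence: sdma%d page ring\n", instance); break;
	default: break;			/* compute queues: unhandled */
	}
}

int main(void)
{
	process_trap(CLIENT_SDMA1, 3);
	return 0;
}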
@@ -1512,12 +1722,29 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ int instance;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+
+ switch (entry->client_id) {
+ case SOC15_IH_CLIENTID_SDMA0:
+ instance = 0;
+ break;
+ case SOC15_IH_CLIENTID_SDMA1:
+ instance = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (entry->ring_id) {
+ case 0:
+ drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
+ break;
+ }
return 0;
}
-
static void sdma_v4_0_update_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
@@ -1730,6 +1957,38 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = true,
+ .vmhub = AMDGPU_MMHUB,
+ .get_rptr = sdma_v4_0_ring_get_rptr,
+ .get_wptr = sdma_v4_0_page_ring_get_wptr,
+ .set_wptr = sdma_v4_0_page_ring_set_wptr,
+ .emit_frame_size =
+ 6 + /* sdma_v4_0_ring_emit_hdp_flush */
+ 3 + /* hdp invalidate */
+ 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
+ /* sdma_v4_0_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
+ .emit_ib = sdma_v4_0_ring_emit_ib,
+ .emit_fence = sdma_v4_0_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
+ .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
+ .test_ring = sdma_v4_0_ring_test_ring,
+ .test_ib = sdma_v4_0_ring_test_ib,
+ .insert_nop = sdma_v4_0_ring_insert_nop,
+ .pad_ib = sdma_v4_0_ring_pad_ib,
+ .emit_wreg = sdma_v4_0_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
@@ -1737,6 +1996,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
adev->sdma.instance[i].ring.me = i;
+ if (adev->sdma.has_page_queue) {
+ adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
+ adev->sdma.instance[i].page.me = i;
+ }
}
}
@@ -1818,7 +2081,10 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+ if (adev->sdma.has_page_queue)
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+ else
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
@@ -1836,7 +2102,10 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
+ if (adev->sdma.has_page_queue)
+ sched = &adev->sdma.instance[i].page.sched;
+ else
+ sched = &adev->sdma.instance[i].ring.sched;
adev->vm_manager.vm_pte_rqs[i] =
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index adbaea6da0d7..b6e473134e19 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -61,9 +61,11 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
}
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
* Pad as necessary with NOPs.
*/
@@ -122,7 +124,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
- ring->ready = false;
+ ring->sched.ready = false;
}
}
@@ -175,13 +177,11 @@ static int si_dma_start(struct amdgpu_device *adev)
WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
- ring->ready = true;
+ ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -209,21 +209,16 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 4);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
@@ -238,15 +233,11 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -269,20 +260,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
ib.ptr[1] = lower_32_bits(gpu_addr);
@@ -295,21 +282,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -658,15 +640,6 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
-static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
- return 0;
-}
-
static int si_dma_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -781,15 +754,10 @@ static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
.process = si_dma_process_trap_irq,
};
-static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
- .process = si_dma_process_illegal_inst_irq,
-};
-
static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
- adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index b3d7d9f83202..2938fb9f17cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -118,19 +118,6 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev)
return (wptr & adev->irq.ih.ptr_mask);
}
-/**
- * si_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- /* Process all interrupts */
- return true;
-}
-
static void si_ih_decode_iv(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
@@ -301,7 +288,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
static const struct amdgpu_ih_funcs si_ih_funcs = {
.get_wptr = si_ih_get_wptr,
- .prescreen_iv = si_ih_prescreen_iv,
.decode_iv = si_ih_decode_iv,
.set_rptr = si_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index bf5e6a413dee..8849b74078d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -65,6 +65,13 @@
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
+/* for Vega20 register name change */
+#define mmHDP_MEM_POWER_CTRL 0x00d4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
+#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
/*
* Indirect registers accessor
*/
@@ -500,6 +507,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
return -EINVAL;
}
+ if (adev->asic_type == CHIP_VEGA20)
+ adev->gmc.xgmi.supported = true;
+
if (adev->flags & AMD_IS_APU)
adev->nbio_funcs = &nbio_v7_0_funcs;
else if (adev->asic_type == CHIP_VEGA20)
@@ -606,6 +616,24 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.flush_hdp = &soc15_flush_hdp,
.invalidate_hdp = &soc15_invalidate_hdp,
.need_full_reset = &soc15_need_full_reset,
+ .init_doorbell_index = &vega10_doorbell_index_init,
+};
+
+static const struct amdgpu_asic_funcs vega20_asic_funcs =
+{
+ .read_disabled_bios = &soc15_read_disabled_bios,
+ .read_bios_from_rom = &soc15_read_bios_from_rom,
+ .read_register = &soc15_read_register,
+ .reset = &soc15_asic_reset,
+ .set_vga_state = &soc15_vga_set_state,
+ .get_xclk = &soc15_get_xclk,
+ .set_uvd_clocks = &soc15_set_uvd_clocks,
+ .set_vce_clocks = &soc15_set_vce_clocks,
+ .get_config_memsize = &soc15_get_config_memsize,
+ .flush_hdp = &soc15_flush_hdp,
+ .invalidate_hdp = &soc15_invalidate_hdp,
+ .need_full_reset = &soc15_need_full_reset,
+ .init_doorbell_index = &vega20_doorbell_index_init,
};
static int soc15_common_early_init(void *handle)
@@ -625,11 +653,11 @@ static int soc15_common_early_init(void *handle)
adev->se_cac_rreg = &soc15_se_cac_rreg;
adev->se_cac_wreg = &soc15_se_cac_wreg;
- adev->asic_funcs = &soc15_asic_funcs;
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
case CHIP_VEGA10:
+ adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
@@ -653,6 +681,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = 0x1;
break;
case CHIP_VEGA12:
+ adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -675,6 +704,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_VEGA20:
+ adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -697,6 +727,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x28;
break;
case CHIP_RAVEN:
+ adev->asic_funcs = &soc15_asic_funcs;
if (adev->rev_id >= 0x8)
adev->external_rev_id = adev->rev_id + 0x81;
else if (adev->pdev->device == 0x15d8)
@@ -870,15 +901,33 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
{
uint32_t def, data;
- def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+ if (adev->asic_type == CHIP_VEGA20) {
+ def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
- data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
- else
- data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
+ else
+ data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
- if (def != data)
- WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+ if (def != data)
+ WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
+ } else {
+ def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+ else
+ data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+ }
}
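The VEGA20 branch keeps the def/data idiom used across soc15 clock gating: snapshot the register once, derive the desired value, and write back only if it changed, so repeated state updates don't generate redundant MMIO writes. In miniature:

#include <stdint.h>
#include <stdbool.h>

static uint32_t reg;			/* stand-in for the HDP register */
static unsigned writes;

static void update_ls(bool enable, uint32_t mask)
{
	uint32_t def, data;

	def = data = reg;		/* one read */
	if (enable)
		data |= mask;
	else
		data &= ~mask;

	if (def != data) {		/* write only on change */
		reg = data;
		writes++;
	}
}

int main(void)
{
	update_ls(true, 0x3);		/* writes == 1 */
	update_ls(true, 0x3);		/* no-op, still 1 */
	return writes == 1 ? 0 : 1;
}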
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index f8ad7804dc40..a66c8bfbbaa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -58,4 +58,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
+void vega10_doorbell_index_init(struct amdgpu_device *adev);
+void vega20_doorbell_index_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
new file mode 100644
index 000000000000..ac2c27b7630c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_XGMI_IF_H
+#define _TA_XGMI_IF_H
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+enum ta_command_xgmi {
+ TA_COMMAND_XGMI__INITIALIZE = 0x00,
+ TA_COMMAND_XGMI__GET_NODE_ID = 0x01,
+ TA_COMMAND_XGMI__GET_HIVE_ID = 0x02,
+ TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO = 0x03,
+ TA_COMMAND_XGMI__SET_TOPOLOGY_INFO = 0x04
+};
+
+/* XGMI related enumerations */
+/**********************************************************/
+enum ta_xgmi_connected_nodes {
+ TA_XGMI__MAX_CONNECTED_NODES = 64
+};
+
+enum ta_xgmi_status {
+ TA_XGMI_STATUS__SUCCESS = 0x00,
+ TA_XGMI_STATUS__GENERIC_FAILURE = 0x01,
+ TA_XGMI_STATUS__NULL_POINTER = 0x02,
+ TA_XGMI_STATUS__INVALID_PARAMETER = 0x03,
+ TA_XGMI_STATUS__NOT_INITIALIZED = 0x04,
+ TA_XGMI_STATUS__INVALID_NODE_NUM = 0x05,
+ TA_XGMI_STATUS__INVALID_NODE_ID = 0x06,
+ TA_XGMI_STATUS__INVALID_TOPOLOGY = 0x07,
+ TA_XGMI_STATUS__FAILED_ID_GEN = 0x08,
+ TA_XGMI_STATUS__FAILED_TOPOLOGY_INIT = 0x09,
+ TA_XGMI_STATUS__SET_SHARING_ERROR = 0x0A
+};
+
+enum ta_xgmi_assigned_sdma_engine {
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__NOT_ASSIGNED = -1,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA0 = 0,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA1 = 1,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA2 = 2,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA3 = 3,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA4 = 4,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA5 = 5
+};
+
+/* input/output structures for XGMI commands */
+/**********************************************************/
+struct ta_xgmi_node_info {
+ uint64_t node_id;
+ uint8_t num_hops;
+ uint8_t is_sharing_enabled;
+ enum ta_xgmi_assigned_sdma_engine sdma_engine;
+};
+
+struct ta_xgmi_cmd_initialize_output {
+ uint32_t status;
+};
+
+struct ta_xgmi_cmd_get_node_id_output {
+ uint64_t node_id;
+};
+
+struct ta_xgmi_cmd_get_hive_id_output {
+ uint64_t hive_id;
+};
+
+struct ta_xgmi_cmd_get_topology_info_input {
+ uint32_t num_nodes;
+ struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+struct ta_xgmi_cmd_get_topology_info_output {
+ uint32_t num_nodes;
+ struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+struct ta_xgmi_cmd_set_topology_info_input {
+ uint32_t num_nodes;
+ struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+/**********************************************************/
+/* Common input structure for XGMI callbacks */
+union ta_xgmi_cmd_input {
+ struct ta_xgmi_cmd_get_topology_info_input get_topology_info;
+ struct ta_xgmi_cmd_set_topology_info_input set_topology_info;
+};
+
+/* Common output structure for XGMI callbacks */
+union ta_xgmi_cmd_output {
+ struct ta_xgmi_cmd_initialize_output initialize;
+ struct ta_xgmi_cmd_get_node_id_output get_node_id;
+ struct ta_xgmi_cmd_get_hive_id_output get_hive_id;
+ struct ta_xgmi_cmd_get_topology_info_output get_topology_info;
+};
+/**********************************************************/
+
+struct ta_xgmi_shared_memory {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ enum ta_xgmi_status xgmi_status;
+ uint32_t reserved;
+ union ta_xgmi_cmd_input xgmi_in_message;
+ union ta_xgmi_cmd_output xgmi_out_message;
+};
+
+#endif //_TA_XGMI_IF_H
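The new header defines a mailbox-style interface: the driver fills ta_xgmi_shared_memory with a cmd_id, the TA answers with resp_id = RSP_ID(cmd_id) plus a status, and the caller reads the matching member of the output union. A hedged user-space sketch of a GET_HIVE_ID round trip, with psp_submit() as a hypothetical stand-in for the real PSP transport and the structure trimmed to what the example needs:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RSP_ID_MASK (1U << 31)
#define RSP_ID(c)   (((uint32_t)(c)) | RSP_ID_MASK)

/* trimmed-down mirror of the header's structures */
struct xgmi_shared_memory {
	uint32_t cmd_id;
	uint32_t resp_id;
	uint32_t xgmi_status;		/* 0 == TA_XGMI_STATUS__SUCCESS */
	uint32_t reserved;
	union { uint64_t hive_id; } out;
};

static void psp_submit(struct xgmi_shared_memory *m)	/* hypothetical */
{
	m->resp_id = RSP_ID(m->cmd_id);
	m->xgmi_status = 0;
	m->out.hive_id = 0xDEADBEEFULL;	/* pretend answer from the TA */
}

int main(void)
{
	struct xgmi_shared_memory m;

	memset(&m, 0, sizeof(m));
	m.cmd_id = 0x02;		/* TA_COMMAND_XGMI__GET_HIVE_ID */
	psp_submit(&m);

	if (m.resp_id == RSP_ID(m.cmd_id) && m.xgmi_status == 0)
		printf("hive id: 0x%llx\n",
		       (unsigned long long)m.out.hive_id);
	return 0;
}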
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 3abffd06b5c7..15da06ddeb75 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -219,34 +219,6 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * tonga_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* tonga_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -322,7 +294,7 @@ static int tonga_ih_sw_init(void *handle)
return r;
adev->irq.ih.use_doorbell = true;
- adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
r = amdgpu_irq_init(adev);
@@ -506,7 +478,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
.get_wptr = tonga_ih_get_wptr,
- .prescreen_iv = tonga_ih_prescreen_iv,
.decode_iv = tonga_ih_decode_iv,
.set_rptr = tonga_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 1fc17bf39fed..d69c8f6daaf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
return r;
@@ -162,12 +162,9 @@ static int uvd_v4_2_hw_init(void *handle)
uvd_v4_2_enable_mgcg(adev, true);
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
r = amdgpu_ring_alloc(ring, 10);
if (r) {
@@ -218,7 +215,7 @@ static int uvd_v4_2_hw_fini(void *handle)
if (RREG32(mmUVD_STATUS) != 0)
uvd_v4_2_stop(adev);
- ring->ready = false;
+ ring->sched.ready = false;
return 0;
}
@@ -484,11 +481,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
@@ -499,14 +494,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -519,8 +509,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
amdgpu_ring_write(ring, ib->gpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index fde6ad5ac9ab..ee8cd06ddc38 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
return r;
@@ -158,12 +158,9 @@ static int uvd_v5_0_hw_init(void *handle)
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v5_0_enable_mgcg(adev, true);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
r = amdgpu_ring_alloc(ring, 10);
if (r) {
@@ -215,7 +212,7 @@ static int uvd_v5_0_hw_fini(void *handle)
if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev);
- ring->ready = false;
+ ring->sched.ready = false;
return 0;
}
@@ -500,11 +497,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
@@ -515,14 +509,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -535,8 +524,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 7a5b40275e8e..d4f4a66f8324 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -175,11 +175,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);
@@ -189,14 +186,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed\n",
- ring->idx);
+ if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -336,31 +327,24 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
}
+
static int uvd_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -416,16 +400,16 @@ static int uvd_v6_0_sw_init(void *handle)
DRM_INFO("UVD ENC is disabled\n");
}
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
@@ -476,12 +460,9 @@ static int uvd_v6_0_hw_init(void *handle)
uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v6_0_enable_mgcg(adev, true);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
r = amdgpu_ring_alloc(ring, 10);
if (r) {
@@ -513,12 +494,9 @@ static int uvd_v6_0_hw_init(void *handle)
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
}
@@ -548,7 +526,7 @@ static int uvd_v6_0_hw_fini(void *handle)
if (RREG32(mmUVD_STATUS) != 0)
uvd_v6_0_stop(adev);
- ring->ready = false;
+ ring->sched.ready = false;
return 0;
}
@@ -969,11 +947,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
@@ -984,14 +960,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -1004,9 +975,12 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
amdgpu_ring_write(ring, vmid);
@@ -1027,8 +1001,12 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
* Write enc ring commands to execute the indirect buffer
*/
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
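
For context on the signature change in the two emit_ib hunks above: every ->emit_ib callback now receives the amdgpu_job and derives the VMID itself instead of taking it as a parameter. The macro body below is an assumption inferred from these call sites, not copied from amdgpu_job.h: use the job's VMID, or 0 for direct submissions that carry no job.

/* Assumed definition -- inferred from the call sites above */
#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
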
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 58b39afcfb86..089645e78f98 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -183,11 +183,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
return 0;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n",
- ring->me, ring->idx, r);
+ if (r)
return r;
- }
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);
@@ -197,14 +194,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
- ring->me, ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: (%d)ring %d test failed\n",
- ring->me, ring->idx);
+ if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -343,27 +334,19 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r);
+ if (r)
goto error;
- }
r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me);
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r);
- } else {
- DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
@@ -447,10 +430,6 @@ static int uvd_v7_0_sw_init(void *handle)
DRM_INFO("PSP loading UVD firmware\n");
}
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
if (adev->uvd.harvest_config & (1 << j))
continue;
@@ -472,9 +451,9 @@ static int uvd_v7_0_sw_init(void *handle)
* sriov, so set unused location for other unused rings.
*/
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
if (r)
@@ -482,6 +461,10 @@ static int uvd_v7_0_sw_init(void *handle)
}
}
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
if (r)
return r;
@@ -540,12 +523,9 @@ static int uvd_v7_0_hw_init(void *handle)
ring = &adev->uvd.inst[j].ring;
if (!amdgpu_sriov_vf(adev)) {
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
r = amdgpu_ring_alloc(ring, 10);
if (r) {
@@ -582,12 +562,9 @@ static int uvd_v7_0_hw_init(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst[j].ring_enc[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
}
done:
@@ -619,7 +596,7 @@ static int uvd_v7_0_hw_fini(void *handle)
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
if (adev->uvd.harvest_config & (1 << i))
continue;
- adev->uvd.inst[i].ring.ready = false;
+ adev->uvd.inst[i].ring.sched.ready = false;
}
return 0;
@@ -1235,11 +1212,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
- ring->me, ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1251,14 +1226,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
- ring->me, ring->idx, i);
- } else {
- DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n",
- ring->me, ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -1300,10 +1270,12 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
* Write ring commands to execute the indirect buffer
*/
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
struct amdgpu_device *adev = ring->adev;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1329,8 +1301,12 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
* Write enc ring commands to execute the indirect buffer
*/
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
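
The pattern deleted across these hw_init hunks (set ring->ready, run the ring test, clear the flag on failure) is folded into a single helper. A minimal sketch of what that helper presumably does, inferred from the callers; the authoritative body lives in amdgpu_ring.c:

int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(ring->adev->dev,
			      "ring %s test failed (%d)\n", ring->name, r);

	/* record the outcome where the scheduler checks it */
	ring->sched.ready = !r;
	return r;
}
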
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index ea28828360d3..bed78a778e3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -463,15 +463,11 @@ static int vce_v2_0_hw_init(void *handle)
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
vce_v2_0_enable_mgcg(adev, true, false);
- for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].ready = false;
for (i = 0; i < adev->vce.num_rings; i++) {
- r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
if (r)
return r;
- else
- adev->vce.ring[i].ready = true;
}
DRM_INFO("VCE initialized successfully.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6dbd39730070..2668effadd27 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,7 +37,6 @@
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
-#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -474,15 +473,10 @@ static int vce_v3_0_hw_init(void *handle)
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
- for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].ready = false;
-
for (i = 0; i < adev->vce.num_rings; i++) {
- r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
if (r)
return r;
- else
- adev->vce.ring[i].ready = true;
}
DRM_INFO("VCE initialized successfully.\n");
@@ -838,8 +832,12 @@ out:
}
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, VCE_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1c9471890bf7..9fb34b7d8e03 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -466,9 +466,9 @@ static int vce_v4_0_sw_init(void *handle)
* so set unused location for other unused rings.
*/
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
@@ -519,15 +519,10 @@ static int vce_v4_0_hw_init(void *handle)
if (r)
return r;
- for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].ready = false;
-
for (i = 0; i < adev->vce.num_rings; i++) {
- r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
if (r)
return r;
- else
- adev->vce.ring[i].ready = true;
}
DRM_INFO("VCE initialized successfully.\n");
@@ -549,7 +544,7 @@ static int vce_v4_0_hw_fini(void *handle)
}
for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].ready = false;
+ adev->vce.ring[i].sched.ready = false;
return 0;
}
@@ -951,9 +946,11 @@ static int vce_v4_0_set_powergating_state(void *handle,
}
#endif
-static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ struct amdgpu_ib *ib, bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, VCE_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
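
The ready flag these hunks touch has also moved from struct amdgpu_ring into the embedded GPU scheduler, so drm_sched itself can refuse to push jobs at a dead ring. Roughly (field inferred from the hw_fini hunks; drm_gpu_scheduler has many more members):

struct drm_gpu_scheduler {
	/* ... */
	bool ready;	/* cleared on hw_fini or a failed ring test */
};
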
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index eae90922fdbe..c1a03505f956 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -176,30 +176,22 @@ static int vcn_v1_0_hw_init(void *handle)
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
int i, r;
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.ring_enc[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ ring->sched.ready = true;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
ring = &adev->vcn.ring_jpeg;
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
done:
if (!r)
@@ -224,7 +216,7 @@ static int vcn_v1_0_hw_fini(void *handle)
if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
vcn_v1_0_stop(adev);
- ring->ready = false;
+ ring->sched.ready = false;
return 0;
}
@@ -1366,10 +1358,12 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
* Write ring commands to execute the indirect buffer
*/
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
struct amdgpu_device *adev = ring->adev;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1524,8 +1518,12 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
* Write enc ring commands to execute the indirect buffer
*/
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1725,10 +1723,12 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
* Write ring commands to execute the indirect buffer.
*/
static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
struct amdgpu_device *adev = ring->adev;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a99f71797aa3..2c250b01a903 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
else
wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF);
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
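
The one-line mask change above is a real fix, not churn: upper_32_bits() yields bits 63..32 of the write-back address, and the old 0xFF mask kept only bits 39..32, silently truncating any GPU address at or above 1ULL << 40. A worked example:

u64 wptr_off = 0x1234ULL << 32;			/* a 45-bit GPU address */
u32 hi_old = upper_32_bits(wptr_off) & 0xFF;	/* 0x34   -- truncated */
u32 hi_new = upper_32_bits(wptr_off) & 0xFFFF;	/* 0x1234 -- intact */
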
@@ -220,90 +220,6 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * vega10_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u32 dw0, dw3, dw4, dw5;
- u16 pasid;
- u64 addr, key;
- struct amdgpu_vm *vm;
- int r;
-
- dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
- dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
-
- /* Filter retry page faults, let only the first one pass. If
- * there are too many outstanding faults, ignore them until
- * some faults get cleared.
- */
- switch (dw0 & 0xff) {
- case SOC15_IH_CLIENTID_VMC:
- case SOC15_IH_CLIENTID_UTCL2:
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- pasid = dw3 & 0xffff;
- /* No PASID, can't identify faulting process */
- if (!pasid)
- return true;
-
- /* Not a retry fault, check fault credit */
- if (!(dw5 & 0x80)) {
- if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
- goto ignore_iv;
- return true;
- }
-
- /* Track retry faults in per-VM fault FIFO. */
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
- key = AMDGPU_VM_FAULT(pasid, addr);
- if (!vm) {
- /* VM not found, process it normally */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- } else {
- r = amdgpu_vm_add_fault(vm->fault_hash, key);
-
- /* Hash table is full or the fault is already being processed,
- * ignore further page faults
- */
- if (r != 0) {
- spin_unlock(&adev->vm_manager.pasid_lock);
- goto ignore_iv;
- }
- }
- /* No locking required with single writer and single reader */
- r = kfifo_put(&vm->faults, key);
- if (!r) {
- /* FIFO is full. Ignore it until there is space */
- amdgpu_vm_clear_fault(vm->fault_hash, key);
- spin_unlock(&adev->vm_manager.pasid_lock);
- goto ignore_iv;
- }
-
- spin_unlock(&adev->vm_manager.pasid_lock);
- /* It's the first fault for this address, process it normally */
- return true;
-
-ignore_iv:
- adev->irq.ih.rptr += 32;
- return false;
-}
-
-/**
* vega10_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -385,7 +301,7 @@ static int vega10_ih_sw_init(void *handle)
return r;
adev->irq.ih.use_doorbell = true;
- adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
r = amdgpu_irq_init(adev);
@@ -487,7 +403,6 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
- .prescreen_iv = vega10_ih_prescreen_iv,
.decode_iv = vega10_ih_decode_iv,
.set_rptr = vega10_ih_set_rptr
};
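
Prescreening drops out of the IH top half entirely here; the retry-fault filtering it performed is presumably handled further down the fault path after this series. One detail from the removed code that remains useful when reading IV dumps: the 48-bit fault address arrives split across two dwords.

/* dw4 carries VA[43:12]; the low nibble of dw5 carries VA[47:44] */
u64 addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
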
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index c5c9b2bc190d..422674bb3cdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -56,4 +56,32 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
return 0;
}
+void vega10_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_DOORBELL64_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL64_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL64_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL64_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL64_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL64_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL64_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL64_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL64_MEC_RING7;
+ adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL64_USERQUEUE_START;
+ adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL64_USERQUEUE_END;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL64_GFX_RING0;
+ adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL64_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL64_sDMA_ENGINE1;
+ adev->doorbell_index.ih = AMDGPU_DOORBELL64_IH;
+ adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_DOORBELL64_UVD_RING0_1;
+ adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_DOORBELL64_UVD_RING2_3;
+ adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_DOORBELL64_UVD_RING4_5;
+ adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_DOORBELL64_UVD_RING6_7;
+ adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_DOORBELL64_VCE_RING0_1;
+ adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3;
+ adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5;
+ adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
+	/* Expressed in units of dword doorbells */
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1;
+}
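
These per-ASIC init functions replace direct use of the AMDGPU_DOORBELL64_* enums at each consumer, as seen in the uvd/vce/ih hunks earlier. The shape of the structure they fill is inferred from the assignments here (field list abridged, not copied from amdgpu.h):

struct amdgpu_doorbell_index {
	u32 kiq;
	u32 mec_ring0;
	/* ... one slot per queue, as assigned above ... */
	u32 gfx_ring0;
	u32 sdma_engine0;
	u32 ih;
	struct {
		u32 uvd_ring0_1;
		u32 uvd_ring2_3;
		u32 vce_ring0_1;
		u32 vce_ring2_3;
	} uvd_vce;
	u32 max_assignment;
};
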
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 2d4473557b0d..edce413fda9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -49,8 +49,42 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
}
return 0;
}
+void vega20_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_VEGA20_DOORBELL_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_VEGA20_DOORBELL_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_VEGA20_DOORBELL_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_VEGA20_DOORBELL_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_VEGA20_DOORBELL_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_VEGA20_DOORBELL_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_VEGA20_DOORBELL_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_VEGA20_DOORBELL_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_VEGA20_DOORBELL_MEC_RING7;
+ adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START;
+ adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0;
+ adev->doorbell_index.sdma_engine0 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine1 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.sdma_engine2 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2;
+ adev->doorbell_index.sdma_engine3 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3;
+ adev->doorbell_index.sdma_engine4 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4;
+ adev->doorbell_index.sdma_engine5 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5;
+ adev->doorbell_index.sdma_engine6 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6;
+ adev->doorbell_index.sdma_engine7 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7;
+ adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH;
+ adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1;
+ adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3;
+ adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5;
+ adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7;
+ adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1;
+ adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3;
+ adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
+ adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
+ adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 07880d35e9de..ff2906c215fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -955,6 +955,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.flush_hdp = &vi_flush_hdp,
.invalidate_hdp = &vi_invalidate_hdp,
.need_full_reset = &vi_need_full_reset,
+ .init_doorbell_index = &legacy_doorbell_index_init,
};
#define CZ_REV_BRISTOL(rev) \
@@ -1712,3 +1713,21 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
return 0;
}
+
+void legacy_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
+ adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 0429fe332269..8de0772f986c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -30,4 +30,5 @@ void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 5d2475d5392c..177d1e5329a5 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -23,6 +23,7 @@
#include "kfd_priv.h"
#include "kfd_events.h"
#include "cik_int.h"
+#include "amdgpu_amdkfd.h"
static bool cik_event_interrupt_isr(struct kfd_dev *dev,
const uint32_t *ih_ring_entry,
@@ -107,7 +108,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
kfd_process_vm_fault(dev->dqm, pasid);
memset(&info, 0, sizeof(info));
- dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+ amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->kgd, &info);
if (!info.page_addr && !info.status)
return;
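
This is the first of many hunks in the amdkfd half of the series that drop the kfd2kgd function-pointer indirection in favour of amdgpu_amdkfd_* functions exported directly by amdgpu. A sketch of the wrapper shape, with the body assumed rather than copied from amdgpu_amdkfd_gpuvm.c:

int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					  struct kfd_vm_fault_info *info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* assumed body: copy the last recorded VM fault out of adev */
	if (atomic_read(&adev->gmc.vm_fault_info_updated)) {
		*info = *adev->gmc.vm_fault_info;
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}
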
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
index 37ce6dd65391..8e2a1663c4db 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -68,6 +68,4 @@
#define GRBM_GFX_INDEX 0x30800
-#define ATC_VMID_PASID_MAPPING_VALID (1U << 31)
-
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 14d5b5fa822d..3623538baf6f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -33,10 +33,12 @@
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
+#include <linux/dma-buf.h>
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
+#include "amdgpu_amdkfd.h"
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
@@ -834,8 +836,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
dev = kfd_device_by_id(args->gpu_id);
if (dev)
/* Reading GPU clock counter from KGD */
- args->gpu_clock_counter =
- dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
+ args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd);
else
/* Node without GPU resource */
args->gpu_clock_counter = 0;
@@ -1042,7 +1043,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
}
mutex_unlock(&p->mutex);
- err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
+ err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
mem, &kern_addr, &size);
if (err) {
pr_err("Failed to map event page to kernel\n");
@@ -1240,7 +1241,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev)
if (dev->device_info->needs_iommu_device)
return false;
- dev->kfd2kgd->get_local_mem_info(dev->kgd, &mem_info);
+ amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
if (mem_info.local_mem_size_private == 0 &&
mem_info.local_mem_size_public > 0)
return true;
@@ -1273,6 +1274,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return -EINVAL;
}
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+ if (args->size != kfd_doorbell_process_slice(dev))
+ return -EINVAL;
+ offset = kfd_get_process_doorbells(dev, p);
+ }
+
mutex_lock(&p->mutex);
pdd = kfd_bind_process_to_device(dev, p);
@@ -1281,7 +1288,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_unlock;
}
- err = dev->kfd2kgd->alloc_memory_of_gpu(
+ err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
dev->kgd, args->va_addr, args->size,
pdd->vm, (struct kgd_mem **) &mem, &offset,
flags);
@@ -1303,7 +1310,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return 0;
err_free:
- dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
err_unlock:
mutex_unlock(&p->mutex);
return err;
@@ -1338,7 +1345,8 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
goto err_unlock;
}
- ret = dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+ ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
+ (struct kgd_mem *)mem);
/* If freeing the buffer failed, leave the handle in place for
* clean-up during process tear-down.
@@ -1418,7 +1426,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
err = PTR_ERR(peer_pdd);
goto get_mem_obj_from_handle_failed;
}
- err = peer->kfd2kgd->map_memory_to_gpu(
+ err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
if (err) {
pr_err("Failed to map to gpu %d/%d\n",
@@ -1430,7 +1438,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
mutex_unlock(&p->mutex);
- err = dev->kfd2kgd->sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
+ err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
@@ -1525,7 +1533,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
err = -ENODEV;
goto get_mem_obj_from_handle_failed;
}
- err = dev->kfd2kgd->unmap_memory_to_gpu(
+ err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
if (err) {
pr_err("Failed to unmap from gpu %d/%d\n",
@@ -1549,6 +1557,115 @@ copy_from_user_failed:
return err;
}
+static int kfd_ioctl_get_dmabuf_info(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_get_dmabuf_info_args *args = data;
+ struct kfd_dev *dev = NULL;
+ struct kgd_dev *dma_buf_kgd;
+ void *metadata_buffer = NULL;
+ uint32_t flags;
+ unsigned int i;
+ int r;
+
+ /* Find a KFD GPU device that supports the get_dmabuf_info query */
+ for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
+ if (dev)
+ break;
+ if (!dev)
+ return -EINVAL;
+
+ if (args->metadata_ptr) {
+ metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
+ if (!metadata_buffer)
+ return -ENOMEM;
+ }
+
+ /* Get dmabuf info from KGD */
+ r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
+ &dma_buf_kgd, &args->size,
+ metadata_buffer, args->metadata_size,
+ &args->metadata_size, &flags);
+ if (r)
+ goto exit;
+
+ /* Reverse-lookup gpu_id from kgd pointer */
+ dev = kfd_device_by_kgd(dma_buf_kgd);
+ if (!dev) {
+ r = -EINVAL;
+ goto exit;
+ }
+ args->gpu_id = dev->id;
+ args->flags = flags;
+
+ /* Copy metadata buffer to user mode */
+ if (metadata_buffer) {
+ r = copy_to_user((void __user *)args->metadata_ptr,
+ metadata_buffer, args->metadata_size);
+ if (r != 0)
+ r = -EFAULT;
+ }
+
+exit:
+ kfree(metadata_buffer);
+
+ return r;
+}
+
+static int kfd_ioctl_import_dmabuf(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_import_dmabuf_args *args = data;
+ struct kfd_process_device *pdd;
+ struct dma_buf *dmabuf;
+ struct kfd_dev *dev;
+ int idr_handle;
+ uint64_t size;
+ void *mem;
+ int r;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+
+ dmabuf = dma_buf_get(args->dmabuf_fd);
+ if (!dmabuf)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ r = PTR_ERR(pdd);
+ goto err_unlock;
+ }
+
+ r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
+ args->va_addr, pdd->vm,
+ (struct kgd_mem **)&mem, &size,
+ NULL);
+ if (r)
+ goto err_unlock;
+
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
+ if (idr_handle < 0) {
+ r = -EFAULT;
+ goto err_free;
+ }
+
+ mutex_unlock(&p->mutex);
+
+ args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
+
+ return 0;
+
+err_free:
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+err_unlock:
+ mutex_unlock(&p->mutex);
+ return r;
+}
+
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
.cmd_drv = 0, .name = #ioctl}
@@ -1634,7 +1751,13 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
kfd_ioctl_set_cu_mask, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
- kfd_ioctl_get_queue_wave_state, 0)
+ kfd_ioctl_get_queue_wave_state, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
+ kfd_ioctl_get_dmabuf_info, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
+ kfd_ioctl_import_dmabuf, 0),
};
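
A hypothetical user-space sketch of the new import path; the args layout follows the kernel code above, with field names taken from the handlers (va_addr, gpu_id, dmabuf_fd, handle) and the uapi header location assumed:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>	/* assumed uapi header */

static int kfd_import_dmabuf(int kfd_fd, int dmabuf_fd,
			     uint32_t gpu_id, uint64_t va)
{
	struct kfd_ioctl_import_dmabuf_args args = {
		.va_addr   = va,
		.gpu_id    = gpu_id,
		.dmabuf_fd = (uint32_t)dmabuf_fd,
	};

	if (ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &args) < 0)
		return -1;
	/* args.handle now names the imported BO for later map/free ioctls */
	return 0;
}
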
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 56412b0e7e1c..c02adbbeef2a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -26,6 +26,7 @@
#include "kfd_priv.h"
#include "kfd_topology.h"
#include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
 * GPU processor IDs are expressed with Bit[31]=1.
@@ -132,6 +133,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
#define fiji_cache_info carrizo_cache_info
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
+#define polaris12_cache_info carrizo_cache_info
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
@@ -646,7 +648,12 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = polaris11_cache_info;
num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
break;
+ case CHIP_POLARIS12:
+ pcache_info = polaris12_cache_info;
+ num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
+ break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
pcache_info = vega10_cache_info;
num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
@@ -753,12 +760,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
return -ENODATA;
}
- pcrat_image = kmalloc(crat_table->length, GFP_KERNEL);
+ pcrat_image = kmemdup(crat_table, crat_table->length, GFP_KERNEL);
if (!pcrat_image)
return -ENOMEM;
- memcpy(pcrat_image, crat_table, crat_table->length);
-
*crat_image = pcrat_image;
*size = crat_table->length;
@@ -1161,7 +1166,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
cu->proximity_domain = proximity_domain;
- kdev->kfd2kgd->get_cu_info(kdev->kgd, &cu_info);
+ amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
cu->num_simd_per_cu = cu_info.simd_per_cu;
cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
cu->max_waves_simd = cu_info.max_waves_per_simd;
@@ -1192,7 +1197,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
* report the total FB size (public+private) as a single
* private heap.
*/
- kdev->kfd2kgd->get_local_mem_info(kdev->kgd, &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length);
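
The kmemdup() conversion in this file is purely mechanical; the two forms are equivalent:

p = kmalloc(len, GFP_KERNEL);		/* before */
if (p)
	memcpy(p, src, len);

p = kmemdup(src, len, GFP_KERNEL);	/* after: one call, same result */
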
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index a9f18ea7e354..9ed14a11afa2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -28,6 +28,7 @@
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
#define MQD_SIZE_ALIGNED 768
@@ -204,6 +205,22 @@ static const struct kfd_device_info polaris11_device_info = {
.num_sdma_queues_per_engine = 2,
};
+static const struct kfd_device_info polaris12_device_info = {
+ .asic_family = CHIP_POLARIS12,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 4,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
+};
+
static const struct kfd_device_info vega10_device_info = {
.asic_family = CHIP_VEGA10,
.max_pasid_bits = 16,
@@ -236,6 +253,22 @@ static const struct kfd_device_info vega10_vf_device_info = {
.num_sdma_queues_per_engine = 2,
};
+static const struct kfd_device_info vega12_device_info = {
+ .asic_family = CHIP_VEGA12,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
+};
+
static const struct kfd_device_info vega20_device_info = {
.asic_family = CHIP_VEGA20,
.max_pasid_bits = 16,
@@ -330,6 +363,14 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x67EB, &polaris11_device_info }, /* Polaris11 */
{ 0x67EF, &polaris11_device_info }, /* Polaris11 */
{ 0x67FF, &polaris11_device_info }, /* Polaris11 */
+ { 0x6980, &polaris12_device_info }, /* Polaris12 */
+ { 0x6981, &polaris12_device_info }, /* Polaris12 */
+ { 0x6985, &polaris12_device_info }, /* Polaris12 */
+ { 0x6986, &polaris12_device_info }, /* Polaris12 */
+ { 0x6987, &polaris12_device_info }, /* Polaris12 */
+ { 0x6995, &polaris12_device_info }, /* Polaris12 */
+ { 0x6997, &polaris12_device_info }, /* Polaris12 */
+ { 0x699F, &polaris12_device_info }, /* Polaris12 */
{ 0x6860, &vega10_device_info }, /* Vega10 */
{ 0x6861, &vega10_device_info }, /* Vega10 */
{ 0x6862, &vega10_device_info }, /* Vega10 */
@@ -339,6 +380,11 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x6868, &vega10_device_info }, /* Vega10 */
{ 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
{ 0x687F, &vega10_device_info }, /* Vega10 */
+ { 0x69A0, &vega12_device_info }, /* Vega12 */
+ { 0x69A1, &vega12_device_info }, /* Vega12 */
+ { 0x69A2, &vega12_device_info }, /* Vega12 */
+ { 0x69A3, &vega12_device_info }, /* Vega12 */
+ { 0x69AF, &vega12_device_info }, /* Vega12 */
{ 0x66a0, &vega20_device_info }, /* Vega20 */
{ 0x66a1, &vega20_device_info }, /* Vega20 */
{ 0x66a2, &vega20_device_info }, /* Vega20 */
@@ -478,7 +524,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
/* add another 512KB for all other allocations on gart (HPD, fences) */
size += 512 * 1024;
- if (kfd->kfd2kgd->init_gtt_mem_allocation(
+ if (amdgpu_amdkfd_alloc_gtt_mem(
kfd->kgd, size, &kfd->gtt_mem,
&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
false)) {
@@ -552,7 +598,7 @@ kfd_topology_add_device_error:
kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
- kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
@@ -569,7 +615,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfd_topology_remove_device(kfd);
kfd_doorbell_fini(kfd);
kfd_gtt_sa_fini(kfd);
- kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
}
kfree(kfd);
@@ -681,6 +727,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
bool is_patched = false;
+ unsigned long flags;
if (!kfd->init_complete)
return;
@@ -690,7 +737,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
return;
}
- spin_lock(&kfd->interrupt_lock);
+ spin_lock_irqsave(&kfd->interrupt_lock, flags);
if (kfd->interrupts_active
&& interrupt_is_wanted(kfd, ih_ring_entry,
@@ -699,7 +746,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
is_patched ? patched_ihre : ih_ring_entry))
queue_work(kfd->ih_wq, &kfd->interrupt_work);
- spin_unlock(&kfd->interrupt_lock);
+ spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
int kgd2kfd_quiesce_mm(struct mm_struct *mm)
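
The irqsave conversion above follows the standard rule: when a lock can be taken from both process and hard-IRQ context, the process-context side must disable local interrupts while holding it, or an interrupt arriving mid-critical-section self-deadlocks. The resulting pattern:

unsigned long flags;

spin_lock_irqsave(&kfd->interrupt_lock, flags);
/* ... critical section ... */
spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
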
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index a3b933967171..8372556b52eb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -33,6 +33,7 @@
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
+#include "amdgpu_amdkfd.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
@@ -219,7 +220,7 @@ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
if (ret)
return ret;
- return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
+ return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
pmf->release_mem_size / sizeof(uint32_t));
}
@@ -672,7 +673,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
pdd = qpd_to_pdd(qpd);
/* Retrieve PD base */
- pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+ pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -743,7 +744,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
pdd = qpd_to_pdd(qpd);
/* Retrieve PD base */
- pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+ pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -793,7 +794,7 @@ static int register_process(struct device_queue_manager *dqm,
pdd = qpd_to_pdd(qpd);
/* Retrieve PD base */
- pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+ pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
dqm_lock(dqm);
list_add(&n->list, &dqm->queues);
@@ -805,7 +806,7 @@ static int register_process(struct device_queue_manager *dqm,
retval = dqm->asic_ops.update_qpd(dqm, qpd);
if (dqm->processes_count++ == 0)
- dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
+ amdgpu_amdkfd_set_compute_idle(dqm->dev->kgd, false);
dqm_unlock(dqm);
@@ -829,7 +830,7 @@ static int unregister_process(struct device_queue_manager *dqm,
list_del(&cur->list);
kfree(cur);
if (--dqm->processes_count == 0)
- dqm->dev->kfd2kgd->set_compute_idle(
+ amdgpu_amdkfd_set_compute_idle(
dqm->dev->kgd, true);
goto out;
}
@@ -845,15 +846,8 @@ static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
unsigned int vmid)
{
- uint32_t pasid_mapping;
-
- pasid_mapping = (pasid == 0) ? 0 :
- (uint32_t)pasid |
- ATC_VMID_PASID_MAPPING_VALID;
-
return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
- dqm->dev->kgd, pasid_mapping,
- vmid);
+ dqm->dev->kgd, pasid, vmid);
}
static void init_interrupts(struct device_queue_manager *dqm)
@@ -1553,7 +1547,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
int r;
dqm_lock(dqm);
@@ -1564,19 +1558,19 @@ static int get_wave_state(struct device_queue_manager *dqm,
goto dqm_unlock;
}
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (!mqd) {
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (!mqd_mgr) {
r = -ENOMEM;
goto dqm_unlock;
}
- if (!mqd->get_wave_state) {
+ if (!mqd_mgr->get_wave_state) {
r = -EINVAL;
goto dqm_unlock;
}
- r = mqd->get_wave_state(mqd, q->mqd, ctl_stack, ctl_stack_used_size,
- save_area_used_size);
+ r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
+ ctl_stack_used_size, save_area_used_size);
dqm_unlock:
dqm_unlock(dqm);
@@ -1747,10 +1741,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
device_queue_manager_init_vi_tonga(&dqm->asic_ops);
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
device_queue_manager_init_v9(&dqm->asic_ops);
@@ -1796,7 +1792,7 @@ static void kfd_process_hw_exception(struct work_struct *work)
{
struct device_queue_manager *dqm = container_of(work,
struct device_queue_manager, hw_exception_work);
- dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
+ amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
}
#if defined(CONFIG_DEBUG_FS)
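
The set_pasid_vmid_mapping hunk above stops open-coding the ATC mapping word in kfd (matching the removal of ATC_VMID_PASID_MAPPING_VALID from cik_regs.h earlier); the packing presumably moves behind the kfd2kgd implementation. For reference, what used to be built by hand:

/* bit 31 marks the VMID<->PASID mapping entry as valid */
uint32_t pasid_mapping = (pasid == 0) ? 0 :
	(uint32_t)pasid | ATC_VMID_PASID_MAPPING_VALID;
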
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index fd60a116be37..c3a5dcfe877a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -24,7 +24,6 @@
#include "kfd_device_queue_manager.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
-#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 3d66cec414af..213ea5454d11 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -397,9 +397,11 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
kfd_init_apertures_vi(pdd, id);
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
kfd_init_apertures_v9(pdd, id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index f836897bbf58..a85904ad0d5f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -23,7 +23,7 @@
#include "kfd_priv.h"
#include "kfd_events.h"
#include "soc15_int.h"
-
+#include "kfd_device_queue_manager.h"
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
const uint32_t *ih_ring_entry,
@@ -39,20 +39,39 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
vmid > dev->vm_info.last_vmid_kfd)
return 0;
- /* If there is no valid PASID, it's likely a firmware bug */
- pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
- if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
- return 0;
-
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+ pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+
+	/* This is a known issue for gfx9. Under non-HWS mode, the pasid is
+	 * not set in the interrupt payload, so we have to look it up on
+	 * our own.
+	 */
+ if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ const uint32_t pasid_mask = 0xffff;
- pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
- client_id, source_id, pasid);
+ *patched_flag = true;
+ memcpy(patched_ihre, ih_ring_entry,
+ dev->device_info->ih_ring_entry_size);
+
+ pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid(
+ dev->kgd, vmid);
+
+ /* Patch the pasid field */
+ patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
+ & ~pasid_mask) | pasid);
+ }
+
+ pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
data[0], data[1], data[2], data[3],
data[4], data[5], data[6], data[7]);
+ /* If there is no valid PASID, it's likely a bug */
+ if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ return 0;
+
/* Interrupt types we care about: various signals and faults.
* They will be forwarded to a work queue (see below).
*/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 6c31f7370193..f1596881f20a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -313,6 +313,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
kernel_queue_init_vi(&kq->ops_asic_specific);
break;
@@ -322,6 +323,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
kernel_queue_init_v9(&kq->ops_asic_specific);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index e33019a7a883..aed9b9b82213 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -22,6 +22,7 @@
*/
#include "kfd_mqd_manager.h"
+#include "amdgpu_amdkfd.h"
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
@@ -37,8 +38,10 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
return mqd_manager_init_vi_tonga(type, dev);
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
return mqd_manager_init_v9(type, dev);
@@ -58,7 +61,7 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
uint32_t cu_per_sh[4] = {0};
int i, se, cu = 0;
- mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info);
+ amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
if (cu_mask_count > cu_info.cu_active_number)
cu_mask_count = cu_info.cu_active_number;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index f381c1cb27bd..9dbba609450e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -30,6 +30,7 @@
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
+#include "amdgpu_amdkfd.h"
static inline struct v9_mqd *get_mqd(void *mqd)
{
@@ -83,7 +84,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
*mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!*mqd_mem_obj)
return -ENOMEM;
- retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
+ retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
ALIGN(q->ctl_stack_size, PAGE_SIZE) +
ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
&((*mqd_mem_obj)->gtt_mem),
@@ -250,7 +251,7 @@ static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_dev *kfd = mm->dev;
if (mqd_mem_obj->gtt_mem) {
- kfd->kfd2kgd->free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
kfree(mqd_mem_obj);
} else {
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index c6080ed3b6a7..045a229436a0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -226,9 +226,11 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
pm->pmf = &kfd_vi_pm_funcs;
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
pm->pmf = &kfd_v9_pm_funcs;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 15fff4420e53..33b08ff00b50 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include "kfd_priv.h"
+#include "amdgpu_ids.h"
static unsigned int pasid_bits = 16;
static const struct kfd2kgd_calls *kfd2kgd;
@@ -71,7 +72,7 @@ unsigned int kfd_pasid_alloc(void)
return false;
}
- r = kfd2kgd->alloc_pasid(pasid_bits);
+ r = amdgpu_pasid_alloc(pasid_bits);
return r > 0 ? r : 0;
}
@@ -79,5 +80,5 @@ unsigned int kfd_pasid_alloc(void)
void kfd_pasid_free(unsigned int pasid)
{
if (kfd2kgd)
- kfd2kgd->free_pasid(pasid);
+ amdgpu_pasid_free(pasid);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 53ff86d45d91..0689d4ccbbc0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -507,6 +507,7 @@ struct qcm_process_device {
* All the memory management data should be here too
*/
uint64_t gds_context_area;
+ /* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
uint64_t page_table_base;
uint32_t sh_mem_config;
uint32_t sh_mem_bases;
@@ -792,6 +793,7 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
+struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 0039e451d9af..80b36e860a0a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -31,6 +31,7 @@
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
+#include "amdgpu_amdkfd.h"
struct mm_struct;
@@ -100,8 +101,8 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
{
struct kfd_dev *dev = pdd->dev;
- dev->kfd2kgd->unmap_memory_to_gpu(dev->kgd, mem, pdd->vm);
- dev->kfd2kgd->free_memory_of_gpu(dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@@ -119,16 +120,16 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
int handle;
int err;
- err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+ err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
pdd->vm, &mem, NULL, flags);
if (err)
goto err_alloc_mem;
- err = kdev->kfd2kgd->map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
+ err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
if (err)
goto err_map_mem;
- err = kdev->kfd2kgd->sync_memory(kdev->kgd, mem, true);
+ err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
@@ -147,7 +148,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
}
if (kptr) {
- err = kdev->kfd2kgd->map_gtt_bo_to_kernel(kdev->kgd,
+ err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
(struct kgd_mem *)mem, kptr, NULL);
if (err) {
pr_debug("Map GTT BO to kernel failed\n");
@@ -165,7 +166,7 @@ sync_memory_failed:
return err;
err_map_mem:
- kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
*kptr = NULL;
return err;
@@ -296,11 +297,11 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
per_device_list) {
if (!peer_pdd->vm)
continue;
- peer_pdd->dev->kfd2kgd->unmap_memory_to_gpu(
+ amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
peer_pdd->dev->kgd, mem, peer_pdd->vm);
}
- pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
kfd_process_device_remove_obj_handle(pdd, id);
}
}
@@ -323,11 +324,12 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
pdd->dev->id, p->pasid);
if (pdd->drm_file) {
- pdd->dev->kfd2kgd->release_process_vm(pdd->dev->kgd, pdd->vm);
+ amdgpu_amdkfd_gpuvm_release_process_vm(
+ pdd->dev->kgd, pdd->vm);
fput(pdd->drm_file);
}
else if (pdd->vm)
- pdd->dev->kfd2kgd->destroy_process_vm(
+ amdgpu_amdkfd_gpuvm_destroy_process_vm(
pdd->dev->kgd, pdd->vm);
list_del(&pdd->per_device_list);
@@ -688,12 +690,12 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
dev = pdd->dev;
if (drm_file)
- ret = dev->kfd2kgd->acquire_process_vm(
+ ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
dev->kgd, drm_file, p->pasid,
&pdd->vm, &p->kgd_process_info, &p->ef);
else
- ret = dev->kfd2kgd->create_process_vm(
- dev->kgd, p->pasid, &pdd->vm, &p->kgd_process_info, &p->ef);
+ ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
+ &pdd->vm, &p->kgd_process_info, &p->ef);
if (ret) {
pr_err("Failed to create process VM object\n");
return ret;
@@ -714,7 +716,7 @@ err_init_cwsr:
err_reserve_ib_mem:
kfd_process_device_free_bos(pdd);
if (!drm_file)
- dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm);
+ amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
pdd->vm = NULL;
return ret;
@@ -972,7 +974,7 @@ static void restore_process_worker(struct work_struct *work)
*/
p->last_restore_timestamp = get_jiffies_64();
- ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
+ ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
&p->ef);
if (ret) {
pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e3843c5929ed..5f5b2acedbac 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -36,6 +36,7 @@
#include "kfd_topology.h"
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
/* topology_device_list - Master list of all topology devices */
static struct list_head topology_device_list;
@@ -100,7 +101,25 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu->pdev == pdev) {
+ if (top_dev->gpu && top_dev->gpu->pdev == pdev) {
+ device = top_dev->gpu;
+ break;
+ }
+
+ up_read(&topology_lock);
+
+ return device;
+}
+
+struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd)
+{
+ struct kfd_topology_device *top_dev;
+ struct kfd_dev *device = NULL;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list)
+ if (top_dev->gpu && top_dev->gpu->kgd == kgd) {
device = top_dev->gpu;
break;
}
@@ -1052,7 +1071,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
if (!gpu)
return 0;
- gpu->kfd2kgd->get_local_mem_info(gpu->kgd, &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info);
local_mem_size = local_mem_info.local_mem_size_private +
local_mem_info.local_mem_size_public;
@@ -1118,8 +1137,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
* for APUs - If CRAT from ACPI reports more than one bank, then
* all the banks will report the same mem_clk_max information
*/
- dev->gpu->kfd2kgd->get_local_mem_info(dev->gpu->kgd,
- &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info);
list_for_each_entry(mem, &dev->mem_props, list)
mem->mem_clk_max = local_mem_info.mem_clk_max;
@@ -1240,7 +1258,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
* needed for the topology
*/
- dev->gpu->kfd2kgd->get_cu_info(dev->gpu->kgd, &cu_info);
+ amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info);
dev->node_props.simd_arrays_per_engine =
cu_info.num_shader_arrays_per_engine;
@@ -1249,7 +1267,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number,
gpu->pdev->devfn);
dev->node_props.max_engine_clk_fcompute =
- dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(dev->gpu->kgd);
+ amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
dev->node_props.max_engine_clk_ccompute =
cpufreq_quick_get_max(0) / 1000;
dev->node_props.drm_render_minor =
@@ -1272,12 +1290,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
pr_debug("Adding doorbell packet type capability\n");
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index c97dc9613325..cfde1568c79a 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -32,11 +32,12 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
-DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet
+DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b0df6dc9a775..c13856a46d8e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -23,6 +23,9 @@
*
*/
+/* The caprices of the preprocessor require that this be declared right here */
+#define CREATE_TRACE_POINTS
+
#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
@@ -38,7 +41,6 @@
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
-#include "dm_services_types.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
@@ -55,6 +57,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
@@ -72,10 +75,22 @@
#endif
#include "modules/inc/mod_freesync.h"
+#include "modules/power/power_helpers.h"
+#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+/**
+ * DOC: overview
+ *
+ * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
+ * requests into DC requests, and DC responses into DRM responses.
+ *
+ * The root control structure is &struct amdgpu_display_manager.
+ */
+
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
@@ -95,7 +110,7 @@ static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
- struct amdgpu_plane *aplane,
+ struct drm_plane *plane,
unsigned long possible_crtcs);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
@@ -119,6 +134,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
+static void handle_cursor_update(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state);
@@ -379,11 +396,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
}
-/*
- * Init display KMS
- *
- * Returns 0 on success
- */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -393,6 +405,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
+ mutex_init(&adev->dm.dc_lock);
+
if(amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
goto error;
@@ -429,6 +443,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->asic_type < CHIP_RAVEN)
init_data.flags.gpu_vm_support = true;
+ if (amdgpu_dc_feature_mask & DC_FBC_MASK)
+ init_data.flags.fbc_support = true;
+
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -504,6 +521,9 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
+
+ mutex_destroy(&adev->dm.dc_lock);
+
return;
}
@@ -635,6 +655,26 @@ static int dm_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct dmcu_iram_parameters params;
+ unsigned int linear_lut[16];
+ int i;
+ struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+ bool ret;
+
+ for (i = 0; i < 16; i++)
+ linear_lut[i] = 0xFFFF * i / 15;
+
+ params.set = 0;
+ params.backlight_ramping_start = 0xCCCC;
+ params.backlight_ramping_reduction = 0xCCCCCCCC;
+ params.backlight_lut_array_size = 16;
+ params.backlight_lut_array = linear_lut;
+
+ ret = dmcu_load_iram(dmcu, params);
+
+ if (!ret)
+ return -EINVAL;
+
return detect_mst_link_for_all_connectors(adev->ddev);
}
@@ -660,6 +700,26 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
+/**
+ * dm_hw_init() - Initialize DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Initialize the &struct amdgpu_display_manager device. This involves calling
+ * the initializers of each DM component, then populating the struct with them.
+ *
+ * Although the function implies hardware initialization, both hardware and
+ * software are initialized here. Splitting them out to their relevant init
+ * hooks is a future TODO item.
+ *
+ * Some notable things that are initialized here:
+ *
+ * - Display Core, both software and hardware
+ * - DC modules that we need (freesync and color management)
+ * - DRM software states
+ * - Interrupt sources and handlers
+ * - Vblank support
+ * - Debug FS entries, if enabled
+ */
static int dm_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -670,6 +730,14 @@ static int dm_hw_init(void *handle)
return 0;
}
+/**
+ * dm_hw_fini() - Teardown DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Teardown components within &struct amdgpu_display_manager that require
+ * cleanup. This involves cleaning up the DRM device, DC, and any modules that
+ * were loaded. Also flush IRQ workqueues and disable them.
+ */
static int dm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -895,6 +963,16 @@ static int dm_resume(void *handle)
return ret;
}
+/**
+ * DOC: DM Lifecycle
+ *
+ * DM (and consequently DC) is registered in the amdgpu base driver as an IP
+ * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
+ * the base driver's device list to be initialized and torn down accordingly.
+ *
+ * The functions to do so are provided as hooks in &struct amd_ip_funcs.
+ */
+
static const struct amd_ip_funcs amdgpu_dm_funcs = {
.name = "dm",
.early_init = dm_early_init,
@@ -923,53 +1001,17 @@ const struct amdgpu_ip_block_version dm_ip_block =
};
-static struct drm_atomic_state *
-dm_atomic_state_alloc(struct drm_device *dev)
-{
- struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
-
- if (!state)
- return NULL;
-
- if (drm_atomic_state_init(dev, &state->base) < 0)
- goto fail;
-
- return &state->base;
-
-fail:
- kfree(state);
- return NULL;
-}
-
-static void
-dm_atomic_state_clear(struct drm_atomic_state *state)
-{
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
-
- if (dm_state->context) {
- dc_release_state(dm_state->context);
- dm_state->context = NULL;
- }
-
- drm_atomic_state_default_clear(state);
-}
-
-static void
-dm_atomic_state_alloc_free(struct drm_atomic_state *state)
-{
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
- drm_atomic_state_default_release(state);
- kfree(dm_state);
-}
+/**
+ * DOC: atomic
+ *
+ * *WIP*
+ */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = amdgpu_dm_atomic_commit,
- .atomic_state_alloc = dm_atomic_state_alloc,
- .atomic_state_clear = dm_atomic_state_clear,
- .atomic_state_free = dm_atomic_state_alloc_free
};
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
@@ -1491,8 +1533,117 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
}
#endif
+/*
+ * Acquires the lock for the atomic state object and returns
+ * the new atomic state.
+ *
+ * This should only be called during atomic check.
+ */
+static int dm_atomic_get_state(struct drm_atomic_state *state,
+ struct dm_atomic_state **dm_state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_state *priv_state;
+ int ret;
+
+ if (*dm_state)
+ return 0;
+
+ ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
+ *dm_state = to_dm_atomic_state(priv_state);
+
+ return 0;
+}
+
+struct dm_atomic_state *
+dm_atomic_get_new_state(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_obj *obj;
+ struct drm_private_state *new_obj_state;
+ int i;
+
+ for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
+ if (obj->funcs == dm->atomic_obj.funcs)
+ return to_dm_atomic_state(new_obj_state);
+ }
+
+ return NULL;
+}
+
+struct dm_atomic_state *
+dm_atomic_get_old_state(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_obj *obj;
+ struct drm_private_state *old_obj_state;
+ int i;
+
+ for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
+ if (obj->funcs == dm->atomic_obj.funcs)
+ return to_dm_atomic_state(old_obj_state);
+ }
+
+ return NULL;
+}
+
+static struct drm_private_state *
+dm_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct dm_atomic_state *old_state, *new_state;
+
+ new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
+
+ new_state->context = dc_create_state();
+ if (!new_state->context) {
+ kfree(new_state);
+ return NULL;
+ }
+
+ old_state = to_dm_atomic_state(obj->state);
+ if (old_state && old_state->context)
+ dc_resource_state_copy_construct(old_state->context,
+ new_state->context);
+
+ return &new_state->base;
+}
+
+static void dm_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+ if (dm_state && dm_state->context)
+ dc_release_state(dm_state->context);
+
+ kfree(dm_state);
+}
+
+static struct drm_private_state_funcs dm_atomic_state_funcs = {
+ .atomic_duplicate_state = dm_atomic_duplicate_state,
+ .atomic_destroy_state = dm_atomic_destroy_state,
+};
+
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
+ struct dm_atomic_state *state;
int r;
adev->mode_info.mode_config_initialized = true;
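
dm_atomic_duplicate_state() and dm_atomic_destroy_state() above implement DRM's private-object discipline: the committed state is never mutated in place; atomic check duplicates it, modifies only the copy, and the superseded copy is destroyed once unreferenced. A minimal userspace analogue of that duplicate/destroy pattern, illustrative only and not the DRM API:

    #include <stdlib.h>
    #include <string.h>

    struct priv_state {
            int context;    /* stands in for dm_state->context */
    };

    /* Duplicate-on-write: copy the old state, then modify only the copy. */
    static struct priv_state *duplicate_state(const struct priv_state *old)
    {
            struct priv_state *new = malloc(sizeof(*new));

            if (!new)
                    return NULL;
            memcpy(new, old, sizeof(*new));
            return new;
    }

    static void destroy_state(struct priv_state *state)
    {
            free(state);
    }

    int main(void)
    {
            struct priv_state base = { .context = 1 };
            struct priv_state *next = duplicate_state(&base);

            if (next) {
                    next->context = 2;      /* base stays untouched, as in atomic check */
                    destroy_state(next);
            }
            return 0;
    }
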
@@ -1510,6 +1661,24 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->context = dc_create_state();
+ if (!state->context) {
+ kfree(state);
+ return -ENOMEM;
+ }
+
+ dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
+
+ drm_atomic_private_obj_init(&adev->dm.atomic_obj,
+ &state->base,
+ &dm_atomic_state_funcs);
+
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
@@ -1517,22 +1686,63 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
return 0;
}
+#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
+#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
+{
+#if defined(CONFIG_ACPI)
+ struct amdgpu_dm_backlight_caps caps;
+
+ if (dm->backlight_caps.caps_valid)
+ return;
+
+ amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
+ if (caps.caps_valid) {
+ dm->backlight_caps.min_input_signal = caps.min_input_signal;
+ dm->backlight_caps.max_input_signal = caps.max_input_signal;
+ dm->backlight_caps.caps_valid = true;
+ } else {
+ dm->backlight_caps.min_input_signal =
+ AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps.max_input_signal =
+ AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ }
+#else
+ dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+#endif
+}
+
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
+ struct amdgpu_dm_backlight_caps caps;
+ uint32_t brightness = bd->props.brightness;
+ amdgpu_dm_update_backlight_caps(dm);
+ caps = dm->backlight_caps;
/*
- * PWM interperts 0 as 100% rather than 0% because of HW
- * limitation for level 0.So limiting minimum brightness level
- * to 1.
+ * The brightness input is in the range 0-255.
+ * It needs to be rescaled to lie between the
+ * requested min and max input signal.
+ *
+ * It also needs to be scaled up by 0x101 to
+ * match the DC interface, which has a range of
+ * 0 to 0xffff.
*/
- if (bd->props.brightness < 1)
- return 1;
+ brightness =
+ brightness
+ * 0x101
+ * (caps.max_input_signal - caps.min_input_signal)
+ / AMDGPU_MAX_BL_LEVEL
+ + caps.min_input_signal * 0x101;
+
if (dc_link_set_backlight_level(dm->backlight_link,
- bd->props.brightness, 0, 0))
+ brightness, 0, 0))
return 0;
else
return 1;
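
The rescaling above maps the 0-255 DRM brightness range onto the ACPI-reported input-signal range and widens it by 0x101 to DC's 16-bit scale. A standalone sketch of the same integer arithmetic, using the defaults from this patch (the constant name below is a stand-in, not the kernel macro):

    #include <stdio.h>

    #define MAX_BL_LEVEL 255    /* stand-in for AMDGPU_MAX_BL_LEVEL */

    /* Same integer math as amdgpu_dm_backlight_update_status() above. */
    static unsigned int rescale(unsigned int brightness,
                                unsigned int min_sig, unsigned int max_sig)
    {
            return brightness * 0x101 * (max_sig - min_sig) / MAX_BL_LEVEL
                    + min_sig * 0x101;
    }

    int main(void)
    {
            /* With min=12, max=255: 0 maps to 0x0c0c (never fully off)
             * and 255 maps to 0xffff (full scale on the DC side).
             */
            printf("0   -> 0x%04x\n", rescale(0, 12, 255));
            printf("128 -> 0x%04x\n", rescale(128, 12, 255));
            printf("255 -> 0x%04x\n", rescale(255, 12, 255));
            return 0;
    }
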
@@ -1559,6 +1769,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
char bl_name[16];
struct backlight_properties props = { 0 };
+ amdgpu_dm_update_backlight_caps(dm);
+
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
@@ -1584,18 +1796,18 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
struct amdgpu_mode_info *mode_info,
int plane_id)
{
- struct amdgpu_plane *plane;
+ struct drm_plane *plane;
unsigned long possible_crtcs;
int ret = 0;
- plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+ plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
mode_info->planes[plane_id] = plane;
if (!plane) {
DRM_ERROR("KMS: Failed to allocate plane\n");
return -ENOMEM;
}
- plane->base.type = mode_info->plane_type[plane_id];
+ plane->type = mode_info->plane_type[plane_id];
/*
* HACK: IGT tests expect that each plane can only have
@@ -1686,7 +1898,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
- if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
+ if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
DRM_ERROR("KMS: Failed to initialize crtc\n");
goto fail;
}
@@ -1790,6 +2002,7 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
drm_mode_config_cleanup(dm->ddev);
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
return;
}
@@ -1809,73 +2022,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
/* TODO: implement later */
}
-static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
- struct drm_file *filp)
-{
- struct drm_atomic_state *state;
- struct drm_modeset_acquire_ctx ctx;
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
- int ret = 0;
- uint8_t i;
- bool enable = false;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- state = drm_atomic_state_alloc(dev);
- if (!state) {
- ret = -ENOMEM;
- goto out;
- }
- state->acquire_ctx = &ctx;
-
-retry:
- drm_for_each_crtc(crtc, dev) {
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- goto fail;
-
- /* TODO rework amdgpu_dm_commit_planes so we don't need this */
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- goto fail;
- }
-
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
- struct drm_crtc_state *new_crtc_state;
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct dm_crtc_state *dm_new_crtc_state;
-
- if (!acrtc) {
- ASSERT(0);
- continue;
- }
-
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-
- dm_new_crtc_state->freesync_enabled = enable;
- }
-
- ret = drm_atomic_commit(state);
-
-fail:
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
- goto retry;
- }
-
- drm_atomic_state_put(state);
-
-out:
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
- return ret;
-}
-
static const struct amdgpu_display_funcs dm_display_funcs = {
.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
@@ -1888,8 +2034,6 @@ static const struct amdgpu_display_funcs dm_display_funcs = {
dm_crtc_get_scanoutpos,/* called unconditionally */
.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
.add_connector = NULL, /* VBIOS parsing. DAL does it. */
- .notify_freesync = amdgpu_notify_freesync,
-
};
#if defined(CONFIG_DEBUG_KERNEL_DC)
@@ -2362,8 +2506,15 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector)
{
+ struct dm_connector_state *dm_conn_state =
+ to_dm_connector_state(connector->state);
uint32_t bpc = connector->display_info.bpc;
+ /* TODO: Remove this when there's support for max_bpc in drm */
+ if (dm_conn_state && bpc > dm_conn_state->max_bpc)
+ /* Round down to nearest even number. */
+ bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
+
switch (bpc) {
case 0:
/*
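
The max_bpc clamp added above rounds an odd cap down by clearing its low bit (max_bpc - (max_bpc & 1)) so the resulting bpc is always an even, valid depth. A self-contained sketch of that rounding, mirroring the expression in the hunk:

    #include <stdio.h>

    /* Clear the low bit to round an odd cap down to the nearest even bpc. */
    static unsigned int clamp_bpc(unsigned int bpc, unsigned int max_bpc)
    {
            if (bpc > max_bpc)
                    bpc = max_bpc - (max_bpc & 1);
            return bpc;
    }

    int main(void)
    {
            printf("%u\n", clamp_bpc(12, 10));      /* 10: cap already even */
            printf("%u\n", clamp_bpc(12, 11));      /* 10: 11 rounded down */
            printf("%u\n", clamp_bpc(8, 10));       /* 8: under the cap, unchanged */
            return 0;
    }
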
@@ -2483,7 +2634,8 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
static void
fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
const struct drm_display_mode *mode_in,
- const struct drm_connector *connector)
+ const struct drm_connector *connector,
+ const struct dc_stream_state *old_stream)
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
@@ -2509,7 +2661,18 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
connector);
timing_out->scan_type = SCANNING_TYPE_NODATA;
timing_out->hdmi_vic = 0;
- timing_out->vic = drm_match_cea_mode(mode_in);
+
+ if (old_stream) {
+ timing_out->vic = old_stream->timing.vic;
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
+ } else {
+ timing_out->vic = drm_match_cea_mode(mode_in);
+ if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+ if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+ }
timing_out->h_addressable = mode_in->crtc_hdisplay;
timing_out->h_total = mode_in->crtc_htotal;
@@ -2525,10 +2688,6 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
timing_out->pix_clk_khz = mode_in->crtc_clock;
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
- if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
- timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
- if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
- timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
stream->output_color_space = get_output_color_space(timing_out);
@@ -2691,13 +2850,18 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct drm_display_mode *drm_mode,
- const struct dm_connector_state *dm_state)
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream)
{
struct drm_display_mode *preferred_mode = NULL;
struct drm_connector *drm_connector;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
bool native_mode_found = false;
+ bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+ int mode_refresh;
+ int preferred_refresh = 0;
+
struct dc_sink *sink = NULL;
if (aconnector == NULL) {
DRM_ERROR("aconnector is NULL!\n");
@@ -2707,18 +2871,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
drm_connector = &aconnector->base;
if (!aconnector->dc_sink) {
- /*
- * Create dc_sink when necessary to MST
- * Don't apply fake_sink to MST
- */
- if (aconnector->mst_port) {
- dm_dp_mst_dc_sink_create(drm_connector);
- return stream;
+ if (!aconnector->mst_port) {
+ sink = create_fake_sink(aconnector);
+ if (!sink)
+ return stream;
}
-
- sink = create_fake_sink(aconnector);
- if (!sink)
- return stream;
} else {
sink = aconnector->dc_sink;
}
@@ -2743,6 +2900,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct drm_display_mode,
head);
+ mode_refresh = drm_mode_vrefresh(&mode);
+
if (preferred_mode == NULL) {
/*
* This may not be an error, the use case is when we have no
@@ -2755,13 +2914,23 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
decide_crtc_timing_for_drm_display_mode(
&mode, preferred_mode,
dm_state ? (dm_state->scaling != RMX_OFF) : false);
+ preferred_refresh = drm_mode_vrefresh(preferred_mode);
}
if (!dm_state)
drm_mode_set_crtcinfo(&mode, 0);
- fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base);
+ /*
+ * If scaling is enabled and the refresh rate didn't change,
+ * we copy the vic and polarities of the old timings.
+ */
+ if (!scale || mode_refresh != preferred_refresh)
+ fill_stream_properties_from_drm_display_mode(stream,
+ &mode, &aconnector->base, NULL);
+ else
+ fill_stream_properties_from_drm_display_mode(stream,
+ &mode, &aconnector->base, old_stream);
+
update_stream_scaling_settings(&mode, dm_state, stream);
fill_audio_info(
@@ -2773,6 +2942,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (dm_state && dm_state->freesync_capable)
stream->ignore_msa_timing_param = true;
+
finish:
if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
dc_sink_release(sink);
@@ -2841,7 +3011,10 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->adjust = cur->adjust;
state->vrr_infopacket = cur->vrr_infopacket;
- state->freesync_enabled = cur->freesync_enabled;
+ state->abm_level = cur->abm_level;
+ state->vrr_supported = cur->vrr_supported;
+ state->freesync_config = cur->freesync_config;
+ state->crc_enabled = cur->crc_enabled;
/* TODO Duplicate dc_stream after objects are stream object is flattened */
@@ -2954,6 +3127,12 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
dm_new_state->underscan_enable = val;
ret = 0;
+ } else if (property == adev->mode_info.max_bpc_property) {
+ dm_new_state->max_bpc = val;
+ ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ dm_new_state->abm_level = val;
+ ret = 0;
}
return ret;
@@ -2996,7 +3175,14 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
*val = dm_state->underscan_enable;
ret = 0;
+ } else if (property == adev->mode_info.max_bpc_property) {
+ *val = dm_state->max_bpc;
+ ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ *val = dm_state->abm_level;
+ ret = 0;
}
+
return ret;
}
@@ -3040,6 +3226,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->underscan_enable = false;
state->underscan_hborder = 0;
state->underscan_vborder = 0;
+ state->max_bpc = 8;
__drm_atomic_helper_connector_reset(connector, &state->base);
}
@@ -3060,7 +3247,12 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
new_state->freesync_capable = state->freesync_capable;
- new_state->freesync_enable = state->freesync_enable;
+ new_state->abm_level = state->abm_level;
+ new_state->scaling = state->scaling;
+ new_state->underscan_enable = state->underscan_enable;
+ new_state->underscan_hborder = state->underscan_hborder;
+ new_state->underscan_vborder = state->underscan_vborder;
+ new_state->max_bpc = state->max_bpc;
return &new_state->base;
}
@@ -3162,7 +3354,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
goto fail;
}
- stream = create_stream_for_sink(aconnector, mode, NULL);
+ stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
if (stream == NULL) {
DRM_ERROR("Failed to create stream for sink!\n");
goto fail;
@@ -3196,7 +3388,6 @@ amdgpu_dm_connector_helper_funcs = {
*/
.get_modes = get_modes,
.mode_valid = amdgpu_dm_connector_mode_valid,
- .best_encoder = drm_atomic_helper_best_encoder
};
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
@@ -3308,7 +3499,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane,
static const struct drm_plane_funcs dm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
+ .destroy = drm_primary_helper_destroy,
.reset = dm_drm_plane_reset,
.atomic_duplicate_state = dm_drm_plane_duplicate_state,
.atomic_destroy_state = dm_drm_plane_destroy_state,
@@ -3434,10 +3625,43 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+static int dm_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state)
+{
+ /* Only support async updates on cursor planes. */
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void dm_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(new_state->state, plane);
+
+ if (plane->state->fb != new_state->fb)
+ drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_w = new_state->src_w;
+ plane->state->src_h = new_state->src_h;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->crtc_h = new_state->crtc_h;
+
+ handle_cursor_update(plane, old_state);
+}
+
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.prepare_fb = dm_plane_helper_prepare_fb,
.cleanup_fb = dm_plane_helper_cleanup_fb,
.atomic_check = dm_plane_atomic_check,
+ .atomic_async_check = dm_plane_atomic_async_check,
+ .atomic_async_update = dm_plane_atomic_async_update
};
/*
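
The two async hooks added above give cursor moves a fast path: atomic_async_check rejects anything but cursor planes, and atomic_async_update applies the new coordinates to the committed plane state directly, bypassing a full commit (the legacy_cursor_update hunk near the end of this file wires that path up). A standalone gate-then-apply sketch in the same spirit, not the DRM helper API:

    #include <stdio.h>

    enum plane_type { PRIMARY, OVERLAY, CURSOR };

    struct plane { enum plane_type type; int x, y; };

    /* Mirrors dm_plane_atomic_async_check(): only cursors go async. */
    static int async_check(const struct plane *p)
    {
            return p->type == CURSOR ? 0 : -1;
    }

    static void async_update(struct plane *p, int x, int y)
    {
            p->x = x;       /* applied in place, skipping a full commit */
            p->y = y;
    }

    int main(void)
    {
            struct plane cursor = { .type = CURSOR };

            if (async_check(&cursor) == 0)
                    async_update(&cursor, 100, 50);
            printf("cursor at %d,%d\n", cursor.x, cursor.y);
            return 0;
    }
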
@@ -3469,49 +3693,49 @@ static const u32 cursor_formats[] = {
};
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
- struct amdgpu_plane *aplane,
+ struct drm_plane *plane,
unsigned long possible_crtcs)
{
int res = -EPERM;
- switch (aplane->base.type) {
+ switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
res = drm_universal_plane_init(
dm->adev->ddev,
- &aplane->base,
+ plane,
possible_crtcs,
&dm_plane_funcs,
rgb_formats,
ARRAY_SIZE(rgb_formats),
- NULL, aplane->base.type, NULL);
+ NULL, plane->type, NULL);
break;
case DRM_PLANE_TYPE_OVERLAY:
res = drm_universal_plane_init(
dm->adev->ddev,
- &aplane->base,
+ plane,
possible_crtcs,
&dm_plane_funcs,
yuv_formats,
ARRAY_SIZE(yuv_formats),
- NULL, aplane->base.type, NULL);
+ NULL, plane->type, NULL);
break;
case DRM_PLANE_TYPE_CURSOR:
res = drm_universal_plane_init(
dm->adev->ddev,
- &aplane->base,
+ plane,
possible_crtcs,
&dm_plane_funcs,
cursor_formats,
ARRAY_SIZE(cursor_formats),
- NULL, aplane->base.type, NULL);
+ NULL, plane->type, NULL);
break;
}
- drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
+ drm_plane_helper_add(plane, &dm_plane_helper_funcs);
/* Create (reset) the plane state */
- if (aplane->base.funcs->reset)
- aplane->base.funcs->reset(&aplane->base);
+ if (plane->funcs->reset)
+ plane->funcs->reset(plane);
return res;
@@ -3522,7 +3746,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
uint32_t crtc_index)
{
struct amdgpu_crtc *acrtc = NULL;
- struct amdgpu_plane *cursor_plane;
+ struct drm_plane *cursor_plane;
int res = -ENOMEM;
@@ -3530,7 +3754,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
if (!cursor_plane)
goto fail;
- cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+ cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
@@ -3541,7 +3765,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
dm->ddev,
&acrtc->base,
plane,
- &cursor_plane->base,
+ cursor_plane,
&amdgpu_dm_crtc_funcs, NULL);
if (res)
@@ -3599,14 +3823,17 @@ static int to_drm_connector_type(enum signal_type st)
}
}
+static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
+{
+ return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+}
+
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
- const struct drm_connector_helper_funcs *helper =
- connector->helper_private;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- encoder = helper->best_encoder(connector);
+ encoder = amdgpu_dm_connector_to_encoder(connector);
if (encoder == NULL)
return;
@@ -3733,14 +3960,12 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
- const struct drm_connector_helper_funcs *helper =
- connector->helper_private;
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
struct drm_encoder *encoder;
struct edid *edid = amdgpu_dm_connector->edid;
- encoder = helper->best_encoder(connector);
+ encoder = amdgpu_dm_connector_to_encoder(connector);
if (!edid || !drm_edid_is_valid(edid)) {
amdgpu_dm_connector->num_modes =
@@ -3779,12 +4004,12 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
case DRM_MODE_CONNECTOR_HDMIA:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
aconnector->base.ycbcr_420_allowed =
- link->link_enc->features.ycbcr420_supported ? true : false;
+ link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
aconnector->base.ycbcr_420_allowed =
- link->link_enc->features.ycbcr420_supported ? true : false;
+ link->link_enc->features.dp_ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
@@ -3806,7 +4031,21 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.max_bpc_property,
+ 0);
+ if (connector_type == DRM_MODE_CONNECTOR_eDP &&
+ dc_is_dmcu_initialized(adev->dm.dc)) {
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.abm_level_property, 0);
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ drm_connector_attach_vrr_capable_property(
+ &aconnector->base);
+ }
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -4111,6 +4350,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state)
{
+ struct amdgpu_device *adev = plane->dev->dev_private;
struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
@@ -4135,9 +4375,12 @@ static void handle_cursor_update(struct drm_plane *plane,
if (!position.enable) {
/* turn off cursor */
- if (crtc_state && crtc_state->stream)
+ if (crtc_state && crtc_state->stream) {
+ mutex_lock(&adev->dm.dc_lock);
dc_stream_set_cursor_position(crtc_state->stream,
&position);
+ mutex_unlock(&adev->dm.dc_lock);
+ }
return;
}
@@ -4155,6 +4398,7 @@ static void handle_cursor_update(struct drm_plane *plane,
attributes.pitch = attributes.width;
if (crtc_state->stream) {
+ mutex_lock(&adev->dm.dc_lock);
if (!dc_stream_set_cursor_attributes(crtc_state->stream,
&attributes))
DRM_ERROR("DC failed to set cursor attributes\n");
@@ -4162,6 +4406,7 @@ static void handle_cursor_update(struct drm_plane *plane,
if (!dc_stream_set_cursor_position(crtc_state->stream,
&position))
DRM_ERROR("DC failed to set cursor position\n");
+ mutex_unlock(&adev->dm.dc_lock);
}
}
@@ -4183,6 +4428,91 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
acrtc->crtc_id);
}
+struct dc_stream_status *dc_state_get_stream_status(
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (stream == state->streams[i])
+ return &state->stream_status[i];
+ }
+
+ return NULL;
+}
+
+static void update_freesync_state_on_stream(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state,
+ struct dc_stream_state *new_stream)
+{
+ struct mod_vrr_params vrr = {0};
+ struct dc_info_packet vrr_infopacket = {0};
+ struct mod_freesync_config config = new_crtc_state->freesync_config;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ if (new_crtc_state->vrr_supported &&
+ config.min_refresh_in_uhz &&
+ config.max_refresh_in_uhz) {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
+ }
+
+ mod_freesync_build_vrr_params(dm->freesync_module,
+ new_stream,
+ &config, &vrr);
+
+ mod_freesync_build_vrr_infopacket(
+ dm->freesync_module,
+ new_stream,
+ &vrr,
+ PACKET_TYPE_VRR,
+ TRANSFER_FUNC_UNKNOWN,
+ &vrr_infopacket);
+
+ new_crtc_state->freesync_timing_changed =
+ (memcmp(&new_crtc_state->adjust,
+ &vrr.adjust,
+ sizeof(vrr.adjust)) != 0);
+
+ new_crtc_state->freesync_vrr_info_changed =
+ (memcmp(&new_crtc_state->vrr_infopacket,
+ &vrr_infopacket,
+ sizeof(vrr_infopacket)) != 0);
+
+ new_crtc_state->adjust = vrr.adjust;
+ new_crtc_state->vrr_infopacket = vrr_infopacket;
+
+ new_stream->adjust = new_crtc_state->adjust;
+ new_stream->vrr_infopacket = vrr_infopacket;
+
+ if (new_crtc_state->freesync_vrr_info_changed)
+ DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
+ new_crtc_state->base.crtc->base.id,
+ (int)new_crtc_state->base.vrr_enabled,
+ (int)vrr.state);
+
+ if (new_crtc_state->freesync_timing_changed)
+ DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
+ new_crtc_state->base.crtc->base.id,
+ vrr.adjust.v_total_min,
+ vrr.adjust.v_total_max);
+}
+
/*
* Executes flip
*
@@ -4204,6 +4534,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
struct dc_flip_addrs addr = { {0} };
/* TODO eliminate or rename surface_update */
struct dc_surface_update surface_updates[1] = { {0} };
+ struct dc_stream_update stream_update = {0};
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
struct dc_stream_status *stream_status;
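
update_freesync_state_on_stream() above rebuilds the VRR timing adjustment and infopacket on every flip, then memcmp()s the results against the cached copies so that only genuine changes are flagged into the stream update. The same detect-then-flag pattern in miniature, illustrative only:

    #include <stdio.h>
    #include <string.h>

    struct vrr_params { unsigned int v_total_min, v_total_max; };

    int main(void)
    {
            struct vrr_params cached = { 1080, 1200 };
            struct vrr_params rebuilt = { 1080, 1250 };     /* max raised this frame */
            int timing_changed;

            /* Compare whole structs, as the driver does with vrr.adjust. */
            timing_changed = memcmp(&cached, &rebuilt, sizeof(rebuilt)) != 0;

            if (timing_changed) {
                    cached = rebuilt;       /* cache for the next comparison */
                    printf("push timing update: min=%u max=%u\n",
                           cached.v_total_min, cached.v_total_max);
            }
            return 0;
    }
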
@@ -4276,13 +4607,30 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
}
surface_updates->flip_addr = &addr;
+ if (acrtc_state->stream) {
+ update_freesync_state_on_stream(
+ &adev->dm,
+ acrtc_state,
+ acrtc_state->stream);
+
+ if (acrtc_state->freesync_timing_changed)
+ stream_update.adjust =
+ &acrtc_state->stream->adjust;
+
+ if (acrtc_state->freesync_vrr_info_changed)
+ stream_update.vrr_infopacket =
+ &acrtc_state->stream->vrr_infopacket;
+ }
+
+ mutex_lock(&adev->dm.dc_lock);
dc_commit_updates_for_stream(adev->dm.dc,
surface_updates,
1,
acrtc_state->stream,
- NULL,
+ &stream_update,
&surface_updates->surface,
state);
+ mutex_unlock(&adev->dm.dc_lock);
DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
__func__,
@@ -4297,6 +4645,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
* with a dc_plane_state and follow the atomic model a bit more closely here.
*/
static bool commit_planes_to_stream(
+ struct amdgpu_display_manager *dm,
struct dc *dc,
struct dc_plane_state **plane_states,
uint8_t new_plane_count,
@@ -4313,6 +4662,7 @@ static bool commit_planes_to_stream(
struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
struct dc_stream_update *stream_update =
kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
+ unsigned int abm_level;
if (!stream_update) {
BREAK_TO_DEBUGGER();
@@ -4340,9 +4690,9 @@ static bool commit_planes_to_stream(
stream_update->dst = dc_stream->dst;
stream_update->out_transfer_func = dc_stream->out_transfer_func;
- if (dm_new_crtc_state->freesync_enabled != dm_old_crtc_state->freesync_enabled) {
- stream_update->vrr_infopacket = &dc_stream->vrr_infopacket;
- stream_update->adjust = &dc_stream->adjust;
+ if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
+ abm_level = dm_new_crtc_state->abm_level;
+ stream_update->abm_level = &abm_level;
}
for (i = 0; i < new_plane_count; i++) {
@@ -4372,11 +4722,13 @@ static bool commit_planes_to_stream(
updates[i].scaling_info = &scaling_info[i];
}
+ mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(
dc,
updates,
new_plane_count,
dc_stream, stream_update, plane_states, state);
+ mutex_unlock(&dm->dc_lock);
kfree(flip_addr);
kfree(plane_info);
@@ -4386,6 +4738,7 @@ static bool commit_planes_to_stream(
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
struct drm_crtc *pcrtc,
@@ -4402,7 +4755,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
int planes_count = 0;
unsigned long flags;
@@ -4463,7 +4815,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
crtc,
fb,
(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
- dm_state->context);
+ dc_state);
}
}
@@ -4480,15 +4832,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
- dc_stream_attach->adjust = acrtc_state->adjust;
- dc_stream_attach->vrr_infopacket = acrtc_state->vrr_infopacket;
+ dc_stream_attach->abm_level = acrtc_state->abm_level;
- if (false == commit_planes_to_stream(dm->dc,
+ if (false == commit_planes_to_stream(dm,
+ dm->dc,
plane_states_constructed,
planes_count,
acrtc_state,
dm_old_crtc_state,
- dm_state->context))
+ dc_state))
dm_error("%s: Failed to attach plane!\n", __func__);
} else {
/*TODO BUG Here should go disable planes on CRTC. */
@@ -4542,12 +4894,21 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
/*TODO Handle EINTR, reenable IRQ*/
}
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failures here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
uint32_t i, j;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -4560,7 +4921,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_update_legacy_modeset_state(dev, state);
- dm_state = to_dm_atomic_state(state);
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ } else {
+ /* No state changes, retain current state. */
+ dc_state_temp = dc_create_state();
+ ASSERT(dc_state_temp);
+ dc_state = dc_state_temp;
+ dc_resource_state_copy_construct_current(dm->dc, dc_state);
+ }
/* update changed items */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -4633,9 +5003,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
} /* for_each_crtc_in_state() */
- if (dm_state->context) {
- dm_enable_per_frame_crtc_master_sync(dm_state->context);
- WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+ if (dc_state) {
+ dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
+ WARN_ON(!dc_commit_state(dm->dc, dc_state));
+ mutex_unlock(&dm->dc_lock);
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -4648,13 +5020,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_get_status(dm_new_crtc_state->stream);
if (!status)
+ status = dc_state_get_stream_status(dc_state,
+ dm_new_crtc_state->stream);
+
+ if (!status)
DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
else
acrtc->otg_inst = status->primary_otg_inst;
}
}
- /* Handle scaling and underscan changes*/
+ /* Handle scaling, underscan, and abm changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
@@ -4670,11 +5046,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
- /* Skip anything that is not scaling or underscan changes */
- if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
- continue;
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ /* Skip anything that is not scaling or underscan changes */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
+ (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
+ continue;
update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
@@ -4686,17 +5065,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
WARN_ON(!status);
WARN_ON(!status->plane_count);
- dm_new_crtc_state->stream->adjust = dm_new_crtc_state->adjust;
- dm_new_crtc_state->stream->vrr_infopacket = dm_new_crtc_state->vrr_infopacket;
+ dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
/*TODO How it works with MPO ?*/
if (!commit_planes_to_stream(
+ dm,
dm->dc,
status->plane_states,
status->plane_count,
dm_new_crtc_state,
to_dm_crtc_state(old_crtc_state),
- dm_state->context))
+ dc_state))
dm_error("%s: Failed to update stream scaling!\n", __func__);
}
@@ -4729,7 +5108,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream)
- amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
+ amdgpu_dm_commit_planes(state, dc_state, dev,
+ dm, crtc, &wait_for_vblank);
}
@@ -4769,6 +5149,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for (i = 0; i < crtc_disable_count; i++)
pm_runtime_put_autosuspend(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
+
+ if (dc_state_temp)
+ dc_release_state(dc_state_temp);
}
@@ -4912,20 +5295,18 @@ static int do_aquire_global_lock(struct drm_device *dev,
return ret < 0 ? ret : 0;
}
-void set_freesync_on_stream(struct amdgpu_display_manager *dm,
- struct dm_crtc_state *new_crtc_state,
- struct dm_connector_state *new_con_state,
- struct dc_stream_state *new_stream)
+static void get_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state,
+ struct dm_connector_state *new_con_state)
{
struct mod_freesync_config config = {0};
- struct mod_vrr_params vrr = {0};
- struct dc_info_packet vrr_infopacket = {0};
struct amdgpu_dm_connector *aconnector =
to_amdgpu_dm_connector(new_con_state->base.connector);
- if (new_con_state->freesync_capable &&
- new_con_state->freesync_enable) {
- config.state = new_crtc_state->freesync_enabled ?
+ new_crtc_state->vrr_supported = new_con_state->freesync_capable;
+
+ if (new_con_state->freesync_capable) {
+ config.state = new_crtc_state->base.vrr_enabled ?
VRR_STATE_ACTIVE_VARIABLE :
VRR_STATE_INACTIVE;
config.min_refresh_in_uhz =
@@ -4935,19 +5316,18 @@ void set_freesync_on_stream(struct amdgpu_display_manager *dm,
config.vsif_supported = true;
}
- mod_freesync_build_vrr_params(dm->freesync_module,
- new_stream,
- &config, &vrr);
+ new_crtc_state->freesync_config = config;
+}
- mod_freesync_build_vrr_infopacket(dm->freesync_module,
- new_stream,
- &vrr,
- packet_type_fs1,
- NULL,
- &vrr_infopacket);
+static void reset_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state)
+{
+ new_crtc_state->vrr_supported = false;
- new_crtc_state->adjust = vrr.adjust;
- new_crtc_state->vrr_infopacket = vrr_infopacket;
+ memset(&new_crtc_state->adjust, 0,
+ sizeof(new_crtc_state->adjust));
+ memset(&new_crtc_state->vrr_infopacket, 0,
+ sizeof(new_crtc_state->vrr_infopacket));
}
static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
@@ -4955,11 +5335,11 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
bool enable,
bool *lock_and_validation_needed)
{
+ struct dm_atomic_state *dm_state = NULL;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
struct dc_stream_state *new_stream;
int ret = 0;
@@ -5007,7 +5387,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
new_stream = create_stream_for_sink(aconnector,
&new_crtc_state->mode,
- dm_new_conn_state);
+ dm_new_conn_state,
+ dm_old_crtc_state->stream);
/*
* we can have no stream on ACTION_SET if a display
@@ -5022,8 +5403,7 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
break;
}
- set_freesync_on_stream(dm, dm_new_crtc_state,
- dm_new_conn_state, new_stream);
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
@@ -5033,9 +5413,6 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
}
}
- if (dm_old_crtc_state->freesync_enabled != dm_new_crtc_state->freesync_enabled)
- new_crtc_state->mode_changed = true;
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto next_crtc;
@@ -5057,6 +5434,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
if (!dm_old_crtc_state->stream)
goto next_crtc;
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
crtc->base.id);
@@ -5072,6 +5453,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
dc_stream_release(dm_old_crtc_state->stream);
dm_new_crtc_state->stream = NULL;
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+
*lock_and_validation_needed = true;
} else {/* Add stream for any updated/enabled CRTC */
@@ -5091,6 +5474,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
WARN_ON(dm_new_crtc_state->stream);
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
dm_new_crtc_state->stream = new_stream;
dc_stream_retain(new_stream);
@@ -5149,7 +5536,9 @@ next_crtc:
amdgpu_dm_set_ctm(dm_new_crtc_state);
}
-
+ /* Update Freesync settings. */
+ get_freesync_config_for_crtc(dm_new_crtc_state,
+ dm_new_conn_state);
}
return ret;
@@ -5165,12 +5554,13 @@ static int dm_update_planes_state(struct dc *dc,
bool enable,
bool *lock_and_validation_needed)
{
+
+ struct dm_atomic_state *dm_state = NULL;
struct drm_crtc *new_plane_crtc, *old_plane_crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
int i ;
/* TODO return page_flip_needed() function */
@@ -5208,6 +5598,10 @@ static int dm_update_planes_state(struct dc *dc,
DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, old_plane_crtc->base.id);
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ return ret;
+
if (!dc_remove_plane_from_context(
dc,
dm_old_crtc_state->stream,
@@ -5262,6 +5656,12 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
+
/*
* Any atomic check errors that occur after this will
* not need a release. The plane state will be attached
@@ -5293,11 +5693,14 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}
-enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, struct drm_atomic_state *state)
-{
-
- int i, j, num_plane;
+static int
+dm_determine_update_type_for_commit(struct dc *dc,
+ struct drm_atomic_state *state,
+ enum surface_update_type *out_type)
+{
+ struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
+ int i, j, num_plane, ret = 0;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
struct drm_crtc *new_plane_crtc, *old_plane_crtc;
@@ -5313,6 +5716,12 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
struct dc_stream_update stream_update;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
+ if (!updates || !surface) {
+ DRM_ERROR("Plane or surface update failed to allocate");
+ /* Set type to FULL to avoid crashing in DC*/
+ update_type = UPDATE_TYPE_FULL;
+ goto cleanup;
+ }
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -5365,35 +5774,73 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
}
if (num_plane > 0) {
- status = dc_stream_get_status(new_dm_crtc_state->stream);
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto cleanup;
+
+ old_dm_state = dm_atomic_get_old_state(state);
+ if (!old_dm_state) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ status = dc_state_get_stream_status(old_dm_state->context,
+ new_dm_crtc_state->stream);
+
update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
&stream_update, status);
if (update_type > UPDATE_TYPE_MED) {
update_type = UPDATE_TYPE_FULL;
- goto ret;
+ goto cleanup;
}
}
} else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
update_type = UPDATE_TYPE_FULL;
- goto ret;
+ goto cleanup;
}
}
-ret:
+cleanup:
kfree(updates);
kfree(surface);
- return update_type;
+ *out_type = update_type;
+ return ret;
}
+/**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+ * @dev: The DRM device
+ * @state: The atomic state to commit
+ *
+ * Validate that the given atomic state is programmable by DC into hardware.
+ * This involves constructing a &struct dc_state reflecting the new hardware
+ * state we wish to commit, then querying DC to see if it is programmable. It's
+ * important not to modify the existing DC state. Otherwise, atomic_check
+ * may unexpectedly commit hardware changes.
+ *
+ * When validating the DC state, it's important that the right locks are
+ * acquired. For the full-update case, which removes/adds/updates streams on one
+ * CRTC while flipping on another CRTC, acquiring the global lock guarantees
+ * that any such full-update commit will wait for completion of any outstanding
+ * flip using DRM's synchronization events. See
+ * dm_determine_update_type_for_commit().
+ *
+ * Note that DM adds the affected connectors for all CRTCs in state, even when
+ * that might not seem necessary. This is because DC stream creation requires the
+ * DC sink, which is tied to the DRM connector state. Cleaning this up should
+ * be possible but non-trivial - a possible TODO item.
+ *
+ * Return: 0 on success, or a negative error code if validation failed.
+ */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct amdgpu_device *adev = dev->dev_private;
+ struct dm_atomic_state *dm_state = NULL;
struct dc *dc = adev->dm.dc;
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
struct drm_connector *connector;
struct drm_connector_state *old_con_state, *new_con_state;
struct drm_crtc *crtc;
@@ -5414,12 +5861,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
- (dm_old_crtc_state->freesync_enabled == dm_new_crtc_state->freesync_enabled))
+ !new_crtc_state->vrr_enabled)
continue;
if (!new_crtc_state->enable)
@@ -5434,10 +5878,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
- dm_state->context = dc_create_state();
- ASSERT(dm_state->context);
- dc_resource_state_copy_construct_current(dc, dm_state->context);
-
/* Remove exiting planes if they are modified */
ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
if (ret) {
@@ -5490,16 +5930,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
- /*
- * For full updates case when
- * removing/adding/updating streams on one CRTC while flipping
- * on another CRTC,
- * acquiring global lock will guarantee that any such full
- * update commit
- * will wait for completion of any outstanding flip using DRMs
- * synchronization events.
- */
- update_type = dm_determine_update_type_for_commit(dc, state);
+ ret = dm_determine_update_type_for_commit(dc, state, &update_type);
+ if (ret)
+ goto fail;
if (overall_update_type < update_type)
overall_update_type = update_type;
@@ -5517,6 +5950,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (overall_update_type > UPDATE_TYPE_FAST) {
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
ret = do_aquire_global_lock(dev, state);
if (ret)
@@ -5526,6 +5962,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = -EINVAL;
goto fail;
}
+ } else if (state->legacy_cursor_update) {
+ /*
+ * This is a fast cursor update coming from the plane update
+ * helper, check if it can be done asynchronously for better
+ * performance.
+ */
+ state->async_update = !drm_atomic_helper_async_check(dev, state);
}
/* Must be success */
@@ -5571,14 +6014,15 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct detailed_data_monitor_range *range;
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- struct dm_connector_state *dm_con_state;
+ struct dm_connector_state *dm_con_state = NULL;
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
+ bool freesync_capable = false;
if (!connector->state) {
DRM_ERROR("%s - Connector has no state", __func__);
- return;
+ goto update;
}
if (!edid) {
@@ -5588,9 +6032,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector->max_vfreq = 0;
amdgpu_dm_connector->pixel_clock_mhz = 0;
- dm_con_state->freesync_capable = false;
- dm_con_state->freesync_enable = false;
- return;
+ goto update;
}
dm_con_state = to_dm_connector_state(connector->state);
@@ -5598,10 +6040,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
edid_check_required = false;
if (!amdgpu_dm_connector->dc_sink) {
DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
- return;
+ goto update;
}
if (!adev->dm.freesync_module)
- return;
+ goto update;
/*
* if edid non zero restrict freesync only for dp and edp
*/
@@ -5613,7 +6055,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector);
}
}
- dm_con_state->freesync_capable = false;
if (edid_check_required == true && (edid->version > 1 ||
(edid->version == 1 && edid->revision > 1))) {
for (i = 0; i < 4; i++) {
@@ -5645,8 +6086,16 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (amdgpu_dm_connector->max_vfreq -
amdgpu_dm_connector->min_vfreq > 10) {
- dm_con_state->freesync_capable = true;
+ freesync_capable = true;
}
}
+
+update:
+ if (dm_con_state)
+ dm_con_state->freesync_capable = freesync_capable;
+
+ if (connector->vrr_capable_property)
+ drm_connector_set_vrr_capable_property(connector,
+ freesync_capable);
}
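
The update path above funnels every exit through the common label so that both the DM connector state and the connector's "vrr_capable" property always reflect the most recent EDID parse. A minimal sketch of that pattern for a generic DRM driver, assuming a freesync-style capability flag (only the drm_connector_*_vrr_capable_property() helpers are real API here):

	#include <drm/drm_connector.h>

	/* Attach the immutable "vrr_capable" property once at connector init;
	 * it defaults to false until the driver proves otherwise.
	 */
	static int example_connector_init(struct drm_connector *connector)
	{
		return drm_connector_attach_vrr_capable_property(connector);
	}

	/* Re-run on every EDID update so the property can never go stale. */
	static void example_update_caps(struct drm_connector *connector,
					bool freesync_capable)
	{
		if (connector->vrr_capable_property)
			drm_connector_set_vrr_capable_property(connector,
							       freesync_capable);
	}
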
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 978b34a5011c..25bb91ee80ba 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -59,60 +59,140 @@ struct common_irq_params {
enum dc_irq_source irq_src;
};
+/**
+ * struct irq_list_head - Linked list for low-context IRQ handlers.
+ *
+ * @head: The list_head within &struct handler_data
+ * @work: A work_struct containing the deferred handler work
+ */
struct irq_list_head {
struct list_head head;
/* In case this interrupt needs post-processing, 'work' will be queued*/
struct work_struct work;
};
+/**
+ * struct dm_comressor_info - Buffer info used by frame buffer compression
+ * @cpu_addr: MMIO cpu addr
+ * @bo_ptr: Pointer to the buffer object
+ * @gpu_addr: MMIO gpu addr
+ */
struct dm_comressor_info {
void *cpu_addr;
struct amdgpu_bo *bo_ptr;
uint64_t gpu_addr;
};
+/**
+ * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI
+ * @min_input_signal: minimum possible input in range 0-255
+ * @max_input_signal: maximum possible input in range 0-255
+ * @caps_valid: true if these values are from the ACPI interface
+ */
+struct amdgpu_dm_backlight_caps {
+ int min_input_signal;
+ int max_input_signal;
+ bool caps_valid;
+};
+
+/**
+ * struct amdgpu_display_manager - Central amdgpu display manager device
+ *
+ * @dc: Display Core control structure
+ * @adev: AMDGPU base driver structure
+ * @ddev: DRM base driver structure
+ * @display_indexes_num: Max number of display streams supported
+ * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
+ * @backlight_dev: Backlight control device
+ * @cached_state: Caches device atomic state for suspend/resume
+ * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
+ */
struct amdgpu_display_manager {
+
struct dc *dc;
+
+ /**
+ * @cgs_device:
+ *
+ * The Common Graphics Services device. It provides an interface for
+ * accessing registers.
+ */
struct cgs_device *cgs_device;
- struct amdgpu_device *adev; /*AMD base driver*/
- struct drm_device *ddev; /*DRM base driver*/
+ struct amdgpu_device *adev;
+ struct drm_device *ddev;
u16 display_indexes_num;
- /*
- * 'irq_source_handler_table' holds a list of handlers
- * per (DAL) IRQ source.
+ /**
+ * @atomic_obj:
*
- * Each IRQ source may need to be handled at different contexts.
- * By 'context' we mean, for example:
- * - The ISR context, which is the direct interrupt handler.
- * - The 'deferred' context - this is the post-processing of the
- * interrupt, but at a lower priority.
+ * In combination with &dm_atomic_state it helps manage
+ * global atomic state that doesn't map cleanly into existing
+ * drm resources, like &dc_context.
+ */
+ struct drm_private_obj atomic_obj;
+
+ struct drm_modeset_lock atomic_obj_lock;
+
+ /**
+ * @dc_lock:
+ *
+ * Guards access to DC functions that can issue register write
+ * sequences.
+ */
+ struct mutex dc_lock;
+
+ /**
+ * @irq_handler_list_low_tab:
+ *
+ * Low priority IRQ handler table.
+ *
+ * It is an n*m table consisting of n IRQ sources and m handlers per IRQ
+ * source. Low priority IRQ handlers are deferred to a workqueue to be
+ * processed. Hence, they can sleep.
*
* Note that handlers are called in the same order as they were
* registered (FIFO).
*/
struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+
+ /**
+ * @irq_handler_list_high_tab:
+ *
+ * High priority IRQ handler table.
+ *
+ * It is an n*m table, same as &irq_handler_list_low_tab. However,
+ * handlers in this table are not deferred and are called immediately.
+ */
struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
+ /**
+ * @pflip_params:
+ *
+ * Page flip IRQ parameters, passed to registered handlers when
+ * triggered.
+ */
struct common_irq_params
pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
+ /**
+ * @vblank_params:
+ *
+ * Vertical blanking IRQ parameters, passed to registered handlers when
+ * triggered.
+ */
struct common_irq_params
vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
- /* this spin lock synchronizes access to 'irq_handler_list_table' */
spinlock_t irq_handler_list_table_lock;
struct backlight_device *backlight_dev;
const struct dc_link *backlight_link;
+ struct amdgpu_dm_backlight_caps backlight_caps;
struct mod_freesync *freesync_module;
- /**
- * Caches device atomic state for suspend/resume
- */
struct drm_atomic_state *cached_state;
struct dm_comressor_info compressor;
@@ -160,8 +240,6 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
bool fake_enable;
-
- bool mst_connected;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
@@ -185,15 +263,21 @@ struct dm_crtc_state {
int crc_skip_count;
bool crc_enabled;
- bool freesync_enabled;
+ bool freesync_timing_changed;
+ bool freesync_vrr_info_changed;
+
+ bool vrr_supported;
+ struct mod_freesync_config freesync_config;
struct dc_crtc_timing_adjust adjust;
struct dc_info_packet vrr_infopacket;
+
+ int abm_level;
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
struct dm_atomic_state {
- struct drm_atomic_state base;
+ struct drm_private_state base;
struct dc_state *context;
};
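
With &struct dm_atomic_state now based on &struct drm_private_state instead of subclassing &struct drm_atomic_state, the dc_state rides through each commit as a DRM private object. A minimal sketch of the duplicate hook such an object needs, assuming the to_dm_atomic_state() cast helper from this header; the DC calls used to seed the copy are illustrative:

	static struct drm_private_state *
	dm_atomic_duplicate_state(struct drm_private_obj *obj)
	{
		struct dm_atomic_state *new_state =
			kzalloc(sizeof(*new_state), GFP_KERNEL);

		if (!new_state)
			return NULL;

		/* Copies the base fields and hooks up the core bookkeeping. */
		__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

		/* Seed the new DC state from the committed one (illustrative). */
		new_state->context = dc_create_state();
		if (!new_state->context) {
			kfree(new_state);
			return NULL;
		}
		dc_resource_state_copy_construct(
			to_dm_atomic_state(obj->state)->context,
			new_state->context);
		return &new_state->base;
	}

Commits then reach the object through drm_atomic_get_private_obj_state(), which invokes this hook the first time the object is touched in a given &struct drm_atomic_state.
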
@@ -206,9 +290,10 @@ struct dm_connector_state {
enum amdgpu_rmx_type scaling;
uint8_t underscan_vborder;
uint8_t underscan_hborder;
+ uint8_t max_bpc;
bool underscan_enable;
- bool freesync_enable;
bool freesync_capable;
+ uint8_t abm_level;
};
#define to_dm_connector_state(x)\
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index be19e6861189..216e48cec716 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -164,7 +164,7 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
*/
stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
- gamma, true, adev->asic_type <= CHIP_RAVEN);
+ gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
dc_gamma_release(&gamma);
if (!ret) {
stream->out_transfer_func->type = old_type;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 01fc5717b657..f088ac585978 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -75,6 +75,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
return -EINVAL;
}
+ if (!stream_state) {
+ DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
+ return -EINVAL;
+ }
+
/* When enabling CRC, we should also disable dithering. */
if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
if (dc_stream_configure_crc(stream_state->ctx->dc,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index a212178f2edc..cd10f77cdeb0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -32,16 +32,55 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
+/**
+ * DOC: overview
+ *
+ * DM provides another layer of IRQ management on top of what the base driver
+ * already provides. This is something that could be cleaned up, and is a
+ * future TODO item.
+ *
+ * The base driver provides IRQ source registration with DRM, handler
+ * registration into the base driver's IRQ table, and a handler callback
+ * amdgpu_irq_handler(), which DRM calls on interrupts. This generic
+ * handler looks up the IRQ table, and calls the respective
+ * &amdgpu_irq_src_funcs.process hookups.
+ *
+ * What DM provides on top are two IRQ tables specifically for top-half and
+ * bottom-half IRQ handling, with the bottom-half implementing workqueues:
+ *
+ * - &amdgpu_display_manager.irq_handler_list_high_tab
+ * - &amdgpu_display_manager.irq_handler_list_low_tab
+ *
+ * They override the base driver's IRQ table, and the effect can be seen
+ * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They
+ * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks up
+ * DM's IRQ tables. However, in order for the base driver to recognize this hook, DM
+ * still needs to register the IRQ with the base driver. See
+ * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
+ *
+ * To expose DC's hardware interrupt toggle to the base driver, DM implements
+ * &amdgpu_irq_src_funcs.set hooks. The base driver calls these through
+ * amdgpu_irq_update() to enable or disable the interrupt.
+ */
+
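
A minimal sketch of that override, patterned after what the register_irq_handlers() implementations set up; the .set callback name is hypothetical:

	/* Every DM-managed source shares the one generic .process handler;
	 * .set forwards enable/disable requests down to DC.
	 */
	static const struct amdgpu_irq_src_funcs dm_example_irq_funcs = {
		.set = amdgpu_dm_set_example_irq_state,	/* hypothetical */
		.process = amdgpu_dm_irq_handler,
	};
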
/******************************************************************************
* Private declarations.
*****************************************************************************/
+/**
+ * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
+ *
+ * @list: Linked list entry referencing the next/previous handler
+ * @handler: Handler function
+ * @handler_arg: Argument passed to the handler when triggered
+ * @dm: DM which this handler belongs to
+ * @irq_source: DC interrupt source that this handler is registered for
+ */
struct amdgpu_dm_irq_handler_data {
struct list_head list;
interrupt_handler handler;
void *handler_arg;
- /* DM which this handler belongs to */
struct amdgpu_display_manager *dm;
/* DAL irq source which registered for this interrupt. */
enum dc_irq_source irq_source;
@@ -68,7 +107,7 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
}
/**
- * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
+ * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
*
* @work: work struct
*/
@@ -99,8 +138,8 @@ static void dm_irq_work_func(struct work_struct *work)
* (The most common use is HPD interrupt) */
}
-/**
- * Remove a handler and return a pointer to hander list from which the
+/*
+ * Remove a handler and return a pointer to the handler list from which the
* handler was removed.
*/
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
@@ -203,6 +242,24 @@ static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
* Note: caller is responsible for input validation.
*****************************************************************************/
+/**
+ * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
+ * @adev: The base driver device containing the DM device.
+ * @int_params: Interrupt parameters containing the source and handler context
+ * @ih: Function pointer to the interrupt handler to register
+ * @handler_args: Arguments passed to the handler when the interrupt occurs
+ *
+ * Register an interrupt handler for the given IRQ source, under the given
+ * context. The context can either be high or low. High context handlers are
+ * executed directly within ISR context, while low context is executed within a
+ * workqueue, thereby allowing operations that sleep.
+ *
+ * Registered handlers are called in a FIFO manner, i.e. the handler
+ * registered first is the first to be called.
+ *
+ * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
+ * source, handler function, and args
+ */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
struct dc_interrupt_params *int_params,
void (*ih)(void *),
@@ -261,6 +318,15 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
return handler_data;
}
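
For reference, a typical registration mirrors the dce110_register_irq_handlers() pattern; the handler and its argument below are hypothetical:

	struct dc_interrupt_params int_params = {0};

	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;	/* workqueue */
	int_params.irq_source = DC_IRQ_SOURCE_HPD1;

	/* example_hpd_handler() may sleep: low context runs from a workqueue. */
	amdgpu_dm_irq_register_interrupt(adev, &int_params,
					 example_hpd_handler, example_arg);
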
+/**
+ * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
+ * @adev: The base driver device containing the DM device
+ * @irq_source: IRQ source to remove the given handler from
+ * @ih: Function pointer to the interrupt handler to unregister
+ *
+ * Go through both low and high context IRQ tables, and find the given handler
+ * for the given irq source. If found, remove it. Otherwise, do nothing.
+ */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
enum dc_irq_source irq_source,
void *ih)
@@ -295,6 +361,20 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
}
}
+/**
+ * amdgpu_dm_irq_init() - Initialize DM IRQ management
+ * @adev: The base driver device containing the DM device
+ *
+ * Initialize DM's high and low context IRQ tables.
+ *
+ * The N-by-M table contains N IRQ sources, each with M
+ * &struct amdgpu_dm_irq_handler_data entries hooked together in a linked list. The
+ * list_heads are initialized here. When an interrupt n is triggered, all m
+ * handlers are called in sequence, FIFO according to registration order.
+ *
+ * The low context table requires special steps to initialize, since handlers
+ * will be deferred to a workqueue. See &struct irq_list_head.
+ */
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
int src;
@@ -317,7 +397,12 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
return 0;
}
-/* DM IRQ and timer resource release */
+/**
+ * amdgpu_dm_irq_fini() - Tear down DM IRQ management
+ * @adev: The base driver device containing the DM device
+ *
+ * Flush all work within the low context IRQ table.
+ */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
int src;
@@ -414,7 +499,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
return 0;
}
-/**
+/*
* amdgpu_dm_irq_schedule_work - schedule all work items registered for the
* "irq_source".
*/
@@ -439,8 +524,9 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
}
-/** amdgpu_dm_irq_immediate_work
- * Callback high irq work immediately, don't send to work queue
+/*
+ * amdgpu_dm_irq_immediate_work
+ * Call back high-IRQ work immediately; don't send it to the work queue.
*/
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
enum dc_irq_source irq_source)
@@ -467,11 +553,14 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}
-/*
- * amdgpu_dm_irq_handler
+/**
+ * amdgpu_dm_irq_handler - Generic DM IRQ handler
+ * @adev: amdgpu base driver device containing the DM device
+ * @source: Unused
+ * @entry: Data about the triggered interrupt
*
- * Generic IRQ handler, calls all registered high irq work immediately, and
- * schedules work for low irq
+ * Calls all registered high irq work immediately, and schedules work for low
+ * irq. The DM IRQ table is used to find the corresponding handlers.
*/
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
@@ -613,7 +702,7 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}
-/*
+/**
* amdgpu_dm_hpd_init - hpd setup callback.
*
* @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 03601d717fed..d02c32a1039c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -205,40 +205,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct dc_sink *dc_sink;
- struct dc_sink_init_data init_params = {
- .link = aconnector->dc_link,
- .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
-
- /* FIXME none of this is safe. we shouldn't touch aconnector here in
- * atomic_check
- */
-
- /*
- * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
- */
- if (!aconnector->port || !aconnector->port->aux.ddc.algo)
- return;
-
- ASSERT(aconnector->edid);
-
- dc_sink = dc_link_add_remote_sink(
- aconnector->dc_link,
- (uint8_t *)aconnector->edid,
- (aconnector->edid->extensions + 1) * EDID_LENGTH,
- &init_params);
-
- dc_sink->priv = aconnector;
- aconnector->dc_sink = dc_sink;
-
- if (aconnector->dc_sink)
- amdgpu_dm_update_freesync_caps(
- connector, aconnector->edid);
-}
-
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -319,12 +285,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder;
struct drm_encoder *encoder;
- const struct drm_connector_helper_funcs *connector_funcs =
- connector->base.helper_private;
- struct drm_encoder *enc_master =
- connector_funcs->best_encoder(&connector->base);
- DRM_DEBUG_KMS("enc master is %p\n", enc_master);
amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
if (!amdgpu_encoder)
return NULL;
@@ -354,25 +315,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- aconnector = to_amdgpu_dm_connector(connector);
- if (aconnector->mst_port == master
- && !aconnector->port) {
- DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
-
- aconnector->port = port;
- drm_connector_set_path_property(connector, pathprop);
-
- drm_connector_list_iter_end(&conn_iter);
- aconnector->mst_connected = true;
- return &aconnector->base;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
if (!aconnector)
@@ -421,8 +363,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
*/
amdgpu_dm_connector_funcs_reset(connector);
- aconnector->mst_connected = true;
-
DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
@@ -434,6 +374,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
+ struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
@@ -447,7 +390,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
aconnector->dc_sink = NULL;
}
- aconnector->mst_connected = false;
+ drm_connector_unregister(connector);
+ if (adev->mode_info.rfbdev)
+ drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
+ drm_connector_put(connector);
}
static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -458,18 +404,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
drm_kms_helper_hotplug_event(dev);
}
-static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
-{
- mutex_lock(&connector->dev->mode_config.mutex);
- drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
- mutex_unlock(&connector->dev->mode_config.mutex);
-}
-
static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if (adev->mode_info.rfbdev)
drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -477,9 +415,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
drm_connector_register(connector);
-
- if (aconnector->mst_connected)
- dm_dp_mst_link_status_reset(connector);
}
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 8cf51da26657..2da851b40042 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -31,6 +31,5 @@ struct amdgpu_dm_connector;
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 12001a006b2d..9d2d6986b983 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -485,11 +485,11 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
return;
clock.clock_type = amd_pp_dcf_clock;
- clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+ clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000;
pp_funcs->display_clock_voltage_request(pp_handle, &clock);
clock.clock_type = amd_pp_f_clock;
- clock.clock_freq_in_khz = req->hard_min_fclk_khz;
+ clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000;
pp_funcs->display_clock_voltage_request(pp_handle, &clock);
}
@@ -518,13 +518,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
wm_dce_clocks[i].wm_set_id =
ranges->reader_wm_sets[i].wm_inst;
wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
- ranges->reader_wm_sets[i].max_drain_clk_khz;
+ ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
- ranges->reader_wm_sets[i].min_drain_clk_khz;
+ ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
wm_dce_clocks[i].wm_max_mem_clk_in_khz =
- ranges->reader_wm_sets[i].max_fill_clk_khz;
+ ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
wm_dce_clocks[i].wm_min_mem_clk_in_khz =
- ranges->reader_wm_sets[i].min_fill_clk_khz;
+ ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
}
for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
@@ -534,13 +534,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
wm_soc_clocks[i].wm_set_id =
ranges->writer_wm_sets[i].wm_inst;
wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
- ranges->writer_wm_sets[i].max_fill_clk_khz;
+ ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
- ranges->writer_wm_sets[i].min_fill_clk_khz;
+ ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
wm_soc_clocks[i].wm_max_mem_clk_in_khz =
- ranges->writer_wm_sets[i].max_drain_clk_khz;
+ ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
wm_soc_clocks[i].wm_min_mem_clk_in_khz =
- ranges->writer_wm_sets[i].min_drain_clk_khz;
+ ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
}
pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
new file mode 100644
index 000000000000..d898981684d5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM amdgpu_dm
+
+#if !defined(_AMDGPU_DM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _AMDGPU_DM_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(amdgpu_dc_rreg,
+ TP_PROTO(unsigned long *read_count, uint32_t reg, uint32_t value),
+ TP_ARGS(read_count, reg, value),
+ TP_STRUCT__entry(
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->value = value;
+ *read_count = *read_count + 1;
+ ),
+ TP_printk("reg=0x%08lx, value=0x%08lx",
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
+TRACE_EVENT(amdgpu_dc_wreg,
+ TP_PROTO(unsigned long *write_count, uint32_t reg, uint32_t value),
+ TP_ARGS(write_count, reg, value),
+ TP_STRUCT__entry(
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->value = value;
+ *write_count = *write_count + 1;
+ ),
+ TP_printk("reg=0x%08lx, value=0x%08lx",
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
+
+TRACE_EVENT(amdgpu_dc_performance,
+ TP_PROTO(unsigned long read_count, unsigned long write_count,
+ unsigned long *last_read, unsigned long *last_write,
+ const char *func, unsigned int line),
+ TP_ARGS(read_count, write_count, last_read, last_write, func, line),
+ TP_STRUCT__entry(
+ __field(uint32_t, reads)
+ __field(uint32_t, writes)
+ __field(uint32_t, read_delta)
+ __field(uint32_t, write_delta)
+ __string(func, func)
+ __field(uint32_t, line)
+ ),
+ TP_fast_assign(
+ __entry->reads = read_count;
+ __entry->writes = write_count;
+ __entry->read_delta = read_count - *last_read;
+ __entry->write_delta = write_count - *last_write;
+ __assign_str(func, func);
+ __entry->line = line;
+ *last_read = read_count;
+ *last_write = write_count;
+ ),
+ TP_printk("%s:%d reads=%08ld (%08ld total), writes=%08ld (%08ld total)",
+ __get_str(func), __entry->line,
+ (unsigned long)__entry->read_delta,
+ (unsigned long)__entry->reads,
+ (unsigned long)__entry->write_delta,
+ (unsigned long)__entry->writes)
+);
+#endif /* _AMDGPU_DM_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE amdgpu_dm_trace
+#include <trace/define_trace.h>
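
These events are meant to be emitted from the MMIO accessor path, with the caller owning the running counters (the read_count/write_count pointers above). A sketch of a call site, assuming the counter lives in the &struct dc_perf_trace introduced elsewhere in this series and that its field is named read_count:

	/* Hypothetical register-read wrapper that feeds the tracepoint. */
	static uint32_t example_rreg(struct dc_context *ctx, uint32_t reg)
	{
		uint32_t value = readl(ctx->example_mmio_base + reg); /* illustrative */

		trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, reg, value);
		return value;
	}

At runtime the events show up under the amdgpu_dm trace system, e.g. via
echo 1 > /sys/kernel/debug/tracing/events/amdgpu_dm/enable.
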
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 0e1dc1b1a48d..c2ab026aee91 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2030,7 +2030,7 @@ static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
static struct device_id device_type_from_device_id(uint16_t device_id)
{
- struct device_id result_device_id;
+ struct device_id result_device_id = {0};
switch (device_id) {
case ATOM_DEVICE_LCD1_SUPPORT:
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index ff764da21b6f..751bb614fc0e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1884,6 +1884,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
.is_accelerated_mode = bios_parser_is_accelerated_mode,
+ .is_active_display = bios_is_active_display,
+
.set_scratch_critical_state = bios_parser_set_scratch_critical_state,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
index d4589470985c..fdda8aa8e303 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -88,3 +88,96 @@ uint32_t bios_get_vga_enabled_displays(
return active_disp;
}
+bool bios_is_active_display(
+ struct dc_bios *bios,
+ enum signal_type signal,
+ const struct connector_device_tag_info *device_tag)
+{
+ uint32_t active = 0;
+ uint32_t connected = 0;
+ uint32_t bios_scratch_0 = 0;
+ uint32_t bios_scratch_3 = 0;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ {
+ if (device_tag->dev_id.device_type == DEVICE_TYPE_DFP) {
+ switch (device_tag->dev_id.enum_id) {
+ case 1:
+ {
+ active = ATOM_S3_DFP1_ACTIVE;
+ connected = 0x0008; //ATOM_DISPLAY_DFP1_CONNECT
+ }
+ break;
+
+ case 2:
+ {
+ active = ATOM_S3_DFP2_ACTIVE;
+ connected = 0x0080; //ATOM_DISPLAY_DFP2_CONNECT
+ }
+ break;
+
+ case 3:
+ {
+ active = ATOM_S3_DFP3_ACTIVE;
+ connected = 0x0200; //ATOM_DISPLAY_DFP3_CONNECT
+ }
+ break;
+
+ case 4:
+ {
+ active = ATOM_S3_DFP4_ACTIVE;
+ connected = 0x0400; //ATOM_DISPLAY_DFP4_CONNECT
+ }
+ break;
+
+ case 5:
+ {
+ active = ATOM_S3_DFP5_ACTIVE;
+ connected = 0x0800; //ATOM_DISPLAY_DFP5_CONNECT
+ }
+ break;
+
+ case 6:
+ {
+ active = ATOM_S3_DFP6_ACTIVE;
+ connected = 0x0040; //ATOM_DISPLAY_DFP6_CONNECT
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+ break;
+
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_EDP:
+ {
+ active = ATOM_S3_LCD1_ACTIVE;
+ connected = 0x0002; //ATOM_DISPLAY_LCD1_CONNECT
+ }
+ break;
+
+ default:
+ break;
+ }
+
+
+ if (bios->regs->BIOS_SCRATCH_0) /* follow up with other ASICs, TODO */
+ bios_scratch_0 = REG_READ(BIOS_SCRATCH_0);
+ if (bios->regs->BIOS_SCRATCH_3) /* follow up with other ASICs, TODO */
+ bios_scratch_3 = REG_READ(BIOS_SCRATCH_3);
+
+ bios_scratch_3 &= ATOM_S3_DEVICE_ACTIVE_MASK;
+ if ((active & bios_scratch_3) && (connected & bios_scratch_0))
+ return true;
+
+ return false;
+}
+
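
A sketch of how a caller might reach this helper through the vbios funcs table hooked up above; the device-tag initialization is illustrative:

	struct connector_device_tag_info tag = {0};

	tag.dev_id.device_type = DEVICE_TYPE_DFP;
	tag.dev_id.enum_id = 2;	/* DFP2: ATOM_S3_DFP2_ACTIVE / 0x0080 above */

	if (bios->funcs->is_active_display(bios, SIGNAL_TYPE_HDMI_TYPE_A, &tag))
		; /* the display was lit by VBIOS and is still active */
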
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
index 75a29e68fb27..f33cac2147e3 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -35,6 +35,10 @@ bool bios_is_accelerated_mode(struct dc_bios *bios);
void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios);
+bool bios_is_active_display(
+ struct dc_bios *bios,
+ enum signal_type signal,
+ const struct connector_device_tag_info *device_tag);
#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 3208188b7ed4..43e4a2be0fa6 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1423,27 +1423,27 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
ranges.num_reader_wm_sets = WM_SET_COUNT;
ranges.num_writer_wm_sets = WM_SET_COUNT;
ranges.reader_wm_sets[0].wm_inst = WM_A;
- ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
- ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive;
- ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive;
+ ranges.reader_wm_sets[0].min_drain_clk_mhz = min_dcfclk_khz / 1000;
+ ranges.reader_wm_sets[0].max_drain_clk_mhz = overdrive / 1000;
+ ranges.reader_wm_sets[0].min_fill_clk_mhz = min_fclk_khz / 1000;
+ ranges.reader_wm_sets[0].max_fill_clk_mhz = overdrive / 1000;
ranges.writer_wm_sets[0].wm_inst = WM_A;
- ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
- ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
- ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
- ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive;
+ ranges.writer_wm_sets[0].min_fill_clk_mhz = socclk_khz / 1000;
+ ranges.writer_wm_sets[0].max_fill_clk_mhz = overdrive / 1000;
+ ranges.writer_wm_sets[0].min_drain_clk_mhz = min_fclk_khz / 1000;
+ ranges.writer_wm_sets[0].max_drain_clk_mhz = overdrive / 1000;
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
ranges.reader_wm_sets[0].wm_inst = WM_A;
- ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000;
- ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
- ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000;
+ ranges.reader_wm_sets[0].min_drain_clk_mhz = 300;
+ ranges.reader_wm_sets[0].max_drain_clk_mhz = 5000;
+ ranges.reader_wm_sets[0].min_fill_clk_mhz = 800;
+ ranges.reader_wm_sets[0].max_fill_clk_mhz = 5000;
ranges.writer_wm_sets[0].wm_inst = WM_A;
- ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000;
- ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
- ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000;
+ ranges.writer_wm_sets[0].min_fill_clk_mhz = 200;
+ ranges.writer_wm_sets[0].max_fill_clk_mhz = 5000;
+ ranges.writer_wm_sets[0].min_drain_clk_mhz = 800;
+ ranges.writer_wm_sets[0].max_drain_clk_mhz = 5000;
}
ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7c491c91465f..d9c57984394b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -62,6 +62,55 @@
const static char DC_BUILD_ID[] = "production-build";
+/**
+ * DOC: Overview
+ *
+ * DC is the OS-agnostic component of the amdgpu DC driver.
+ *
+ * DC maintains and validates a set of structs representing the state of the
+ * driver and writes that state to AMD hardware.
+ *
+ * Main DC HW structs:
+ *
+ * struct dc - The central struct. One per driver. Created on driver load,
+ * destroyed on driver unload.
+ *
+ * struct dc_context - One per driver.
+ * Used as a backpointer by most other structs in dc.
+ *
+ * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
+ * plugpoints). Created on driver load, destroyed on driver unload.
+ *
+ * struct dc_sink - One per display. Created on boot or hotplug.
+ * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
+ * (the display directly attached). It may also have one or more remote
+ * sinks (in the Multi-Stream Transport case)
+ *
+ * struct resource_pool - One per driver. Represents the hw blocks not in the
+ * main pipeline. Not directly accessible by dm.
+ *
+ * Main dc state structs:
+ *
+ * These structs can be created and destroyed as needed. There is a full set of
+ * these structs in dc->current_state representing the currently programmed state.
+ *
+ * struct dc_state - The global DC state, tracking state information
+ * such as bandwidth values.
+ *
+ * struct dc_stream_state - Represents the hw configuration for the pipeline from
+ * a framebuffer to a display. Maps one-to-one with dc_sink.
+ *
+ * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
+ * and may have more in the Multi-Plane Overlay case.
+ *
+ * struct resource_context - Represents the programmable state of everything in
+ * the resource_pool. Not directly accessible by dm.
+ *
+ * struct pipe_ctx - A member of struct resource_context. Represents the
+ * internal hardware pipeline components. Each dc_plane_state has either
+ * one or two (in the pipe-split case).
+ */
+
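
As a concrete reading of those relationships, a short sketch that walks the committed state; the helper is hypothetical, but the fields match the description above (and the stream walk removed from notify_display_count_to_smu() later in this file):

	/* Hypothetical: count streams in the committed state that are lit. */
	static int example_count_active_streams(struct dc *dc)
	{
		struct dc_state *ctx = dc->current_state;
		int i, count = 0;

		for (i = 0; i < ctx->stream_count; i++)
			if (!ctx->streams[i]->dpms_off)
				count++;

		return count;
	}
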
/*******************************************************************************
* Private functions
******************************************************************************/
@@ -175,6 +224,17 @@ failed_alloc:
return false;
}
+static struct dc_perf_trace *dc_perf_trace_create(void)
+{
+ return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
+}
+
+static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
+{
+ kfree(*perf_trace);
+ *perf_trace = NULL;
+}
+
/**
*****************************************************************************
* Function: dc_stream_adjust_vmin_vmax
@@ -240,7 +300,7 @@ bool dc_stream_get_crtc_position(struct dc *dc,
}
/**
- * dc_stream_configure_crc: Configure CRC capture for the given stream.
+ * dc_stream_configure_crc() - Configure CRC capture for the given stream.
* @dc: DC Object
* @stream: The stream to configure CRC on.
* @enable: Enable CRC if true, disable otherwise.
@@ -292,7 +352,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
}
/**
- * dc_stream_get_crc: Get CRC values for the given stream.
+ * dc_stream_get_crc() - Get CRC values for the given stream.
* @dc: DC object
* @stream: The DC stream state of the stream to get CRCs from.
* @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
@@ -328,7 +388,7 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option)
{
struct bit_depth_reduction_params params;
- struct dc_link *link = stream->status.link;
+ struct dc_link *link = stream->sink->link;
struct pipe_ctx *pipes = NULL;
int i;
@@ -391,9 +451,11 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
== stream) {
pipes = &dc->current_state->res_ctx.pipe_ctx[i];
- dc->hwss.program_csc_matrix(pipes,
- stream->output_color_space,
- stream->csc_color_matrix.matrix);
+ dc->hwss.program_output_csc(dc,
+ pipes,
+ stream->output_color_space,
+ stream->csc_color_matrix.matrix,
+ pipes->plane_res.hubp->opp_id);
ret = true;
}
}
@@ -534,6 +596,8 @@ static void destruct(struct dc *dc)
if (dc->ctx->created_bios)
dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ dc_perf_trace_destroy(&dc->ctx->perf_trace);
+
kfree(dc->ctx);
dc->ctx = NULL;
@@ -657,6 +721,12 @@ static bool construct(struct dc *dc,
goto fail;
}
+ dc_ctx->perf_trace = dc_perf_trace_create();
+ if (!dc_ctx->perf_trace) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
/* Create GPIO service */
dc_ctx->gpio_service = dal_gpio_service_create(
dc_version,
@@ -941,7 +1011,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb))
dc->hwss.enable_accelerated_mode(dc, context);
- dc->hwss.set_bandwidth(dc, context, false);
+ dc->hwss.prepare_bandwidth(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
@@ -957,8 +1027,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program hardware */
- dc->hwss.ready_shared_resources(dc, context);
-
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
@@ -1012,7 +1080,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
/* pplib is notified if disp_num changed */
- dc->hwss.set_bandwidth(dc, context, true);
+ dc->hwss.optimize_bandwidth(dc, context);
dc_release_state(dc->current_state);
@@ -1020,8 +1088,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_retain_state(dc->current_state);
- dc->hwss.optimize_shared_resources(dc);
-
return result;
}
@@ -1063,7 +1129,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
dc->optimized_required = false;
- dc->hwss.set_bandwidth(dc, context, true);
+ dc->hwss.optimize_bandwidth(dc, context);
return true;
}
@@ -1331,6 +1397,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
return overall_type;
}
+/**
+ * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
+ *
+ * See :c:type:`enum surface_update_type <surface_update_type>` for an explanation of update types.
+ */
enum surface_update_type dc_check_update_surfaces_for_stream(
struct dc *dc,
struct dc_surface_update *updates,
@@ -1369,35 +1440,6 @@ static struct dc_stream_status *stream_get_status(
static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
-static void notify_display_count_to_smu(
- struct dc *dc,
- struct dc_state *context)
-{
- int i, display_count;
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-
- /*
- * if function pointer not set up, this message is
- * sent as part of pplib_apply_display_requirements.
- * So just return.
- */
- if (!pp_smu || !pp_smu->set_display_count)
- return;
-
- display_count = 0;
- for (i = 0; i < context->stream_count; i++) {
- const struct dc_stream_state *stream = context->streams[i];
-
- /* only notify active stream */
- if (stream->dpms_off)
- continue;
-
- display_count++;
- }
-
- pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
-}
-
static void commit_planes_do_stream_update(struct dc *dc,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
@@ -1422,7 +1464,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->adjust->v_total_max);
if (stream_update->periodic_fn_vsync_delta &&
- pipe_ctx->stream_res.tg &&
pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
@@ -1441,6 +1482,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->output_csc_transform)
dc_stream_program_csc_matrix(dc, stream);
+ if (stream_update->dither_option) {
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+ }
+
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
@@ -1448,19 +1497,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
- dc->hwss.pplib_apply_display_requirements(
- dc, dc->current_state);
- notify_display_count_to_smu(dc, dc->current_state);
+ dc->hwss.optimize_bandwidth(dc, dc->current_state);
} else {
- dc->hwss.pplib_apply_display_requirements(
- dc, dc->current_state);
- notify_display_count_to_smu(dc, dc->current_state);
+ dc->hwss.prepare_bandwidth(dc, dc->current_state);
core_link_enable_stream(dc->current_state, pipe_ctx);
}
}
-
-
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
// if otg funcs defined check if blanked before programming
@@ -1487,7 +1530,7 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *top_pipe_to_program = NULL;
if (update_type == UPDATE_TYPE_FULL) {
- dc->hwss.set_bandwidth(dc, context, false);
+ dc->hwss.prepare_bandwidth(dc, context);
context_clock_trace(dc, context);
}
@@ -1669,6 +1712,9 @@ enum dc_irq_source dc_interrupt_to_irq_source(
return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
+/**
+ * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
+ */
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
@@ -1724,6 +1770,15 @@ void dc_resume(struct dc *dc)
core_link_resume(dc->links[i]);
}
+bool dc_is_dmcu_initialized(struct dc *dc)
+{
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (dmcu)
+ return dmcu->funcs->is_dmcu_initialized(dmcu);
+ return false;
+}
+
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
@@ -1753,6 +1808,11 @@ static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink
return true;
}
+/**
+ * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
+ *
+ * EDID length is in bytes.
+ */
struct dc_sink *dc_link_add_remote_sink(
struct dc_link *link,
const uint8_t *edid,
@@ -1811,6 +1871,12 @@ fail_add_sink:
return NULL;
}
+/**
+ * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
+ *
+ * Note that this just removes the struct dc_sink - it doesn't
+ * program hardware or alter other members of dc_link.
+ */
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
int i;
@@ -1848,4 +1914,4 @@ void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx
info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz;
info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz;
-} \ No newline at end of file
+}
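
The remote-sink kernel-doc above pairs with the MST code removed from amdgpu_dm_mst_types.c earlier in this patch; condensed from that removed code, a call looks like:

	struct dc_sink_init_data init_params = {
		.link = aconnector->dc_link,
		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST,
	};
	struct dc_sink *dc_sink;

	/* EDID length is in bytes: the base block plus any extensions. */
	dc_sink = dc_link_add_remote_sink(aconnector->dc_link,
					  (uint8_t *)aconnector->edid,
					  (aconnector->edid->extensions + 1) * EDID_LENGTH,
					  &init_params);
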
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index e1ebdf7b5eaf..73d049506618 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -311,7 +311,7 @@ void context_timing_trace(
{
int i;
struct dc *core_dc = dc;
- int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
+ int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0};
struct crtc_position position;
unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -322,8 +322,7 @@ void context_timing_trace(
/* get_position() returns CRTC vertical/horizontal counter
* hence not applicable for underlay pipe
*/
- if (pipe_ctx->stream == NULL
- || pipe_ctx->pipe_idx == underlay_idx)
+ if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
continue;
pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position);
@@ -333,7 +332,7 @@ void context_timing_trace(
for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
- if (pipe_ctx->stream == NULL)
+ if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
continue;
TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n",
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fb04a4ad141f..4dc5846de5c4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -198,6 +198,13 @@ static bool program_hpd_filter(
return result;
}
+/**
+ * dc_link_detect_sink() - Determine if there is a sink connected
+ *
+ * @type: Returned connection type
+ *
+ * Does not detect downstream devices, such as MST sinks
+ * or displays connected through active dongles.
+ */
bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
{
uint32_t is_hpd_high = 0;
@@ -324,9 +331,9 @@ static enum signal_type get_basic_signal_type(
return SIGNAL_TYPE_NONE;
}
-/*
- * @brief
- * Check whether there is a dongle on DP connector
+/**
+ * dc_link_is_dp_sink_present() - Check if there is a native DP
+ * or passive DP-HDMI dongle connected
*/
bool dc_link_is_dp_sink_present(struct dc_link *link)
{
@@ -593,6 +600,14 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
}
+/**
+ * dc_link_detect() - Detect if a sink is attached to a given link
+ *
+ * link->local_sink is created or destroyed as needed.
+ *
+ * This does not create remote sinks but will trigger DM
+ * to start MST detection if a branch is detected.
+ */
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
{
struct dc_sink_init_data sink_init_data = { 0 };
@@ -1357,28 +1372,13 @@ static enum dc_status enable_link_dp(
struct dc_link *link = stream->sink->link;
struct dc_link_settings link_settings = {0};
enum dp_panel_mode panel_mode;
- enum dc_link_rate max_link_rate = LINK_RATE_HIGH2;
/* get link settings for video mode timing */
decide_link_settings(stream, &link_settings);
- /* raise clock state for HBR3 if required. Confirmed with HW DCE/DPCS
- * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
- */
- if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
- max_link_rate = LINK_RATE_HIGH3;
-
- if (link_settings.link_rate == max_link_rate) {
- struct dc_clocks clocks = state->bw.dcn.clk;
-
- /* dce/dcn compat, do not update dispclk */
- clocks.dispclk_khz = 0;
- /* 27mhz = 27000000hz= 27000khz */
- clocks.phyclk_khz = link_settings.link_rate * 27000;
-
- state->dis_clk->funcs->update_clocks(
- state->dis_clk, &clocks, false);
- }
+ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
+ link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+ state->dccg->funcs->update_clocks(state->dccg, state, false);
dp_enable_link_phy(
link,
@@ -1411,8 +1411,6 @@ static enum dc_status enable_link_dp(
else
status = DC_FAIL_DP_LINK_TRAINING;
- enable_stream_features(pipe_ctx);
-
return status;
}
@@ -1722,7 +1720,7 @@ static void write_i2c_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
/* Write failure */
@@ -1734,7 +1732,7 @@ static void write_i2c_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
/* Write failure */
@@ -2156,14 +2154,16 @@ int dc_link_get_backlight_level(const struct dc_link *link)
{
struct abm *abm = link->ctx->dc->res_pool->abm;
- if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL)
+ if (abm == NULL || abm->funcs->get_current_backlight == NULL)
return DC_ERROR_UNEXPECTED;
- return (int) abm->funcs->get_current_backlight_8_bit(abm);
+ return (int) abm->funcs->get_current_backlight(abm);
}
-bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
- uint32_t frame_ramp, const struct dc_stream_state *stream)
+bool dc_link_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp,
+ const struct dc_stream_state *stream)
{
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
@@ -2175,26 +2175,24 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
if ((dmcu == NULL) ||
(abm == NULL) ||
- (abm->funcs->set_backlight_level == NULL))
+ (abm->funcs->set_backlight_level_pwm == NULL))
return false;
- if (stream) {
- if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL)
- frame_ramp = 0;
-
- ((struct dc_stream_state *)stream)->bl_pwm_level = level;
- }
+ if (stream)
+ ((struct dc_stream_state *)stream)->bl_pwm_level =
+ backlight_pwm_u16_16;
use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
- DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level);
+ DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
+ backlight_pwm_u16_16, backlight_pwm_u16_16);
if (dc_is_embedded_signal(link->connector_signal)) {
- if (stream != NULL) {
- for (i = 0; i < MAX_PIPES; i++) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
if (core_dc->current_state->res_ctx.
- pipe_ctx[i].stream
- == stream)
+ pipe_ctx[i].stream->sink->link
+ == link)
/* DMCU -1 for all controller id values,
* therefore +1 here
*/
@@ -2204,9 +2202,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
1;
}
}
- abm->funcs->set_backlight_level(
+ abm->funcs->set_backlight_level_pwm(
abm,
- level,
+ backlight_pwm_u16_16,
frame_ramp,
controller_id,
use_smooth_brightness);
@@ -2220,7 +2218,7 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
- if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+ if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
return false;
abm->funcs->set_abm_immediate_disable(abm);
@@ -2233,7 +2231,7 @@ bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
struct dc *core_dc = link->ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
- if (dmcu != NULL && link->psr_enabled)
+ if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled)
dmcu->funcs->set_psr_enable(dmcu, enable, wait);
return true;
@@ -2609,6 +2607,13 @@ void core_link_enable_stream(
core_dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->sink->link->cur_link_settings);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ enable_stream_features(pipe_ctx);
+
+ dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
+ pipe_ctx->stream->bl_pwm_level,
+ 0,
+ pipe_ctx->stream);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index d91df5ef0cb3..849a3a3032f7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2196,7 +2196,7 @@ static void get_active_converter_info(
}
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
- uint8_t det_caps[4];
+ uint8_t det_caps[16]; /* CTS 4.2.2.7 expects the source to read Detailed Capabilities Info: 00080h-0008Fh. */
union dwnstream_port_caps_byte0 *port_caps =
(union dwnstream_port_caps_byte0 *)det_caps;
core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
@@ -2371,11 +2371,22 @@ static bool retrieve_link_cap(struct dc_link *link)
dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
- core_link_read_dpcd(
+ uint8_t ext_cap_data[16];
+
+ memset(ext_cap_data, '\0', sizeof(ext_cap_data));
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
link,
DP_DP13_DPCD_REV,
- dpcd_data,
- sizeof(dpcd_data));
+ ext_cap_data,
+ sizeof(ext_cap_data));
+ if (status == DC_OK) {
+ memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data));
+ break;
+ }
+ }
+ if (status != DC_OK)
+ dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index b6fe29b9fb65..c347afd1030f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -478,10 +478,29 @@ static enum pixel_format convert_pixel_format_to_dalsurface(
return dal_pixel_format;
}
-static void rect_swap_helper(struct rect *rect)
-{
- swap(rect->height, rect->width);
- swap(rect->x, rect->y);
+static inline void get_vp_scan_direction(
+ enum dc_rotation_angle rotation,
+ bool horizontal_mirror,
+ bool *orthogonal_rotation,
+ bool *flip_vert_scan_dir,
+ bool *flip_horz_scan_dir)
+{
+ *orthogonal_rotation = false;
+ *flip_vert_scan_dir = false;
+ *flip_horz_scan_dir = false;
+ if (rotation == ROTATION_ANGLE_180) {
+ *flip_vert_scan_dir = true;
+ *flip_horz_scan_dir = true;
+ } else if (rotation == ROTATION_ANGLE_90) {
+ *orthogonal_rotation = true;
+ *flip_horz_scan_dir = true;
+ } else if (rotation == ROTATION_ANGLE_270) {
+ *orthogonal_rotation = true;
+ *flip_vert_scan_dir = true;
+ }
+
+ if (horizontal_mirror)
+ *flip_horz_scan_dir = !*flip_horz_scan_dir;
}
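
For reference, the same mapping condensed into a standalone truth-table sketch (plain C; the enum below stands in for the driver's ROTATION_ANGLE_* constants):

#include <stdbool.h>
#include <stdio.h>

enum rot { ROT_0, ROT_90, ROT_180, ROT_270 };

static void scan_dir(enum rot r, bool mirror,
                     bool *ortho, bool *flip_v, bool *flip_h)
{
        *ortho  = (r == ROT_90 || r == ROT_270);
        *flip_v = (r == ROT_180 || r == ROT_270);
        /* mirroring toggles the horizontal flip on top of the rotation */
        *flip_h = ((r == ROT_180 || r == ROT_90) != mirror);
}

int main(void)
{
        bool o, v, h;

        scan_dir(ROT_90, false, &o, &v, &h);
        printf("90deg: ortho=%d flip_v=%d flip_h=%d\n", o, v, h); /* 1 0 1 */
        return 0;
}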
static void calculate_viewport(struct pipe_ctx *pipe_ctx)
@@ -490,25 +509,14 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
const struct dc_stream_state *stream = pipe_ctx->stream;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect surf_src = plane_state->src_rect;
- struct rect clip = { 0 };
+ struct rect clip, dest;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
bool pri_split = pipe_ctx->bottom_pipe &&
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
bool sec_split = pipe_ctx->top_pipe &&
pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
- bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
-
- /*
- * Need to calculate the scan direction for viewport to properly determine offset
- */
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
- flip_vert_scan_dir = true;
- flip_horz_scan_dir = true;
- } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
- flip_vert_scan_dir = true;
- else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- flip_horz_scan_dir = true;
+ bool orthogonal_rotation, flip_y_start, flip_x_start;
if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
@@ -516,13 +524,10 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
sec_split = false;
}
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- rect_swap_helper(&surf_src);
-
/* The actual clip is an intersection between stream
* source and surface clip
*/
+ dest = plane_state->dst_rect;
clip.x = stream->src.x > plane_state->clip_rect.x ?
stream->src.x : plane_state->clip_rect.x;
@@ -539,84 +544,77 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
stream->src.y + stream->src.height - clip.y :
plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
+ /*
+ * Need to calculate how scan origin is shifted in vp space
+ * to correctly rotate clip and dst
+ */
+ get_vp_scan_direction(
+ plane_state->rotation,
+ plane_state->horizontal_mirror,
+ &orthogonal_rotation,
+ &flip_y_start,
+ &flip_x_start);
+
+ if (orthogonal_rotation) {
+ swap(clip.x, clip.y);
+ swap(clip.width, clip.height);
+ swap(dest.x, dest.y);
+ swap(dest.width, dest.height);
+ }
+ if (flip_x_start) {
+ clip.x = dest.x + dest.width - clip.x - clip.width;
+ dest.x = 0;
+ }
+ if (flip_y_start) {
+ clip.y = dest.y + dest.height - clip.y - clip.height;
+ dest.y = 0;
+ }
+
/* offset = surf_src.ofs + (clip.ofs - dest.ofs) * scl_ratio
* num_pixels = clip.num_pix * scl_ratio
*/
- data->viewport.x = surf_src.x + (clip.x - plane_state->dst_rect.x) *
- surf_src.width / plane_state->dst_rect.width;
- data->viewport.width = clip.width *
- surf_src.width / plane_state->dst_rect.width;
-
- data->viewport.y = surf_src.y + (clip.y - plane_state->dst_rect.y) *
- surf_src.height / plane_state->dst_rect.height;
- data->viewport.height = clip.height *
- surf_src.height / plane_state->dst_rect.height;
-
- /* To transfer the x, y to correct coordinate on mirror image (camera).
- * deg 0 : transfer x,
- * deg 90 : don't need to transfer,
- * deg180 : transfer y,
- * deg270 : transfer x and y.
- * To transfer the x, y to correct coordinate on non-mirror image (video).
- * deg 0 : don't need to transfer,
- * deg 90 : transfer y,
- * deg180 : transfer x and y,
- * deg270 : transfer x.
- */
- if (pipe_ctx->plane_state->horizontal_mirror) {
- if (flip_horz_scan_dir && !flip_vert_scan_dir) {
- data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
- data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
- } else if (flip_horz_scan_dir && flip_vert_scan_dir)
- data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
- else {
- if (!flip_horz_scan_dir && !flip_vert_scan_dir)
- data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
+ data->viewport.x = surf_src.x + (clip.x - dest.x) * surf_src.width / dest.width;
+ data->viewport.width = clip.width * surf_src.width / dest.width;
+
+ data->viewport.y = surf_src.y + (clip.y - dest.y) * surf_src.height / dest.height;
+ data->viewport.height = clip.height * surf_src.height / dest.height;
+
+ /* Handle split */
+ if (pri_split || sec_split) {
+ if (orthogonal_rotation) {
+ if (flip_y_start != pri_split)
+ data->viewport.height /= 2;
+ else {
+ data->viewport.y += data->viewport.height / 2;
+ /* Ceil offset pipe */
+ data->viewport.height = (data->viewport.height + 1) / 2;
+ }
+ } else {
+ if (flip_x_start != pri_split)
+ data->viewport.width /= 2;
+ else {
+ data->viewport.x += data->viewport.width / 2;
+ /* Ceil offset pipe */
+ data->viewport.width = (data->viewport.width + 1) / 2;
+ }
}
- } else {
- if (flip_horz_scan_dir)
- data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
- if (flip_vert_scan_dir)
- data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
}
/* Round down, compensate in init */
data->viewport_c.x = data->viewport.x / vpc_div;
data->viewport_c.y = data->viewport.y / vpc_div;
- data->inits.h_c = (data->viewport.x % vpc_div) != 0 ?
- dc_fixpt_half : dc_fixpt_zero;
- data->inits.v_c = (data->viewport.y % vpc_div) != 0 ?
- dc_fixpt_half : dc_fixpt_zero;
+ data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
+ data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
+
/* Round up, assuming original video size always has even dimensions */
data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
-
- /* Handle hsplit */
- if (sec_split) {
- data->viewport.x += data->viewport.width / 2;
- data->viewport_c.x += data->viewport_c.width / 2;
- /* Ceil offset pipe */
- data->viewport.width = (data->viewport.width + 1) / 2;
- data->viewport_c.width = (data->viewport_c.width + 1) / 2;
- } else if (pri_split) {
- if (data->viewport.width > 1)
- data->viewport.width /= 2;
- if (data->viewport_c.width > 1)
- data->viewport_c.width /= 2;
- }
-
- if (plane_state->rotation == ROTATION_ANGLE_90 ||
- plane_state->rotation == ROTATION_ANGLE_270) {
- rect_swap_helper(&data->viewport_c);
- rect_swap_helper(&data->viewport);
- }
}
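
A worked instance of the viewport math above (plain C with driver types replaced by ints; the numbers are illustrative only):

#include <stdio.h>

int main(void)
{
        /* 4K surface scaled into a 1080p dest rect, clipped to its center */
        int src_x = 0, src_w = 3840;
        int dest_x = 0, dest_w = 1920;
        int clip_x = 480, clip_w = 960;

        int vp_x = src_x + (clip_x - dest_x) * src_w / dest_w;  /* 960 */
        int vp_w = clip_w * src_w / dest_w;                     /* 1920 */

        /* The secondary pipe of a horizontal split takes the right half;
         * the offset pipe's width is rounded up ("ceil offset pipe"). */
        int sec_x = vp_x + vp_w / 2;    /* 1920 */
        int sec_w = (vp_w + 1) / 2;     /* 960 */

        printf("vp: x=%d w=%d  sec: x=%d w=%d\n", vp_x, vp_w, sec_x, sec_w);
        return 0;
}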
-static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
+static void calculate_recout(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
- struct rect surf_src = plane_state->src_rect;
struct rect surf_clip = plane_state->clip_rect;
bool pri_split = pipe_ctx->bottom_pipe &&
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
@@ -624,10 +622,6 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- rect_swap_helper(&surf_src);
-
pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
if (stream->src.x < surf_clip.x)
pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
@@ -656,7 +650,7 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
stream->dst.y + stream->dst.height
- pipe_ctx->plane_res.scl_data.recout.y;
- /* Handle h & vsplit */
+ /* Handle h & v split, handle rotation using viewport */
if (sec_split && top_bottom_split) {
pipe_ctx->plane_res.scl_data.recout.y +=
pipe_ctx->plane_res.scl_data.recout.height / 2;
@@ -665,44 +659,14 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
(pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
} else if (pri_split && top_bottom_split)
pipe_ctx->plane_res.scl_data.recout.height /= 2;
- else if (pri_split || sec_split) {
- /* HMirror XOR Secondary_pipe XOR Rotation_180 */
- bool right_view = (sec_split != plane_state->horizontal_mirror) !=
- (plane_state->rotation == ROTATION_ANGLE_180);
-
- if (plane_state->rotation == ROTATION_ANGLE_90
- || plane_state->rotation == ROTATION_ANGLE_270)
- /* Secondary_pipe XOR Rotation_270 */
- right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
-
- if (right_view) {
- pipe_ctx->plane_res.scl_data.recout.x +=
- pipe_ctx->plane_res.scl_data.recout.width / 2;
- /* Ceil offset pipe */
- pipe_ctx->plane_res.scl_data.recout.width =
- (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
- } else {
- if (pipe_ctx->plane_res.scl_data.recout.width > 1)
- pipe_ctx->plane_res.scl_data.recout.width /= 2;
- }
- }
- /* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
- * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
- * ratio)
- */
- recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
- * stream->dst.width / stream->src.width -
- surf_src.x * plane_state->dst_rect.width / surf_src.width
- * stream->dst.width / stream->src.width;
- recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
- * stream->dst.height / stream->src.height -
- surf_src.y * plane_state->dst_rect.height / surf_src.height
- * stream->dst.height / stream->src.height;
-
- recout_full->width = plane_state->dst_rect.width
- * stream->dst.width / stream->src.width;
- recout_full->height = plane_state->dst_rect.height
- * stream->dst.height / stream->src.height;
+ else if (sec_split) {
+ pipe_ctx->plane_res.scl_data.recout.x +=
+ pipe_ctx->plane_res.scl_data.recout.width / 2;
+ /* Ceil offset pipe */
+ pipe_ctx->plane_res.scl_data.recout.width =
+ (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
+ } else if (pri_split)
+ pipe_ctx->plane_res.scl_data.recout.width /= 2;
}
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -715,9 +679,10 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
const int out_w = stream->dst.width;
const int out_h = stream->dst.height;
+ /* Swap surf_src height and width since scaling ratios are in recout rotation */
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- rect_swap_helper(&surf_src);
+ swap(surf_src.height, surf_src.width);
pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction(
surf_src.width,
@@ -754,358 +719,202 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
}
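
The ratios above are source size over destination size in DC's 31.32 fixed-point format, truncated to 19 fractional bits. A minimal sketch of that arithmetic (assuming a plain int64 holds the 31.32 value; the driver's dc_fixpt_from_fraction also rounds, which is omitted here):

#include <stdint.h>
#include <stdio.h>

static int64_t fixpt_from_fraction(int64_t num, int64_t den)
{
        return (num << 32) / den;       /* 31.32 quotient, no rounding */
}

static int64_t fixpt_truncate(int64_t v, unsigned frac_bits)
{
        return v & ~((1LL << (32 - frac_bits)) - 1);
}

int main(void)
{
        /* 3840-wide source into a 1920-wide recout: ratio = 2.0 */
        int64_t horz = fixpt_truncate(fixpt_from_fraction(3840, 1920), 19);

        printf("horz ratio = %f\n", horz / 4294967296.0);  /* 2.000000 */
        return 0;
}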
-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
+static inline void adjust_vp_and_init_for_seamless_clip(
+ bool flip_scan_dir,
+ int recout_skip,
+ int src_size,
+ int taps,
+ struct fixed31_32 ratio,
+ struct fixed31_32 *init,
+ int *vp_offset,
+ int *vp_size)
{
- struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
- struct rect src = pipe_ctx->plane_state->src_rect;
- int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
- || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
- bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
-
- /*
- * Need to calculate the scan direction for viewport to make adjustments
- */
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
- flip_vert_scan_dir = true;
- flip_horz_scan_dir = true;
- } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
- flip_vert_scan_dir = true;
- else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- flip_horz_scan_dir = true;
-
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
- rect_swap_helper(&src);
- rect_swap_helper(&data->viewport_c);
- rect_swap_helper(&data->viewport);
-
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270 &&
- pipe_ctx->plane_state->horizontal_mirror) {
- flip_vert_scan_dir = true;
- }
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 &&
- pipe_ctx->plane_state->horizontal_mirror) {
- flip_vert_scan_dir = false;
- }
- } else if (pipe_ctx->plane_state->horizontal_mirror)
- flip_horz_scan_dir = !flip_horz_scan_dir;
-
- /*
- * Init calculated according to formula:
- * init = (scaling_ratio + number_of_taps + 1) / 2
- * init_bot = init + scaling_ratio
- * init_c = init + truncated_vp_c_offset(from calculate viewport)
- */
- data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19);
-
- data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19);
-
- data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19);
-
- data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
-
- if (!flip_horz_scan_dir) {
+ if (!flip_scan_dir) {
/* Adjust for viewport end clip-off */
- if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
- int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
- int vp_clip = (src.x + src.width) / vpc_div -
- data->viewport_c.width - data->viewport_c.x;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+ if ((*vp_offset + *vp_size) < src_size) {
+ int vp_clip = src_size - *vp_size - *vp_offset;
+ int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+ *vp_size += int_part < vp_clip ? int_part : vp_clip;
}
/* Adjust for non-0 viewport offset */
- if (data->viewport.x) {
+ if (*vp_offset) {
int int_part;
- data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
- data->ratios.horz, data->recout.x - recout_full->x));
- int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
- if (int_part < data->taps.h_taps) {
- int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
- (data->taps.h_taps - int_part) : data->viewport.x;
- data->viewport.x -= int_adj;
- data->viewport.width += int_adj;
+ *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
+ int_part = dc_fixpt_floor(*init) - *vp_offset;
+ if (int_part < taps) {
+ int int_adj = *vp_offset >= (taps - int_part) ?
+ (taps - int_part) : *vp_offset;
+ *vp_offset -= int_adj;
+ *vp_size += int_adj;
int_part += int_adj;
- } else if (int_part > data->taps.h_taps) {
- data->viewport.x += int_part - data->taps.h_taps;
- data->viewport.width -= int_part - data->taps.h_taps;
- int_part = data->taps.h_taps;
+ } else if (int_part > taps) {
+ *vp_offset += int_part - taps;
+ *vp_size -= int_part - taps;
+ int_part = taps;
}
- data->inits.h.value &= 0xffffffff;
- data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
- }
-
- if (data->viewport_c.x) {
- int int_part;
-
- data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
- data->ratios.horz_c, data->recout.x - recout_full->x));
- int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
- if (int_part < data->taps.h_taps_c) {
- int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
- (data->taps.h_taps_c - int_part) : data->viewport_c.x;
- data->viewport_c.x -= int_adj;
- data->viewport_c.width += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.h_taps_c) {
- data->viewport_c.x += int_part - data->taps.h_taps_c;
- data->viewport_c.width -= int_part - data->taps.h_taps_c;
- int_part = data->taps.h_taps_c;
- }
- data->inits.h_c.value &= 0xffffffff;
- data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+ init->value &= 0xffffffff;
+ *init = dc_fixpt_add_int(*init, int_part);
}
} else {
/* Adjust for non-0 viewport offset */
- if (data->viewport.x) {
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x;
- data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x;
- }
- if (data->viewport_c.x) {
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+ if (*vp_offset) {
+ int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
- data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
+ *vp_size += int_part < *vp_offset ? int_part : *vp_offset;
+ *vp_offset -= int_part < *vp_offset ? int_part : *vp_offset;
}
/* Adjust for viewport end clip-off */
- if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+ if ((*vp_offset + *vp_size) < src_size) {
int int_part;
- int end_offset = src.x + src.width
- - data->viewport.x - data->viewport.width;
+ int end_offset = src_size - *vp_offset - *vp_size;
/*
* this is init if vp had no offset, keep in mind this is from the
* right side of vp due to scan direction
*/
- data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
- data->ratios.horz, data->recout.x - recout_full->x));
+ *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
/*
* this is the difference between first pixel of viewport available to read
* and init position, taking into account scan direction
*/
- int_part = dc_fixpt_floor(data->inits.h) - end_offset;
- if (int_part < data->taps.h_taps) {
- int int_adj = end_offset >= (data->taps.h_taps - int_part) ?
- (data->taps.h_taps - int_part) : end_offset;
- data->viewport.width += int_adj;
+ int_part = dc_fixpt_floor(*init) - end_offset;
+ if (int_part < taps) {
+ int int_adj = end_offset >= (taps - int_part) ?
+ (taps - int_part) : end_offset;
+ *vp_size += int_adj;
int_part += int_adj;
- } else if (int_part > data->taps.h_taps) {
- data->viewport.width += int_part - data->taps.h_taps;
- int_part = data->taps.h_taps;
+ } else if (int_part > taps) {
+ *vp_size += int_part - taps;
+ int_part = taps;
}
- data->inits.h.value &= 0xffffffff;
- data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
+ init->value &= 0xffffffff;
+ *init = dc_fixpt_add_int(*init, int_part);
}
-
- if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
- int int_part;
- int end_offset = (src.x + src.width) / vpc_div
- - data->viewport_c.x - data->viewport_c.width;
-
- /*
- * this is init if vp had no offset, keep in mind this is from the
- * right side of vp due to scan direction
- */
- data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
- data->ratios.horz_c, data->recout.x - recout_full->x));
- /*
- * this is the difference between first pixel of viewport available to read
- * and init position, takning into account scan direction
- */
- int_part = dc_fixpt_floor(data->inits.h_c) - end_offset;
- if (int_part < data->taps.h_taps_c) {
- int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ?
- (data->taps.h_taps_c - int_part) : end_offset;
- data->viewport_c.width += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.h_taps_c) {
- data->viewport_c.width += int_part - data->taps.h_taps_c;
- int_part = data->taps.h_taps_c;
- }
- data->inits.h_c.value &= 0xffffffff;
- data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
- }
-
}
- if (!flip_vert_scan_dir) {
- /* Adjust for viewport end clip-off */
- if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
- int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v, data->ratios.vert));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
- int vp_clip = (src.y + src.height) / vpc_div -
- data->viewport_c.height - data->viewport_c.y;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
- }
-
- /* Adjust for non-0 viewport offset */
- if (data->viewport.y) {
- int int_part;
-
- data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
- data->ratios.vert, data->recout.y - recout_full->y));
- int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
- if (int_part < data->taps.v_taps) {
- int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
- (data->taps.v_taps - int_part) : data->viewport.y;
- data->viewport.y -= int_adj;
- data->viewport.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps) {
- data->viewport.y += int_part - data->taps.v_taps;
- data->viewport.height -= int_part - data->taps.v_taps;
- int_part = data->taps.v_taps;
- }
- data->inits.v.value &= 0xffffffff;
- data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
- }
-
- if (data->viewport_c.y) {
- int int_part;
-
- data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
- data->ratios.vert_c, data->recout.y - recout_full->y));
- int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
- if (int_part < data->taps.v_taps_c) {
- int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
- (data->taps.v_taps_c - int_part) : data->viewport_c.y;
- data->viewport_c.y -= int_adj;
- data->viewport_c.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps_c) {
- data->viewport_c.y += int_part - data->taps.v_taps_c;
- data->viewport_c.height -= int_part - data->taps.v_taps_c;
- int_part = data->taps.v_taps_c;
- }
- data->inits.v_c.value &= 0xffffffff;
- data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
- }
- } else {
- /* Adjust for non-0 viewport offset */
- if (data->viewport.y) {
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v, data->ratios.vert));
+}
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y;
- data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y;
- }
- if (data->viewport_c.y) {
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+ struct rect src = pipe_ctx->plane_state->src_rect;
+ int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
+ int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+ || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
- data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
- }
+ /*
+ * Need to calculate the scan direction for viewport to make adjustments
+ */
+ get_vp_scan_direction(
+ plane_state->rotation,
+ plane_state->horizontal_mirror,
+ &orthogonal_rotation,
+ &flip_vert_scan_dir,
+ &flip_horz_scan_dir);
+
+ /* Calculate src rect rotation adjusted to recout space */
+ surf_size_h = src.x + src.width;
+ surf_size_v = src.y + src.height;
+ if (flip_horz_scan_dir)
+ src.x = 0;
+ if (flip_vert_scan_dir)
+ src.y = 0;
+ if (orthogonal_rotation) {
+ swap(src.x, src.y);
+ swap(src.width, src.height);
+ }
+
+ /* Recout-matching initial vp offset = recout_offset - (stream dst offset +
+ * ((surf dst offset - stream src offset) * 1 / stream scaling ratio)
+ * - (surf src offset * 1 / full scl ratio))
+ */
+ recout_skip_h = data->recout.x - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ * stream->dst.width / stream->src.width -
+ src.x * plane_state->dst_rect.width / src.width
+ * stream->dst.width / stream->src.width);
+ recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ * stream->dst.height / stream->src.height -
+ src.y * plane_state->dst_rect.height / src.height
+ * stream->dst.height / stream->src.height);
+ if (orthogonal_rotation)
+ swap(recout_skip_h, recout_skip_v);
+ /*
+ * Init calculated according to formula:
+ * init = (scaling_ratio + number_of_taps + 1) / 2
+ * init_bot = init + scaling_ratio
+ * init_c = init + truncated_vp_c_offset(from calculate viewport)
+ */
+ data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19);
- /* Adjust for viewport end clip-off */
- if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
- int int_part;
- int end_offset = src.y + src.height
- - data->viewport.y - data->viewport.height;
+ data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19);
- /*
- * this is init if vp had no offset, keep in mind this is from the
- * right side of vp due to scan direction
- */
- data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
- data->ratios.vert, data->recout.y - recout_full->y));
- /*
- * this is the difference between first pixel of viewport available to read
- * and init position, taking into account scan direction
- */
- int_part = dc_fixpt_floor(data->inits.v) - end_offset;
- if (int_part < data->taps.v_taps) {
- int int_adj = end_offset >= (data->taps.v_taps - int_part) ?
- (data->taps.v_taps - int_part) : end_offset;
- data->viewport.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps) {
- data->viewport.height += int_part - data->taps.v_taps;
- int_part = data->taps.v_taps;
- }
- data->inits.v.value &= 0xffffffff;
- data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
- }
+ data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19);
- if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
- int int_part;
- int end_offset = (src.y + src.height) / vpc_div
- - data->viewport_c.y - data->viewport_c.height;
+ data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
- /*
- * this is init if vp had no offset, keep in mind this is from the
- * right side of vp due to scan direction
- */
- data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
- data->ratios.vert_c, data->recout.y - recout_full->y));
- /*
- * this is the difference between first pixel of viewport available to read
- * and init position, taking into account scan direction
- */
- int_part = dc_fixpt_floor(data->inits.v_c) - end_offset;
- if (int_part < data->taps.v_taps_c) {
- int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ?
- (data->taps.v_taps_c - int_part) : end_offset;
- data->viewport_c.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps_c) {
- data->viewport_c.height += int_part - data->taps.v_taps_c;
- int_part = data->taps.v_taps_c;
- }
- data->inits.v_c.value &= 0xffffffff;
- data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
- }
- }
+ /*
+ * Taps, inits and scaling ratios are in recout space need to rotate
+ * to viewport rotation before adjustment
+ */
+ adjust_vp_and_init_for_seamless_clip(
+ flip_horz_scan_dir,
+ recout_skip_h,
+ surf_size_h,
+ orthogonal_rotation ? data->taps.v_taps : data->taps.h_taps,
+ orthogonal_rotation ? data->ratios.vert : data->ratios.horz,
+ orthogonal_rotation ? &data->inits.v : &data->inits.h,
+ &data->viewport.x,
+ &data->viewport.width);
+ adjust_vp_and_init_for_seamless_clip(
+ flip_horz_scan_dir,
+ recout_skip_h,
+ surf_size_h / vpc_div,
+ orthogonal_rotation ? data->taps.v_taps_c : data->taps.h_taps_c,
+ orthogonal_rotation ? data->ratios.vert_c : data->ratios.horz_c,
+ orthogonal_rotation ? &data->inits.v_c : &data->inits.h_c,
+ &data->viewport_c.x,
+ &data->viewport_c.width);
+ adjust_vp_and_init_for_seamless_clip(
+ flip_vert_scan_dir,
+ recout_skip_v,
+ surf_size_v,
+ orthogonal_rotation ? data->taps.h_taps : data->taps.v_taps,
+ orthogonal_rotation ? data->ratios.horz : data->ratios.vert,
+ orthogonal_rotation ? &data->inits.h : &data->inits.v,
+ &data->viewport.y,
+ &data->viewport.height);
+ adjust_vp_and_init_for_seamless_clip(
+ flip_vert_scan_dir,
+ recout_skip_v,
+ surf_size_v / vpc_div,
+ orthogonal_rotation ? data->taps.h_taps_c : data->taps.v_taps_c,
+ orthogonal_rotation ? data->ratios.horz_c : data->ratios.vert_c,
+ orthogonal_rotation ? &data->inits.h_c : &data->inits.v_c,
+ &data->viewport_c.y,
+ &data->viewport_c.height);
/* Interlaced inits based on final vert inits */
data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
- rect_swap_helper(&data->viewport_c);
- rect_swap_helper(&data->viewport);
- }
}
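
Note how the four helper calls pair the viewport's x/width with the horizontal recout-space parameters and y/height with the vertical ones, swapping the taps/ratios/inits axes when the rotation is orthogonal. The init formula itself is easy to check by hand; a worked instance (floating point for illustration only, the driver uses 31.32 fixed point truncated to 19 fractional bits):

#include <stdio.h>

int main(void)
{
        double ratio = 2.0;     /* e.g. a 2:1 downscale */
        int taps = 4;

        double init = (ratio + taps + 1) / 2;   /* (2.0 + 5) / 2 = 3.5 */
        double init_bot = init + ratio;         /* 5.5, bottom field for interlace */

        printf("init=%.2f init_bot=%.2f\n", init, init_bot);
        return 0;
}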
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
- struct rect recout_full = { 0 };
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Important: scaling ratio calculation requires pixel format,
@@ -1115,9 +924,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
- if (pipe_ctx->stream->timing.flags.INTERLACE)
- pipe_ctx->stream->dst.height *= 2;
-
calculate_scaling_ratios(pipe_ctx);
calculate_viewport(pipe_ctx);
@@ -1125,7 +931,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
return false;
- calculate_recout(pipe_ctx, &recout_full);
+ calculate_recout(pipe_ctx);
/**
* Setting line buffer pixel depth to 24bpp yields banding
@@ -1138,9 +944,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
- if (pipe_ctx->stream->timing.flags.INTERLACE)
- pipe_ctx->plane_res.scl_data.v_active *= 2;
-
/* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL)
@@ -1169,7 +972,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (res)
/* May need to re-check lb size after this in some obscure scenario */
- calculate_inits_and_adj_vp(pipe_ctx, &recout_full);
+ calculate_inits_and_adj_vp(pipe_ctx);
DC_LOG_SCALER(
"%s: Viewport:\nheight:%d width:%d x:%d "
@@ -1185,9 +988,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
plane_state->dst_rect.x,
plane_state->dst_rect.y);
- if (pipe_ctx->stream->timing.flags.INTERLACE)
- pipe_ctx->stream->dst.height /= 2;
-
return res;
}
@@ -1382,6 +1182,9 @@ bool dc_add_plane_to_context(
return false;
}
+ tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
+ ASSERT(tail_pipe);
+
free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -1399,10 +1202,6 @@ bool dc_add_plane_to_context(
free_pipe->plane_state = plane_state;
if (head_pipe != free_pipe) {
-
- tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
- ASSERT(tail_pipe);
-
free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
@@ -1648,6 +1447,14 @@ static bool are_stream_backends_same(
return true;
}
+/**
+ * dc_is_stream_unchanged() - Compare two stream states for equivalence.
+ *
+ * Checks if there is a difference between the two states
+ * that would require a mode change.
+ *
+ * Does not compare cursor position or attributes.
+ */
bool dc_is_stream_unchanged(
struct dc_stream_state *old_stream, struct dc_stream_state *stream)
{
@@ -1658,6 +1465,9 @@ bool dc_is_stream_unchanged(
return true;
}
+/**
+ * dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams.
+ */
bool dc_is_stream_scaling_unchanged(
struct dc_stream_state *old_stream, struct dc_stream_state *stream)
{
@@ -1817,16 +1627,19 @@ bool resource_is_stream_unchanged(
return false;
}
+/**
+ * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
+ */
enum dc_status dc_add_stream_to_ctx(
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *stream)
{
- struct dc_context *dc_ctx = dc->ctx;
enum dc_status res;
+ DC_LOGGER_INIT(dc->ctx->logger);
if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
- DC_ERROR("Max streams reached, can't add stream %p !\n", stream);
+ DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
return DC_ERROR_UNEXPECTED;
}
@@ -1836,11 +1649,14 @@ enum dc_status dc_add_stream_to_ctx(
res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream);
if (res != DC_OK)
- DC_ERROR("Adding stream %p to context failed with err %d!\n", stream, res);
+ DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
return res;
}
+/**
+ * dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
+ */
enum dc_status dc_remove_stream_from_ctx(
struct dc *dc,
struct dc_state *new_ctx,
@@ -2002,6 +1818,8 @@ enum dc_status resource_map_pool_resources(
}
*/
+ calculate_phy_pix_clks(stream);
+
/* acquire new resources */
pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
@@ -2059,6 +1877,12 @@ enum dc_status resource_map_pool_resources(
return DC_ERROR_UNEXPECTED;
}
+/**
+ * dc_resource_state_copy_construct_current() - Create a new dc_state from the current state
+ *
+ * This is a shallow copy; it increments the refcounts on the existing streams and planes.
+ *
+ * @dc: the dc whose current_state is copied out
+ * @dst_ctx: the state to copy into
+ */
void dc_resource_state_copy_construct_current(
const struct dc *dc,
struct dc_state *dst_ctx)
@@ -2071,9 +1895,17 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
- dst_ctx->dis_clk = dc->res_pool->dccg;
+ dst_ctx->dccg = dc->res_pool->clk_mgr;
}
+/**
+ * dc_validate_global_state() - Determine if HW can support a given state
+ * Checks HW resource availability and bandwidth requirements.
+ * @dc: dc struct for this driver
+ * @new_ctx: state to be validated
+ *
+ * Return: DC_OK if the result can be programmed. Otherwise, an error code.
+ */
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx)
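
A hedged sketch of how these state APIs compose for validation (kernel context, not standalone; dc_create_state() and dc_release_state() are assumed helpers here, and error handling is omitted):

static enum dc_status try_new_config(struct dc *dc,
                                     struct dc_stream_state *stream)
{
        struct dc_state *ctx = dc_create_state(dc);    /* assumed helper */
        enum dc_status status;

        /* shallow copy of current state, takes refs on streams/planes */
        dc_resource_state_copy_construct_current(dc, ctx);

        status = dc_add_stream_to_ctx(dc, ctx, stream);
        if (status == DC_OK)
                status = dc_validate_global_state(dc, ctx);

        dc_release_state(ctx);                          /* assumed helper */
        return status;  /* DC_OK means the state can be programmed */
}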
@@ -2401,113 +2233,15 @@ static void set_vendor_info_packet(
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
{
- uint32_t length = 0;
- bool hdmi_vic_mode = false;
- uint8_t checksum = 0;
- uint32_t i = 0;
- enum dc_timing_3d_format format;
- // Can be different depending on packet content /*todo*/
- // unsigned int length = pPathMode->dolbyVision ? 24 : 5;
-
- info_packet->valid = false;
-
- format = stream->timing.timing_3d_format;
- if (stream->view_format == VIEW_3D_FORMAT_NONE)
- format = TIMING_3D_FORMAT_NONE;
-
- /* Can be different depending on packet content */
- length = 5;
-
- if (stream->timing.hdmi_vic != 0
- && stream->timing.h_total >= 3840
- && stream->timing.v_total >= 2160)
- hdmi_vic_mode = true;
-
- /* According to HDMI 1.4a CTS, VSIF should be sent
- * for both 3D stereo and HDMI VIC modes.
- * For all other modes, there is no VSIF sent. */
+ /* Vendor-specific (VSP) info packet */
- if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
+ /* Return early if the stream's VSP infopacket is not valid;
+ * otherwise copy it into the output packet.
+ */
+ if (!stream->vsp_infopacket.valid)
return;
- /* 24bit IEEE Registration identifier (0x000c03). LSB first. */
- info_packet->sb[1] = 0x03;
- info_packet->sb[2] = 0x0C;
- info_packet->sb[3] = 0x00;
-
- /*PB4: 5 lower bytes = 0 (reserved). 3 higher bits = HDMI_Video_Format.
- * The value for HDMI_Video_Format are:
- * 0x0 (0b000) - No additional HDMI video format is presented in this
- * packet
- * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
- * parameter follows
- * 0x2 (0b010) - 3D format indication present. 3D_Structure and
- * potentially 3D_Ext_Data follows
- * 0x3..0x7 (0b011..0b111) - reserved for future use */
- if (format != TIMING_3D_FORMAT_NONE)
- info_packet->sb[4] = (2 << 5);
- else if (hdmi_vic_mode)
- info_packet->sb[4] = (1 << 5);
-
- /* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
- * 4 lower bites = 0 (reserved). 4 higher bits = 3D_Structure.
- * The value for 3D_Structure are:
- * 0x0 - Frame Packing
- * 0x1 - Field Alternative
- * 0x2 - Line Alternative
- * 0x3 - Side-by-Side (full)
- * 0x4 - L + depth
- * 0x5 - L + depth + graphics + graphics-depth
- * 0x6 - Top-and-Bottom
- * 0x7 - Reserved for future use
- * 0x8 - Side-by-Side (Half)
- * 0x9..0xE - Reserved for future use
- * 0xF - Not used */
- switch (format) {
- case TIMING_3D_FORMAT_HW_FRAME_PACKING:
- case TIMING_3D_FORMAT_SW_FRAME_PACKING:
- info_packet->sb[5] = (0x0 << 4);
- break;
-
- case TIMING_3D_FORMAT_SIDE_BY_SIDE:
- case TIMING_3D_FORMAT_SBS_SW_PACKED:
- info_packet->sb[5] = (0x8 << 4);
- length = 6;
- break;
-
- case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
- case TIMING_3D_FORMAT_TB_SW_PACKED:
- info_packet->sb[5] = (0x6 << 4);
- break;
-
- default:
- break;
- }
-
- /*PB5: If PB4 is set to 0x1 (extended resolution format)
- * fill PB5 with the correct HDMI VIC code */
- if (hdmi_vic_mode)
- info_packet->sb[5] = stream->timing.hdmi_vic;
-
- /* Header */
- info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; /* VSIF packet type. */
- info_packet->hb1 = 0x01; /* Version */
-
- /* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
- info_packet->hb2 = (uint8_t) (length);
-
- /* Calculate checksum */
- checksum = 0;
- checksum += info_packet->hb0;
- checksum += info_packet->hb1;
- checksum += info_packet->hb2;
-
- for (i = 1; i <= length; i++)
- checksum += info_packet->sb[i];
-
- info_packet->sb[0] = (uint8_t) (0x100 - checksum);
-
- info_packet->valid = true;
+ *info_packet = stream->vsp_infopacket;
}
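
The deleted block open-coded the HDMI vendor-specific infoframe, including the standard infoframe checksum: payload byte 0 is chosen so that header plus payload sum to zero mod 256. For reference, a standalone restatement of that checksum (the values below are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint8_t infoframe_checksum(uint8_t hb0, uint8_t hb1, uint8_t hb2,
                                  const uint8_t *sb, int length)
{
        uint32_t sum = hb0 + hb1 + hb2;
        int i;

        for (i = 1; i <= length; i++)
                sum += sb[i];
        return (uint8_t)(0x100 - (sum & 0xFF));  /* goes into sb[0] */
}

int main(void)
{
        /* IEEE OUI 0x000c03 LSB-first, then HDMI_Video_Format = 3D (2 << 5) */
        uint8_t sb[8] = { 0, 0x03, 0x0C, 0x00, (2 << 5), 0x00 };

        printf("checksum = 0x%02X\n",
               infoframe_checksum(0x81, 0x01, 5, sb, 5));  /* 0x2A */
        return 0;
}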
static void set_spd_info_packet(
@@ -2563,10 +2297,6 @@ void dc_resource_state_destruct(struct dc_state *context)
}
}
-/*
- * Copy src_ctx into dst_ctx and retain all surfaces and streams referenced
- * by the src_ctx
- */
void dc_resource_state_copy_construct(
const struct dc_state *src_ctx,
struct dc_state *dst_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 2ac848a106ba..66e5c4623a49 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -100,12 +100,11 @@ static void construct(struct dc_stream_state *stream,
/* EDID CAP translation for HDMI 2.0 */
stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
- stream->status.link = stream->sink->link;
-
update_stream_signal(stream);
stream->out_transfer_func = dc_create_transfer_func();
stream->out_transfer_func->type = TF_TYPE_BYPASS;
+ stream->out_transfer_func->ctx = stream->ctx;
}
static void destruct(struct dc_stream_state *stream)
@@ -171,7 +170,7 @@ struct dc_stream_status *dc_stream_get_status(
}
/**
- * Update the cursor attributes and set cursor surface address
+ * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
bool dc_stream_set_cursor_attributes(
struct dc_stream_state *stream,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 8fb3aefd195c..c60c9b4c3075 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -44,6 +44,7 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
plane_state->in_transfer_func = dc_create_transfer_func();
plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+ plane_state->in_transfer_func->ctx = ctx;
}
static void destruct(struct dc_plane_state *plane_state)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 199527171100..4b5bbb13ce7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -36,9 +36,10 @@
#include "inc/hw_sequencer.h"
#include "inc/compressor.h"
+#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.1.68"
+#define DC_VER "3.2.08"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -47,13 +48,6 @@
/*******************************************************************************
* Display Core Interfaces
******************************************************************************/
-struct dmcu_version {
- unsigned int date;
- unsigned int month;
- unsigned int year;
- unsigned int interface_version;
-};
-
struct dc_versions {
const char *dc_ver;
struct dmcu_version dmcu_version;
@@ -169,6 +163,7 @@ struct link_training_settings;
struct dc_config {
bool gpu_vm_support;
bool disable_disp_pll_sharing;
+ bool fbc_support;
};
enum visual_confirm {
@@ -249,8 +244,6 @@ struct dc_debug_options {
bool disable_dmcu;
bool disable_psr;
bool force_abm_enable;
- bool disable_hbup_pg;
- bool disable_dpp_pg;
bool disable_stereo_support;
bool vsr_support;
bool performance_trace;
@@ -304,11 +297,6 @@ struct dc {
struct hw_sequencer_funcs hwss;
struct dce_hwseq *hwseq;
- /* temp store of dm_pp_display_configuration
- * to compare to see if display config changed
- */
- struct dm_pp_display_configuration prev_display_config;
-
bool optimized_required;
/* FBC compressor */
@@ -754,5 +742,6 @@ void dc_set_power_state(
struct dc *dc,
enum dc_acpi_cm_power_state power_state);
void dc_resume(struct dc *dc);
+bool dc_is_dmcu_initialized(struct dc *dc);
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 8130b95ccc53..a8b3cedf9431 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -86,6 +86,10 @@ struct dc_vbios_funcs {
bool (*is_accelerated_mode)(
struct dc_bios *bios);
+ bool (*is_active_display)(
+ struct dc_bios *bios,
+ enum signal_type signal,
+ const struct connector_device_tag_info *device_tag);
void (*set_scratch_critical_state)(
struct dc_bios *bios,
bool state);
@@ -141,6 +145,7 @@ struct dc_vbios_funcs {
};
struct bios_registers {
+ uint32_t BIOS_SCRATCH_0;
uint32_t BIOS_SCRATCH_3;
uint32_t BIOS_SCRATCH_6;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 7825e4b5e97c..9ddfe4c6938b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -358,15 +358,16 @@ union dc_tiling_info {
} gfx8;
struct {
+ enum swizzle_mode_values swizzle;
unsigned int num_pipes;
- unsigned int num_banks;
+ unsigned int max_compressed_frags;
unsigned int pipe_interleave;
+
+ unsigned int num_banks;
unsigned int num_shader_engines;
unsigned int num_rb_per_se;
- unsigned int max_compressed_frags;
bool shaderEnable;
- enum swizzle_mode_values swizzle;
bool meta_linear;
bool rb_aligned;
bool pipe_aligned;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 3bfdccceb524..29f19d57ff7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -128,8 +128,10 @@ struct dc_link {
const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
-/*
- * Return an enumerated dc_link. dc_link order is constant and determined at
+/**
+ * dc_get_link_at_index() - Return an enumerated dc_link.
+ *
+ * dc_link order is constant and determined at
* boot time. They cannot be created or destroyed.
* Use dc_get_caps() to get number of links.
*/
@@ -138,9 +140,14 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
return dc->links[link_index];
}
-/* Set backlight level of an embedded panel (eDP, LVDS). */
-bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
- uint32_t frame_ramp, const struct dc_stream_state *stream);
+/* Set backlight level of an embedded panel (eDP, LVDS).
+ * backlight_pwm_u16_16 is an unsigned 32-bit value with a 16-bit integer
+ * part and a 16-bit fractional part, where 1.0 is the max backlight value.
+ */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp,
+ const struct dc_stream_state *stream);
int dc_link_get_backlight_level(const struct dc_link *dc_link);
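
A standalone sketch of how an 8-bit brightness value maps into this u16.16 format, mirroring the 0x10101-based scaling the old 8-bit path used (0xFF maps to 1.0 = 0x10000):

#include <stdint.h>
#include <stdio.h>

static uint32_t bl_8bit_to_u16_16(uint8_t level)
{
        uint32_t v24 = level * 0x10101;         /* ~ level * 256/255, 24 bits */

        return (v24 >> 8) + ((v24 >> 7) & 1);   /* round to u1.16 */
}

int main(void)
{
        printf("0xFF -> 0x%X\n", bl_8bit_to_u16_16(0xFF));  /* 0x10000 = 1.0 */
        printf("0x80 -> 0x%X\n", bl_8bit_to_u16_16(0x80));  /* 0x8081 ~ 0.502 */
        return 0;
}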
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index c5bd1fbb6982..be34d638e15d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -56,6 +56,7 @@ struct dc_stream_state {
struct dc_crtc_timing_adjust adjust;
struct dc_info_packet vrr_infopacket;
struct dc_info_packet vsc_infopacket;
+ struct dc_info_packet vsp_infopacket;
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -104,8 +105,6 @@ struct dc_stream_state {
bool dpms_off;
bool apply_edp_fast_boot_optimization;
- struct dc_stream_status status;
-
struct dc_cursor_attributes cursor_attributes;
struct dc_cursor_position cursor_position;
uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
@@ -131,11 +130,13 @@ struct dc_stream_update {
struct dc_crtc_timing_adjust *adjust;
struct dc_info_packet *vrr_infopacket;
struct dc_info_packet *vsc_infopacket;
+ struct dc_info_packet *vsp_infopacket;
bool *dpms_off;
struct colorspace_transform *gamut_remap;
enum dc_color_space *output_color_space;
+ enum dc_dither_option *dither_option;
struct dc_csc_transform *output_csc_transform;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 6e12d640d020..0b20ae23f169 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -73,10 +73,18 @@ struct hw_asic_id {
void *atombios_base_address;
};
+struct dc_perf_trace {
+ unsigned long read_count;
+ unsigned long write_count;
+ unsigned long last_entry_read;
+ unsigned long last_entry_write;
+};
+
struct dc_context {
struct dc *dc;
void *driver_context; /* e.g. amdgpu_device */
+ struct dc_perf_trace *perf_trace;
void *cgs_device;
enum dce_environment dce_environment;
@@ -191,7 +199,6 @@ union display_content_support {
};
struct dc_panel_patch {
- unsigned int disconnect_delay;
unsigned int dppowerup_delay;
unsigned int extra_t12_ms;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 8f7f0e8b341f..6d7b64a743ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -28,7 +28,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
+dce_clk_mgr.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 29294db1a96b..2a342eae80fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -54,7 +54,7 @@
#define MCP_DISABLE_ABM_IMMEDIATELY 255
-static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
+static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
{
uint64_t current_backlight;
uint32_t round_result;
@@ -103,45 +103,21 @@ static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
return (uint32_t)(current_backlight);
}
-static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
+static void driver_set_backlight_level(struct dce_abm *abm_dce,
+ uint32_t backlight_pwm_u16_16)
{
- uint32_t backlight_24bit;
- uint32_t backlight_17bit;
uint32_t backlight_16bit;
uint32_t masked_pwm_period;
- uint8_t rounding_bit;
uint8_t bit_count;
uint64_t active_duty_cycle;
uint32_t pwm_period_bitcnt;
/*
- * 1. Convert 8-bit value to 17 bit U1.16 format
- * (1 integer, 16 fractional bits)
- */
-
- /* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value,
- * effectively multiplying value by 256/255
- * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF
- */
- backlight_24bit = level * 0x10101;
-
- /* 1.2 The upper 16 bits of the 24 bit value is the fraction, lower 8
- * used for rounding, take most significant bit of fraction for
- * rounding, e.g. for 0xEFEFEF, rounding bit is 1
- */
- rounding_bit = (backlight_24bit >> 7) & 1;
-
- /* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit
- * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1
- */
- backlight_17bit = (backlight_24bit >> 8) + rounding_bit;
-
- /*
- * 2. Find 16 bit backlight active duty cycle, where 0 <= backlight
+ * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight
* active duty cycle <= backlight period
*/
- /* 2.1 Apply bitmask for backlight period value based on value of BITCNT
+ /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
*/
REG_GET_2(BL_PWM_PERIOD_CNTL,
BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
@@ -155,13 +131,13 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
/* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
- /* 2.2 Calculate integer active duty cycle required upper 16 bits
+ /* 1.2 Calculate integer active duty cycle required upper 16 bits
* contain integer component, lower 16 bits contain fractional component
* of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
*/
- active_duty_cycle = backlight_17bit * masked_pwm_period;
+ active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
- /* 2.3 Calculate 16 bit active duty cycle from integer and fractional
+ /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
* components shift by bitCount then mask 16 bits and add rounding bit
* from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
*/
@@ -170,23 +146,23 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
/*
- * 3. Program register with updated value
+ * 2. Program register with updated value
*/
- /* 3.1 Lock group 2 backlight registers */
+ /* 2.1 Lock group 2 backlight registers */
REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
BL_PWM_GRP1_REG_LOCK, 1);
- // 3.2 Write new active duty cycle
+ // 2.2 Write new active duty cycle
REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
- /* 3.3 Unlock group 2 backlight registers */
+ /* 2.3 Unlock group 2 backlight registers */
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
- /* 5.4.4 Wait for pending bit to be cleared */
+ /* 3. Wait for pending bit to be cleared */
REG_WAIT(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
1, 10000);
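
The duty-cycle math above is easy to verify with the numbers from the comments. A standalone rework (plain C):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t backlight_u16_16 = 0xEFF0;     /* ~0.937 in u16.16 */
        uint32_t masked_pwm_period = 0x24;      /* period masked to bit_count bits */
        unsigned int bit_count = 6;

        uint64_t duty = (uint64_t)backlight_u16_16 * masked_pwm_period; /* 0x21BDC0 */
        uint32_t duty16 = (uint32_t)(duty >> bit_count) & 0xFFFF;       /* 0x86F7 */

        duty16 += (duty >> (bit_count - 1)) & 0x1;  /* round from MSB of fraction */
        printf("active duty cycle = 0x%X\n", duty16);
        return 0;
}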
@@ -194,16 +170,21 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
static void dmcu_set_backlight_level(
struct dce_abm *abm_dce,
- uint32_t level,
+ uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp,
uint32_t controller_id)
{
- unsigned int backlight_16_bit = (level * 0x10101) >> 8;
- unsigned int backlight_17_bit = backlight_16_bit +
- (((backlight_16_bit & 0x80) >> 7) & 1);
+ unsigned int backlight_8_bit = 0;
uint32_t rampingBoundary = 0xFFFF;
uint32_t s2;
+ if (backlight_pwm_u16_16 & 0x10000)
+ // Check for max backlight condition
+ backlight_8_bit = 0xFF;
+ else
+ // Take MSB of fractional part since backlight is not max
+ backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
+
/* set ramping boundary */
REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
@@ -220,7 +201,7 @@ static void dmcu_set_backlight_level(
0, 1, 80000);
/* setDMCUParam_BL */
- REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17_bit);
+ REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16);
/* write ramp */
if (controller_id == 0)
@@ -237,9 +218,9 @@ static void dmcu_set_backlight_level(
s2 = REG_READ(BIOS_SCRATCH_2);
s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
- level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+ backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
- s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+ s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
REG_WRITE(BIOS_SCRATCH_2, s2);
}
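
The BIOS scratch register still wants an 8-bit level, so the hunk above derives one from the u16.16 value. A standalone sketch of that extraction:

#include <stdint.h>
#include <stdio.h>

static uint8_t bl_u16_16_to_8bit(uint32_t bl)
{
        if (bl & 0x10000)               /* >= 1.0: clamp to max */
                return 0xFF;
        return (bl >> 8) & 0xFF;        /* top 8 bits of the fraction */
}

int main(void)
{
        printf("0x10000 -> 0x%X\n", bl_u16_16_to_8bit(0x10000)); /* 0xFF */
        printf("0x8081  -> 0x%X\n", bl_u16_16_to_8bit(0x8081));  /* 0x80 */
        return 0;
}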
@@ -247,7 +228,7 @@ static void dmcu_set_backlight_level(
static void dce_abm_init(struct abm *abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- unsigned int backlight = get_current_backlight_16_bit(abm_dce);
+ unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
@@ -284,12 +265,26 @@ static void dce_abm_init(struct abm *abm)
ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
}
-static unsigned int dce_abm_get_current_backlight_8_bit(struct abm *abm)
+static unsigned int dce_abm_get_current_backlight(struct abm *abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
- return (backlight >> 8);
+ /* return backlight in hardware format, which is an unsigned 17-bit value
+ * with 1 integer bit and 16 fractional bits
+ */
+ return backlight;
+}
+
+static unsigned int dce_abm_get_target_backlight(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+
+ /* return backlight in hardware format, which is an unsigned 17-bit value
+ * with 1 integer bit and 16 fractional bits
+ */
+ return backlight;
}
static bool dce_abm_set_level(struct abm *abm, uint32_t level)
@@ -396,9 +391,9 @@ static bool dce_abm_init_backlight(struct abm *abm)
return true;
}
-static bool dce_abm_set_backlight_level(
+static bool dce_abm_set_backlight_level_pwm(
struct abm *abm,
- unsigned int backlight_level,
+ unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int controller_id,
bool use_smooth_brightness)
@@ -406,16 +401,16 @@ static bool dce_abm_set_backlight_level(
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
- backlight_level, backlight_level);
+ backlight_pwm_u16_16, backlight_pwm_u16_16);
/* If DMCU is in reset state, DMCU is uninitialized */
if (use_smooth_brightness)
dmcu_set_backlight_level(abm_dce,
- backlight_level,
+ backlight_pwm_u16_16,
frame_ramp,
controller_id);
else
- driver_set_backlight_level(abm_dce, backlight_level);
+ driver_set_backlight_level(abm_dce, backlight_pwm_u16_16);
return true;
}
@@ -424,8 +419,9 @@ static const struct abm_funcs dce_funcs = {
.abm_init = dce_abm_init,
.set_abm_level = dce_abm_set_level,
.init_backlight = dce_abm_init_backlight,
- .set_backlight_level = dce_abm_set_backlight_level,
- .get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
+ .set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
+ .get_current_backlight = dce_abm_get_current_backlight,
+ .get_target_backlight = dce_abm_get_target_backlight,
.set_abm_immediate_disable = dce_abm_immediate_disable
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
new file mode 100644
index 000000000000..bd22f51813bf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -0,0 +1,884 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_clk_mgr.h"
+
+#include "reg_helper.h"
+#include "dmcu.h"
+#include "core_types.h"
+#include "dal_asic_id.h"
+
+#define TO_DCE_CLK_MGR(clocks)\
+ container_of(clocks, struct dce_clk_mgr, base)
+
+#define REG(reg) \
+ (clk_mgr_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
+
+#define CTX \
+ clk_mgr_dce->base.ctx
+#define DC_LOGGER \
+ clk_mgr->ctx->logger
+
+/* Max clock values for each state indexed by "enum clocks_state": */
+static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+/* ClocksStateInvalid - should not be used */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateLow */
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+/* ClocksStateNominal */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+/* ClocksStatePerformance */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
+
+static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+
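+/* Map a DID (divider ID) to the raw divider value. Each range has its own
+ * base DID, starting divider and step; e.g. (illustrative) DID 0x41 falls in
+ * range 2, giving 64 + 2 * (0x41 - 0x40) = 66, i.e. a divide ratio of 16.5
+ * once the scale factor of 4 is applied.
+ */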
+int dentist_get_divider_from_did(int did)
+{
+ if (did < DENTIST_BASE_DID_1)
+ did = DENTIST_BASE_DID_1;
+ if (did > DENTIST_MAX_DID)
+ did = DENTIST_MAX_DID;
+
+ if (did < DENTIST_BASE_DID_2) {
+ return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+ * (did - DENTIST_BASE_DID_1);
+ } else if (did < DENTIST_BASE_DID_3) {
+ return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+ * (did - DENTIST_BASE_DID_2);
+ } else if (did < DENTIST_BASE_DID_4) {
+ return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+ * (did - DENTIST_BASE_DID_3);
+ } else {
+ return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
+ * (did - DENTIST_BASE_DID_4);
+ }
+}
+
+/* SW will adjust the DP REF Clock average value for all purposes
+ * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, for all cases:
+ *
+ * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled
+ *   with SW calculations for DS_INCR/DS_MODULO (planned to be the default)
+ * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled
+ *   with HW calculations (not planned to be used, but the average clock
+ *   should still be valid)
+ * - if SS is enabled on the DP Ref clock and HW de-spreading is disabled
+ *   (should not be the case with CIK), SW should program all generated
+ *   rates according to the average value (as with previous ASICs)
+ */
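+/* For down-spread, the average frequency drops by half the spread: the
+ * divisor of 200 below folds together the percent-to-fraction conversion
+ * (100) and the halving (2), i.e.
+ * adjusted = dp_ref_clk_khz * (1 - percentage / (divider * 200)).
+ * E.g. (illustrative) 600000 kHz with percentage 3000 and divider 1000
+ * gives 600000 * (1 - 0.015) = 591000 kHz.
+ */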
+static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz)
+{
+ if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
+ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+ dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
+ clk_mgr_dce->dprefclk_ss_divider), 200);
+ struct fixed31_32 adj_dp_ref_clk_khz;
+
+ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+ adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+ }
+ return dp_ref_clk_khz;
+}
+
+static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ int dprefclk_wdivider;
+ int dprefclk_src_sel;
+ int dp_ref_clk_khz = 600000;
+ int target_div;
+
+ /* ASSERT DP Reference Clock source is from DFS */
+ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+ ASSERT(dprefclk_src_sel == 0);
+
+ /* Read mmDENTIST_DISPCLK_CNTL to get the currently
+ * programmed DID DENTIST_DPREFCLK_WDIVIDER
+ */
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+
+ /* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider */
+ target_div = dentist_get_divider_from_did(dprefclk_wdivider);
+
+ /* Calculate the current DFS clock, in kHz. */
+ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr_dce->dentist_vco_freq_khz) / target_div;
+
+ return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz);
+}
+
+int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+
+ return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz);
+}
+
+/* Unit: kHz. Before a mode set, get the pixel clock from the context; the
+ * ASIC registers may not be programmed yet.
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
+{
+ uint32_t max_pix_clk = 0;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ /* do not check underlay */
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+
+ /* Raise the clock state for HBR3/2 if required. Confirmed with the HW
+ * team: DCE/DPCS logic for HBR3 still needs Nominal (0.8V) on the VDDC
+ * rail
+ */
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
+ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
+ }
+
+ return max_pix_clk;
+}
+
+static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ struct clk_mgr *clk_mgr,
+ struct dc_state *context)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ int i;
+ enum dm_pp_clocks_state low_req_clk;
+ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+
+ /* Iterate from highest supported to lowest valid state, and update
+ * lowest RequiredState with the lowest state that satisfies
+ * all required clocks
+ */
+ for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+ if (context->bw.dce.dispclk_khz >
+ clk_mgr_dce->max_clks_by_state[i].display_clk_khz
+ || max_pix_clk >
+ clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
+ break;
+
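+ /* the loop exits at the highest state that fails (or one below
+ * ULTRA_LOW if none fail), so i + 1 is the lowest state that
+ * satisfies both clocks
+ */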
+ low_req_clk = i + 1;
+ if (low_req_clk > clk_mgr_dce->max_clks_state) {
+ /* clamp to the max clock state for a high phyclk; the request is
+ * invalid only if the display clock itself exceeds the max state
+ */
+ if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
+ < context->bw.dce.dispclk_khz)
+ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ else
+ low_req_clk = clk_mgr_dce->max_clks_state;
+ }
+
+ return low_req_clk;
+}
+
+static int dce_set_clock(
+ struct clk_mgr *clk_mgr,
+ int requested_clk_khz)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+ struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+ int actual_clock = requested_clk_khz;
+ struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_mgr_dce->dentist_vco_freq_khz / 64);
+
+ /* Prepare to program display clock*/
+ pxl_clk_params.target_pixel_clock = requested_clk_khz;
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+
+ if (clk_mgr_dce->dfs_bypass_active)
+ pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
+
+ bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+
+ if (clk_mgr_dce->dfs_bypass_active) {
+ /* Cache the fixed display clock*/
+ clk_mgr_dce->dfs_bypass_disp_clk =
+ pxl_clk_params.dfs_bypass_display_clock;
+ actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+ }
+
+ /* coming from power down (HW reset) we need to mark the clock state as
+ * ClocksStateNominal, so that on resume we will call the pplib voltage
+ * regulator.
+ */
+ if (requested_clk_khz == 0)
+ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
+ dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+
+ return actual_clock;
+}
+
+int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct bp_set_dce_clock_parameters dce_clk_params;
+ struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+ struct dc *core_dc = clk_mgr->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int actual_clock = requested_clk_khz;
+ /* Prepare to program display clock*/
+ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_mgr_dce->dentist_vco_freq_khz / 62);
+
+ dce_clk_params.target_clock_frequency = requested_clk_khz;
+ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+ actual_clock = dce_clk_params.target_clock_frequency;
+
+ /* coming from power down (HW reset) we need to mark the clock state as
+ * ClocksStateNominal, so that on resume we will call the pplib voltage
+ * regulator.
+ */
+ if (requested_clk_khz == 0)
+ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ /* Program DP ref clock */
+ /* VBIOS will determine the DPREFCLK frequency, so we don't set it */
+ dce_clk_params.target_clock_frequency = 0;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+ if (!ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev))
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+ (dce_clk_params.pll_id ==
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+ else
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+ if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_clock / 1000 / 7);
+ }
+ }
+
+ clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
+ return actual_clock;
+}
+
+static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce)
+{
+ struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
+ struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+ struct integrated_info info = { { { 0 } } };
+ struct dc_firmware_info fw_info = { { 0 } };
+ int i;
+
+ if (bp->integrated_info)
+ info = *bp->integrated_info;
+
+ clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+ if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+ clk_mgr_dce->dentist_vco_freq_khz =
+ fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr_dce->dentist_vco_freq_khz == 0)
+ clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ /* update the maximum display clock for each power state */
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ switch (i) {
+ case 0:
+ clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
+ break;
+
+ case 1:
+ clk_state = DM_PP_CLOCKS_STATE_LOW;
+ break;
+
+ case 2:
+ clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ break;
+
+ case 3:
+ clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
+ break;
+
+ default:
+ clk_state = DM_PP_CLOCKS_STATE_INVALID;
+ break;
+ }
+
+ /* Do not allow a bad VBIOS/SBIOS to override with invalid values;
+ * sanity check for > 100 MHz
+ */
+ if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+ clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
+ info.disp_clk_voltage[i].max_supported_clk;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_mgr_dce->dfs_bypass_enabled = true;
+}
+
+void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
+{
+ struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+ int ss_info_num = bp->funcs->get_ss_entry_number(
+ bp, AS_SIGNAL_TYPE_GPU_PLL);
+
+ if (ss_info_num) {
+ struct spread_spectrum_info info = { { 0 } };
+ enum bp_result result = bp->funcs->get_spread_spectrum_info(
+ bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
+
+ /* VBIOS keeps the GPU PLL SS entry even when SS is not enabled, so the
+ * presence of the entry alone means nothing; a non-zero
+ * SSInfo.spreadSpectrumPercentage is the sign that SS is enabled
+ */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+ clk_mgr_dce->ss_on_dprefclk = true;
+ clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* TODO: Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+ clk_mgr_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+
+ return;
+ }
+
+ result = bp->funcs->get_spread_spectrum_info(
+ bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
+
+ /* VBIOS keeps the DPREFCLK SS entry even when SS is not enabled, so
+ * the presence of the entry alone means nothing; a non-zero
+ * SSInfo.spreadSpectrumPercentage is the sign that SS is enabled
+ */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+ clk_mgr_dce->ss_on_dprefclk = true;
+ clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+ clk_mgr_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+ }
+ }
+}
+
+void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg)
+{
+ int j;
+ int num_cfgs = 0;
+
+ for (j = 0; j < context->stream_count; j++) {
+ int k;
+
+ const struct dc_stream_state *stream = context->streams[j];
+ struct dm_pp_single_disp_config *cfg =
+ &pp_display_cfg->disp_configs[num_cfgs];
+ const struct pipe_ctx *pipe_ctx = NULL;
+
+ for (k = 0; k < MAX_PIPES; k++)
+ if (stream == context->res_ctx.pipe_ctx[k].stream) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[k];
+ break;
+ }
+
+ ASSERT(pipe_ctx != NULL);
+
+ /* only notify active stream */
+ if (stream->dpms_off)
+ continue;
+
+ num_cfgs++;
+ cfg->signal = pipe_ctx->stream->signal;
+ cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
+ cfg->src_height = stream->src.height;
+ cfg->src_width = stream->src.width;
+ cfg->ddi_channel_mapping =
+ stream->sink->link->ddi_channel_mapping.raw;
+ cfg->transmitter =
+ stream->sink->link->link_enc->transmitter;
+ cfg->link_settings.lane_count =
+ stream->sink->link->cur_link_settings.lane_count;
+ cfg->link_settings.link_rate =
+ stream->sink->link->cur_link_settings.link_rate;
+ cfg->link_settings.link_spread =
+ stream->sink->link->cur_link_settings.link_spread;
+ cfg->sym_clock = stream->phy_pix_clk;
+ /* Compute v_refresh, rounded to the nearest Hz */
+ cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
+ cfg->v_refresh /= stream->timing.h_total;
+ cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+ / stream->timing.v_total;
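+ /* e.g. (illustrative) 1080p@60: 148500 kHz * 1000 / 2200 = 67500,
+ * then (67500 + 562) / 1125 = 60 Hz
+ */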
+ }
+
+ pp_display_cfg->display_count = num_cfgs;
+}
+
+static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+{
+ uint8_t j;
+ uint32_t min_vertical_blank_time = -1;
+
+ for (j = 0; j < context->stream_count; j++) {
+ struct dc_stream_state *stream = context->streams[j];
+ uint32_t vertical_blank_in_pixels = 0;
+ uint32_t vertical_blank_time = 0;
+
+ vertical_blank_in_pixels = stream->timing.h_total *
+ (stream->timing.v_total
+ - stream->timing.v_addressable);
+
+ vertical_blank_time = vertical_blank_in_pixels
+ * 1000 / stream->timing.pix_clk_khz;
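+ /* e.g. (illustrative) 1080p@60: 2200 * (1125 - 1080) = 99000 blank
+ * pixels, then 99000 * 1000 / 148500 kHz = 666 us of vblank time
+ */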
+
+ if (min_vertical_blank_time > vertical_blank_time)
+ min_vertical_blank_time = vertical_blank_time;
+ }
+
+ return min_vertical_blank_time;
+}
+
+static int determine_sclk_from_bounding_box(
+ const struct dc *dc,
+ int required_sclk)
+{
+ int i;
+
+ /*
+ * Some asics do not give us sclk levels, so we just report the actual
+ * required sclk
+ */
+ if (dc->sclk_lvls.num_levels == 0)
+ return required_sclk;
+
+ for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+ if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+ return dc->sclk_lvls.clocks_in_khz[i];
+ }
+ /*
+ * even maximum level could not satisfy requirement, this
+ * is unexpected at this stage, should have been caught at
+ * validation time
+ */
+ ASSERT(0);
+ return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+}
+
+static void dce_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce11_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->all_displays_in_sync =
+ context->bw.dce.all_displays_in_sync;
+ pp_display_cfg->nb_pstate_switch_disable =
+ context->bw.dce.nbp_state_change_enable == false;
+ pp_display_cfg->cpu_cc6_disable =
+ context->bw.dce.cpuc_state_change_enable == false;
+ pp_display_cfg->cpu_pstate_disable =
+ context->bw.dce.cpup_state_change_enable == false;
+ pp_display_cfg->cpu_pstate_separation_time =
+ context->bw.dce.blackout_recovery_time_us;
+
+ pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+ / MEMORY_TYPE_MULTIPLIER_CZ;
+
+ pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+ dc,
+ context->bw.dce.sclk_khz);
+
+ pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+
+ pp_display_cfg->min_engine_clock_deep_sleep_khz
+ = context->bw.dce.sclk_deep_sleep_khz;
+
+ pp_display_cfg->avail_mclk_switch_time_us =
+ dce110_get_min_vblank_time_us(context);
+ /* TODO: dce11.2 */
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+
+ pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ /* TODO: is this still applicable? */
+ if (pp_display_cfg->display_count == 1) {
+ const struct dc_crtc_timing *timing =
+ &context->streams[0]->timing;
+
+ pp_display_cfg->crtc_index =
+ pp_display_cfg->disp_configs[0].pipe_idx;
+ pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz;
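+ /* e.g. (illustrative) 1080p@60: 2200 * 1000 / 148500 = 14 us */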
+ }
+
+ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce_update_clocks(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct dm_pp_power_level_change_request level_change_req;
+ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+ /* TODO: W/A for DAL3 Linux; investigate why this 15% bump works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+ dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
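+ /* restore the unpadded value so the W/A above does not leak into the
+ * stored context
+ */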
+ context->bw.dce.dispclk_khz = unpatched_disp_clk;
+}
+
+static void dce11_update_clocks(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct dm_pp_power_level_change_request level_change_req;
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+}
+
+static void dce112_update_clocks(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct dm_pp_power_level_change_request level_change_req;
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+}
+
+static void dce12_update_clocks(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+ /* TODO: W/A for DAL3 Linux; investigate why this 15% bump works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz;
+ context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
+ }
+
+ if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+ clock_voltage_req.clocks_in_khz = max_pix_clk;
+ clk_mgr->clks.phyclk_khz = max_pix_clk;
+
+ dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
+ }
+ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+ context->bw.dce.dispclk_khz = unpatched_disp_clk;
+}
+
+static const struct clk_mgr_funcs dce120_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dce12_update_clocks
+};
+
+static const struct clk_mgr_funcs dce112_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .update_clocks = dce112_update_clocks
+};
+
+static const struct clk_mgr_funcs dce110_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .update_clocks = dce11_update_clocks,
+};
+
+static const struct clk_mgr_funcs dce_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .update_clocks = dce_update_clocks
+};
+
+static void dce_clk_mgr_construct(
+ struct dce_clk_mgr *clk_mgr_dce,
+ struct dc_context *ctx,
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask)
+{
+ struct clk_mgr *base = &clk_mgr_dce->base;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+ clk_mgr_dce->regs = regs;
+ clk_mgr_dce->clk_mgr_shift = clk_shift;
+ clk_mgr_dce->clk_mgr_mask = clk_mask;
+
+ clk_mgr_dce->dfs_bypass_disp_clk = 0;
+
+ clk_mgr_dce->dprefclk_ss_percentage = 0;
+ clk_mgr_dce->dprefclk_ss_divider = 1000;
+ clk_mgr_dce->ss_on_dprefclk = false;
+
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state;
+ else
+ clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ dce_clock_read_integrated_info(clk_mgr_dce);
+ dce_clock_read_ss_info(clk_mgr_dce);
+}
+
+struct clk_mgr *dce_clk_mgr_create(
+ struct dc_context *ctx,
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask)
+{
+ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+ if (clk_mgr_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_mgr_dce->max_clks_by_state,
+ dce80_max_clks_by_state,
+ sizeof(dce80_max_clks_by_state));
+
+ dce_clk_mgr_construct(
+ clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+ return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce110_clk_mgr_create(
+ struct dc_context *ctx,
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask)
+{
+ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+ if (clk_mgr_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_mgr_dce->max_clks_by_state,
+ dce110_max_clks_by_state,
+ sizeof(dce110_max_clks_by_state));
+
+ dce_clk_mgr_construct(
+ clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+ clk_mgr_dce->base.funcs = &dce110_funcs;
+
+ return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce112_clk_mgr_create(
+ struct dc_context *ctx,
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask)
+{
+ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+ if (clk_mgr_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_mgr_dce->max_clks_by_state,
+ dce112_max_clks_by_state,
+ sizeof(dce112_max_clks_by_state));
+
+ dce_clk_mgr_construct(
+ clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+ clk_mgr_dce->base.funcs = &dce112_funcs;
+
+ return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx)
+{
+ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+ if (clk_mgr_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_mgr_dce->max_clks_by_state,
+ dce120_max_clks_by_state,
+ sizeof(dce120_max_clks_by_state));
+
+ dce_clk_mgr_construct(
+ clk_mgr_dce, ctx, NULL, NULL, NULL);
+
+ clk_mgr_dce->dprefclk_khz = 600000;
+ clk_mgr_dce->base.funcs = &dce120_funcs;
+
+ return &clk_mgr_dce->base;
+}
+
+void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr);
+
+ kfree(clk_mgr_dce);
+ *clk_mgr = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
index 34fdb386c884..3bceb31d910d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
@@ -24,10 +24,13 @@
*/
-#ifndef _DCE_CLOCKS_H_
-#define _DCE_CLOCKS_H_
+#ifndef _DCE_CLK_MGR_H_
+#define _DCE_CLK_MGR_H_
-#include "display_clock.h"
+#include "clk_mgr.h"
+#include "dccg.h"
+
+#define MEMORY_TYPE_MULTIPLIER_CZ 4
#define CLK_COMMON_REG_LIST_DCE_BASE() \
.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
@@ -53,24 +56,31 @@
type DENTIST_DISPCLK_WDIVIDER; \
type DENTIST_DISPCLK_CHG_DONE;
-struct dccg_shift {
+struct clk_mgr_shift {
CLK_REG_FIELD_LIST(uint8_t)
};
-struct dccg_mask {
+struct clk_mgr_mask {
CLK_REG_FIELD_LIST(uint32_t)
};
-struct dccg_registers {
+struct clk_mgr_registers {
uint32_t DPREFCLK_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
};
-struct dce_dccg {
- struct dccg base;
- const struct dccg_registers *regs;
- const struct dccg_shift *clk_shift;
- const struct dccg_mask *clk_mask;
+struct state_dependent_clocks {
+ int display_clk_khz;
+ int pixel_clk_khz;
+};
+
+struct dce_clk_mgr {
+ struct clk_mgr base;
+ const struct clk_mgr_registers *regs;
+ const struct clk_mgr_shift *clk_mgr_shift;
+ const struct clk_mgr_mask *clk_mgr_mask;
+
+ struct dccg *dccg;
struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
@@ -91,33 +101,70 @@ struct dce_dccg {
/* DPREFCLK SS percentage Divider (100 or 1000) */
int dprefclk_ss_divider;
int dprefclk_khz;
+
+ enum dm_pp_clocks_state max_clks_state;
+ enum dm_pp_clocks_state cur_min_clks_state;
+};
+
+/* Starting DID for each range */
+enum dentist_base_divider_id {
+ DENTIST_BASE_DID_1 = 0x08,
+ DENTIST_BASE_DID_2 = 0x40,
+ DENTIST_BASE_DID_3 = 0x60,
+ DENTIST_BASE_DID_4 = 0x7e,
+ DENTIST_MAX_DID = 0x7f
};
+/* Starting point and step size for each divider range. */
+enum dentist_divider_range {
+ DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
+ DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
+ DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
+ DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
+ DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+ DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
+ DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
+ DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
+ DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
+};
+
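+/* Raise a clock as soon as the calculated value is higher than the current
+ * one, but only lower it when the caller indicates it is safe to do so.
+ */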
+static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+{
+ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+}
+
+void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce);
+
+int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr);
+
+void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+
+int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz);
-struct dccg *dce_dccg_create(
+struct clk_mgr *dce_clk_mgr_create(
struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask);
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask);
-struct dccg *dce110_dccg_create(
+struct clk_mgr *dce110_clk_mgr_create(
struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask);
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask);
-struct dccg *dce112_dccg_create(
+struct clk_mgr *dce112_clk_mgr_create(
struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask);
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask);
-struct dccg *dce120_dccg_create(struct dc_context *ctx);
+struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-struct dccg *dcn1_dccg_create(struct dc_context *ctx);
-#endif
+void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
-void dce_dccg_destroy(struct dccg **dccg);
+int dentist_get_divider_from_did(int did);
-#endif /* _DCE_CLOCKS_H_ */
+#endif /* _DCE_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
deleted file mode 100644
index d89a097ba936..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ /dev/null
@@ -1,947 +0,0 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "dce_clocks.h"
-#include "dm_services.h"
-#include "reg_helper.h"
-#include "fixed31_32.h"
-#include "bios_parser_interface.h"
-#include "dc.h"
-#include "dmcu.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-#include "dcn_calcs.h"
-#endif
-#include "core_types.h"
-#include "dc_types.h"
-#include "dal_asic_id.h"
-
-#define TO_DCE_CLOCKS(clocks)\
- container_of(clocks, struct dce_dccg, base)
-
-#define REG(reg) \
- (clk_dce->regs->reg)
-
-#undef FN
-#define FN(reg_name, field_name) \
- clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name
-
-#define CTX \
- clk_dce->base.ctx
-#define DC_LOGGER \
- clk->ctx->logger
-
-/* Max clock values for each state indexed by "enum clocks_state": */
-static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
-/* ClocksStateInvalid - should not be used */
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/* ClocksStateLow */
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
-/* ClocksStateNominal */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
-/* ClocksStatePerformance */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
-
-static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
-
-static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
-
-static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
-
-/* Starting DID for each range */
-enum dentist_base_divider_id {
- DENTIST_BASE_DID_1 = 0x08,
- DENTIST_BASE_DID_2 = 0x40,
- DENTIST_BASE_DID_3 = 0x60,
- DENTIST_BASE_DID_4 = 0x7e,
- DENTIST_MAX_DID = 0x7f
-};
-
-/* Starting point and step size for each divider range.*/
-enum dentist_divider_range {
- DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
- DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
- DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
- DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
- DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
- DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
- DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
- DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
- DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
-};
-
-static int dentist_get_divider_from_did(int did)
-{
- if (did < DENTIST_BASE_DID_1)
- did = DENTIST_BASE_DID_1;
- if (did > DENTIST_MAX_DID)
- did = DENTIST_MAX_DID;
-
- if (did < DENTIST_BASE_DID_2) {
- return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
- * (did - DENTIST_BASE_DID_1);
- } else if (did < DENTIST_BASE_DID_3) {
- return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
- * (did - DENTIST_BASE_DID_2);
- } else if (did < DENTIST_BASE_DID_4) {
- return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
- * (did - DENTIST_BASE_DID_3);
- } else {
- return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
- * (did - DENTIST_BASE_DID_4);
- }
-}
-
-/* SW will adjust DP REF Clock average value for all purposes
- * (DP DTO / DP Audio DTO and DP GTC)
- if clock is spread for all cases:
- -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
- calculations for DS_INCR/DS_MODULO (this is planned to be default case)
- -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
- calculations (not planned to be used, but average clock should still
- be valid)
- -if SS enabled on DP Ref clock and HW de-spreading disabled
- (should not be case with CIK) then SW should program all rates
- generated according to average value (case as with previous ASICs)
- */
-static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
-{
- if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
- dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
- clk_dce->dprefclk_ss_divider), 200);
- struct fixed31_32 adj_dp_ref_clk_khz;
-
- ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
- adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
- }
- return dp_ref_clk_khz;
-}
-
-static int dce_get_dp_ref_freq_khz(struct dccg *clk)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- int dprefclk_wdivider;
- int dprefclk_src_sel;
- int dp_ref_clk_khz = 600000;
- int target_div;
-
- /* ASSERT DP Reference Clock source is from DFS*/
- REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
- ASSERT(dprefclk_src_sel == 0);
-
- /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
- * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
- REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
-
- /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
- target_div = dentist_get_divider_from_did(dprefclk_wdivider);
-
- /* Calculate the current DFS clock, in kHz.*/
- dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_dce->dentist_vco_freq_khz) / target_div;
-
- return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
-}
-
-static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-
- return dccg_adjust_dp_ref_freq_for_ss(clk_dce, clk_dce->dprefclk_khz);
-}
-
-static enum dm_pp_clocks_state dce_get_required_clocks_state(
- struct dccg *clk,
- struct dc_clocks *req_clocks)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- int i;
- enum dm_pp_clocks_state low_req_clk;
-
- /* Iterate from highest supported to lowest valid state, and update
- * lowest RequiredState with the lowest state that satisfies
- * all required clocks
- */
- for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
- if (req_clocks->dispclk_khz >
- clk_dce->max_clks_by_state[i].display_clk_khz
- || req_clocks->phyclk_khz >
- clk_dce->max_clks_by_state[i].pixel_clk_khz)
- break;
-
- low_req_clk = i + 1;
- if (low_req_clk > clk->max_clks_state) {
- /* set max clock state for high phyclock, invalid on exceeding display clock */
- if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
- < req_clocks->dispclk_khz)
- low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
- else
- low_req_clk = clk->max_clks_state;
- }
-
- return low_req_clk;
-}
-
-static int dce_set_clock(
- struct dccg *clk,
- int requested_clk_khz)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
- struct dc_bios *bp = clk->ctx->dc_bios;
- int actual_clock = requested_clk_khz;
-
- /* Make sure requested clock isn't lower than minimum threshold*/
- if (requested_clk_khz > 0)
- requested_clk_khz = max(requested_clk_khz,
- clk_dce->dentist_vco_freq_khz / 64);
-
- /* Prepare to program display clock*/
- pxl_clk_params.target_pixel_clock = requested_clk_khz;
- pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
-
- if (clk_dce->dfs_bypass_active)
- pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
-
- bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
-
- if (clk_dce->dfs_bypass_active) {
- /* Cache the fixed display clock*/
- clk_dce->dfs_bypass_disp_clk =
- pxl_clk_params.dfs_bypass_display_clock;
- actual_clock = pxl_clk_params.dfs_bypass_display_clock;
- }
-
- /* from power down, we need mark the clock state as ClocksStateNominal
- * from HWReset, so when resume we will call pplib voltage regulator.*/
- if (requested_clk_khz == 0)
- clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
- return actual_clock;
-}
-
-static int dce_psr_set_clock(
- struct dccg *clk,
- int requested_clk_khz)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- struct dc_context *ctx = clk_dce->base.ctx;
- struct dc *core_dc = ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
- int actual_clk_khz = requested_clk_khz;
-
- actual_clk_khz = dce_set_clock(clk, requested_clk_khz);
-
- dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7);
- return actual_clk_khz;
-}
-
-static int dce112_set_clock(
- struct dccg *clk,
- int requested_clk_khz)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- struct bp_set_dce_clock_parameters dce_clk_params;
- struct dc_bios *bp = clk->ctx->dc_bios;
- struct dc *core_dc = clk->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
- int actual_clock = requested_clk_khz;
- /* Prepare to program display clock*/
- memset(&dce_clk_params, 0, sizeof(dce_clk_params));
-
- /* Make sure requested clock isn't lower than minimum threshold*/
- if (requested_clk_khz > 0)
- requested_clk_khz = max(requested_clk_khz,
- clk_dce->dentist_vco_freq_khz / 62);
-
- dce_clk_params.target_clock_frequency = requested_clk_khz;
- dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
- dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
-
- bp->funcs->set_dce_clock(bp, &dce_clk_params);
- actual_clock = dce_clk_params.target_clock_frequency;
-
- /* from power down, we need mark the clock state as ClocksStateNominal
- * from HWReset, so when resume we will call pplib voltage regulator.*/
- if (requested_clk_khz == 0)
- clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
-
- /*Program DP ref Clock*/
- /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
- dce_clk_params.target_clock_frequency = 0;
- dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
- if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
- dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
- (dce_clk_params.pll_id ==
- CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
- else
- dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
-
- bp->funcs->set_dce_clock(bp, &dce_clk_params);
-
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
- if (clk_dce->dfs_bypass_disp_clk != actual_clock)
- dmcu->funcs->set_psr_wait_loop(dmcu,
- actual_clock / 1000 / 7);
- }
-
- clk_dce->dfs_bypass_disp_clk = actual_clock;
- return actual_clock;
-}
-
-static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
-{
- struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
- struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
- struct integrated_info info = { { { 0 } } };
- struct dc_firmware_info fw_info = { { 0 } };
- int i;
-
- if (bp->integrated_info)
- info = *bp->integrated_info;
-
- clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
- if (clk_dce->dentist_vco_freq_khz == 0) {
- bp->funcs->get_firmware_info(bp, &fw_info);
- clk_dce->dentist_vco_freq_khz =
- fw_info.smu_gpu_pll_output_freq;
- if (clk_dce->dentist_vco_freq_khz == 0)
- clk_dce->dentist_vco_freq_khz = 3600000;
- }
-
- /*update the maximum display clock for each power state*/
- for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
- enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
-
- switch (i) {
- case 0:
- clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
- break;
-
- case 1:
- clk_state = DM_PP_CLOCKS_STATE_LOW;
- break;
-
- case 2:
- clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
- break;
-
- case 3:
- clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
- break;
-
- default:
- clk_state = DM_PP_CLOCKS_STATE_INVALID;
- break;
- }
-
- /*Do not allow bad VBIOS/SBIOS to override with invalid values,
- * check for > 100MHz*/
- if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
- clk_dce->max_clks_by_state[clk_state].display_clk_khz =
- info.disp_clk_voltage[i].max_supported_clk;
- }
-
- if (!debug->disable_dfs_bypass && bp->integrated_info)
- if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
- clk_dce->dfs_bypass_enabled = true;
-}
-
-static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
-{
- struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
- int ss_info_num = bp->funcs->get_ss_entry_number(
- bp, AS_SIGNAL_TYPE_GPU_PLL);
-
- if (ss_info_num) {
- struct spread_spectrum_info info = { { 0 } };
- enum bp_result result = bp->funcs->get_spread_spectrum_info(
- bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
-
- /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
- * even if SS not enabled and in that case
- * SSInfo.spreadSpectrumPercentage !=0 would be sign
- * that SS is enabled
- */
- if (result == BP_RESULT_OK &&
- info.spread_spectrum_percentage != 0) {
- clk_dce->ss_on_dprefclk = true;
- clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
-
- if (info.type.CENTER_MODE == 0) {
- /* TODO: Currently for DP Reference clock we
- * need only SS percentage for
- * downspread */
- clk_dce->dprefclk_ss_percentage =
- info.spread_spectrum_percentage;
- }
-
- return;
- }
-
- result = bp->funcs->get_spread_spectrum_info(
- bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
-
- /* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
- * even if SS not enabled and in that case
- * SSInfo.spreadSpectrumPercentage !=0 would be sign
- * that SS is enabled
- */
- if (result == BP_RESULT_OK &&
- info.spread_spectrum_percentage != 0) {
- clk_dce->ss_on_dprefclk = true;
- clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
-
- if (info.type.CENTER_MODE == 0) {
- /* Currently for DP Reference clock we
- * need only SS percentage for
- * downspread */
- clk_dce->dprefclk_ss_percentage =
- info.spread_spectrum_percentage;
- }
- }
- }
-}
-
-static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
-{
- return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
-}
-
-static void dce12_update_clocks(struct dccg *dccg,
- struct dc_clocks *new_clocks,
- bool safe_to_lower)
-{
- struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
-
- /* TODO: Investigate why this is needed to fix display corruption. */
- new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
-
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
- clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
- new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-
- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
- clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
- dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
-
- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
- }
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
-{
- bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
- bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
- int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
- bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
-
- /* increase clock, looking for div is 0 for current, request div is 1*/
- if (dispclk_increase) {
- /* already divided by 2, no need to reach target clk with 2 steps*/
- if (cur_dpp_div)
- return new_clocks->dispclk_khz;
-
- /* request disp clk is lower than maximum supported dpp clk,
- * no need to reach target clk with two steps.
- */
- if (new_clocks->dispclk_khz <= disp_clk_threshold)
- return new_clocks->dispclk_khz;
-
- /* target dpp clk not request divided by 2, still within threshold */
- if (!request_dpp_div)
- return new_clocks->dispclk_khz;
-
- } else {
- /* decrease clock, looking for current dppclk divided by 2,
- * request dppclk not divided by 2.
- */
-
- /* current dpp clk not divided by 2, no need to ramp*/
- if (!cur_dpp_div)
- return new_clocks->dispclk_khz;
-
- /* current disp clk is lower than current maximum dpp clk,
- * no need to ramp
- */
- if (dccg->clks.dispclk_khz <= disp_clk_threshold)
- return new_clocks->dispclk_khz;
-
- /* request dpp clk need to be divided by 2 */
- if (request_dpp_div)
- return new_clocks->dispclk_khz;
- }
-
- return disp_clk_threshold;
-}
-
-static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
-{
- struct dc *dc = dccg->ctx->dc;
- int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
- bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
- int i;
-
- /* set disp clk to dpp clk threshold */
- dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);
-
- /* update request dpp clk division option */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx->plane_state)
- continue;
-
- pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
- pipe_ctx->plane_res.dpp,
- request_dpp_div,
- true);
- }
-
- /* If target clk not same as dppclk threshold, set to target clock */
- if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
- dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
-
- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
- dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
- dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
-}
-
-static void dcn1_update_clocks(struct dccg *dccg,
- struct dc_clocks *new_clocks,
- bool safe_to_lower)
-{
- struct dc *dc = dccg->ctx->dc;
- struct pp_smu_display_requirement_rv *smu_req_cur =
- &dc->res_pool->pp_smu_req;
- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
- struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
- bool send_request_to_increase = false;
- bool send_request_to_lower = false;
-
- if (new_clocks->phyclk_khz)
- smu_req.display_count = 1;
- else
- smu_req.display_count = 0;
-
- if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
- || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
- || new_clocks->fclk_khz > dccg->clks.fclk_khz
- || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
- send_request_to_increase = true;
-
- if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
- dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
-
- send_request_to_lower = true;
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
- dccg->clks.fclk_khz = new_clocks->fclk_khz;
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
- clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
- smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
-
- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
- send_request_to_lower = true;
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
- dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
- smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
-
- send_request_to_lower = true;
- }
-
- if (should_set_clock(safe_to_lower,
- new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
- dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
- smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
-
- send_request_to_lower = true;
- }
-
- /* make sure dcf clk is before dpp clk to
- * make sure we have enough voltage to run dpp clk
- */
- if (send_request_to_increase) {
- /*use dcfclk to request voltage*/
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- }
-
- /* dcn1 dppclk is tied to dispclk */
- /* program dispclk on = as a w/a for sleep resume clock ramping issues */
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
- || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
- dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-
- send_request_to_lower = true;
- }
-
- if (!send_request_to_increase && send_request_to_lower) {
- /*use dcfclk to request voltage*/
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- }
-
-
- *smu_req_cur = smu_req;
-}
-#endif
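The update sequence above leans on a small helper, should_set_clock(), to decide whether a clock is worth reprogramming: a clock may always be raised, but is only lowered once the caller passes safe_to_lower. A minimal sketch of the assumed semantics:

	static bool should_set_clock(bool safe_to_lower, int calc_clk_khz, int cur_clk_khz)
	{
		/* Raising is always allowed; lowering only when the caller says so. */
		return (calc_clk_khz > cur_clk_khz)
			|| (safe_to_lower && calc_clk_khz < cur_clk_khz);
	}

This is why dcn1_update_clocks() raises voltage (via the DCFCLK request) before any increase, and only issues the lowering request after every clock has been re-evaluated.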
-
-static void dce_update_clocks(struct dccg *dccg,
- struct dc_clocks *new_clocks,
- bool safe_to_lower)
-{
- struct dm_pp_power_level_change_request level_change_req;
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
-
- /* TODO: Investigate why this is needed to fix display corruption. */
- if (!clk_dce->dfs_bypass_active)
- new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
-
- level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
- /* get max clock state from PPLIB */
- if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
- || level_change_req.power_level > dccg->cur_min_clks_state) {
- if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
- dccg->cur_min_clks_state = level_change_req.power_level;
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
- new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
- }
-}
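Note the 15% headroom applied when DFS bypass is inactive: the requested DISPCLK is scaled by 115/100 before the clock-state lookup, so a requested 540,000 kHz is programmed as 540000 * 115 / 100 = 621,000 kHz. The TODO above records that this padding is an empirical workaround for display corruption rather than a derived requirement.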
-
-static bool dce_update_dfs_bypass(
- struct dccg *dccg,
- struct dc *dc,
- struct dc_state *context,
- int requested_clock_khz)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
- struct resource_context *res_ctx = &context->res_ctx;
- enum signal_type signal_type = SIGNAL_TYPE_NONE;
- bool was_active = clk_dce->dfs_bypass_active;
- int i;
-
- /* Disable DFS bypass by default. */
- clk_dce->dfs_bypass_active = false;
-
- /* Check that DFS bypass is available. */
- if (!clk_dce->dfs_bypass_enabled)
- goto update;
-
- /* Check if the requested display clock is below the threshold. */
- if (requested_clock_khz >= 400000)
- goto update;
-
- /* DFS-bypass should only be enabled on single stream setups */
- if (context->stream_count != 1)
- goto update;
-
- /* Check that the stream's signal type is an embedded panel */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (res_ctx->pipe_ctx[i].stream) {
- struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-
- signal_type = pipe_ctx->stream->sink->link->connector_signal;
- break;
- }
- }
-
- if (signal_type == SIGNAL_TYPE_EDP ||
- signal_type == SIGNAL_TYPE_LVDS)
- clk_dce->dfs_bypass_active = true;
-
-update:
- /* Update the clock state. We don't need to respect safe_to_lower
- * because DFS bypass should always be greater than the current
- * display clock frequency.
- */
- if (was_active != clk_dce->dfs_bypass_active) {
- dccg->clks.dispclk_khz =
- dccg->funcs->set_dispclk(dccg, dccg->clks.dispclk_khz);
- return true;
- }
-
- return false;
-}
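The bypass decision above reduces to a single predicate; condensed here purely for illustration (this is not the driver's code):

	/* DFS bypass engages only when every condition holds. */
	bool dfs_bypass_ok = clk_dce->dfs_bypass_enabled   /* HW/VBIOS support   */
		&& requested_clock_khz < 400000            /* below 400 MHz      */
		&& context->stream_count == 1              /* single stream only */
		&& (signal_type == SIGNAL_TYPE_EDP         /* embedded panels    */
		    || signal_type == SIGNAL_TYPE_LVDS);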
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-static const struct display_clock_funcs dcn1_funcs = {
- .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
- .set_dispclk = dce112_set_clock,
- .update_clocks = dcn1_update_clocks
-};
-#endif
-
-static const struct display_clock_funcs dce120_funcs = {
- .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
- .set_dispclk = dce112_set_clock,
- .update_clocks = dce12_update_clocks
-};
-
-static const struct display_clock_funcs dce112_funcs = {
- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
- .set_dispclk = dce112_set_clock,
- .update_clocks = dce_update_clocks
-};
-
-static const struct display_clock_funcs dce110_funcs = {
- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
- .set_dispclk = dce_psr_set_clock,
- .update_clocks = dce_update_clocks,
- .update_dfs_bypass = dce_update_dfs_bypass
-};
-
-static const struct display_clock_funcs dce_funcs = {
- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
- .set_dispclk = dce_set_clock,
- .update_clocks = dce_update_clocks
-};
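These per-ASIC vtables share one dispatch pattern; which table a dccg carries is decided by the matching *_dccg_create() constructor below. A sketch of the assumed caller, mirroring the order used by the DCE110 bandwidth path (update_dfs_bypass is an optional hook and, in this file, is wired up only for dce110_funcs):

	if (dccg->funcs->update_dfs_bypass)	/* optional; NULL elsewhere */
		dccg->funcs->update_dfs_bypass(dccg, dc, context,
					       req_clks.dispclk_khz);

	dccg->funcs->update_clocks(dccg, &req_clks, safe_to_lower);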
-
-static void dce_dccg_construct(
- struct dce_dccg *clk_dce,
- struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask)
-{
- struct dccg *base = &clk_dce->base;
-
- base->ctx = ctx;
- base->funcs = &dce_funcs;
-
- clk_dce->regs = regs;
- clk_dce->clk_shift = clk_shift;
- clk_dce->clk_mask = clk_mask;
-
- clk_dce->dfs_bypass_disp_clk = 0;
-
- clk_dce->dprefclk_ss_percentage = 0;
- clk_dce->dprefclk_ss_divider = 1000;
- clk_dce->ss_on_dprefclk = false;
-
- base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
- base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
-
- dce_clock_read_integrated_info(clk_dce);
- dce_clock_read_ss_info(clk_dce);
-}
-
-struct dccg *dce_dccg_create(
- struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask)
-{
- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
- if (clk_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- memcpy(clk_dce->max_clks_by_state,
- dce80_max_clks_by_state,
- sizeof(dce80_max_clks_by_state));
-
- dce_dccg_construct(
- clk_dce, ctx, regs, clk_shift, clk_mask);
-
- return &clk_dce->base;
-}
-
-struct dccg *dce110_dccg_create(
- struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask)
-{
- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
- if (clk_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- memcpy(clk_dce->max_clks_by_state,
- dce110_max_clks_by_state,
- sizeof(dce110_max_clks_by_state));
-
- dce_dccg_construct(
- clk_dce, ctx, regs, clk_shift, clk_mask);
-
- clk_dce->base.funcs = &dce110_funcs;
-
- return &clk_dce->base;
-}
-
-struct dccg *dce112_dccg_create(
- struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask)
-{
- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
- if (clk_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- memcpy(clk_dce->max_clks_by_state,
- dce112_max_clks_by_state,
- sizeof(dce112_max_clks_by_state));
-
- dce_dccg_construct(
- clk_dce, ctx, regs, clk_shift, clk_mask);
-
- clk_dce->base.funcs = &dce112_funcs;
-
- return &clk_dce->base;
-}
-
-struct dccg *dce120_dccg_create(struct dc_context *ctx)
-{
- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
- if (clk_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- memcpy(clk_dce->max_clks_by_state,
- dce120_max_clks_by_state,
- sizeof(dce120_max_clks_by_state));
-
- dce_dccg_construct(
- clk_dce, ctx, NULL, NULL, NULL);
-
- clk_dce->dprefclk_khz = 600000;
- clk_dce->base.funcs = &dce120_funcs;
-
- return &clk_dce->base;
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-struct dccg *dcn1_dccg_create(struct dc_context *ctx)
-{
- struct dc_debug_options *debug = &ctx->dc->debug;
- struct dc_bios *bp = ctx->dc_bios;
- struct dc_firmware_info fw_info = { { 0 } };
- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
- if (clk_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- clk_dce->base.ctx = ctx;
- clk_dce->base.funcs = &dcn1_funcs;
-
- clk_dce->dfs_bypass_disp_clk = 0;
-
- clk_dce->dprefclk_ss_percentage = 0;
- clk_dce->dprefclk_ss_divider = 1000;
- clk_dce->ss_on_dprefclk = false;
-
- clk_dce->dprefclk_khz = 600000;
- if (bp->integrated_info)
- clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
- if (clk_dce->dentist_vco_freq_khz == 0) {
- bp->funcs->get_firmware_info(bp, &fw_info);
- clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
- if (clk_dce->dentist_vco_freq_khz == 0)
- clk_dce->dentist_vco_freq_khz = 3600000;
- }
-
- if (!debug->disable_dfs_bypass && bp->integrated_info)
- if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
- clk_dce->dfs_bypass_enabled = true;
-
- dce_clock_read_ss_info(clk_dce);
-
- return &clk_dce->base;
-}
-#endif
-
-void dce_dccg_destroy(struct dccg **dccg)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
-
- kfree(clk_dce);
- *dccg = NULL;
-}
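Taken together, the constructors and destructor above give the dccg object a simple lifecycle. A hypothetical caller, using only names from this file:

	struct dccg *dccg = dce110_dccg_create(ctx, &regs, &shift, &mask);

	if (dccg == NULL)
		return false;	/* create already hit BREAK_TO_DEBUGGER() */

	/* ... dispatch through dccg->funcs for the pool's lifetime ... */

	dce_dccg_destroy(&dccg);	/* kfree()s the wrapper and NULLs the pointer */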
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 64dc75378541..c83a7f05f14c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -233,6 +233,16 @@ struct dce_hwseq_registers {
uint32_t DOMAIN5_PG_CONFIG;
uint32_t DOMAIN6_PG_CONFIG;
uint32_t DOMAIN7_PG_CONFIG;
+ uint32_t DOMAIN8_PG_CONFIG;
+ uint32_t DOMAIN9_PG_CONFIG;
+ uint32_t DOMAIN10_PG_CONFIG;
+ uint32_t DOMAIN11_PG_CONFIG;
+ uint32_t DOMAIN16_PG_CONFIG;
+ uint32_t DOMAIN17_PG_CONFIG;
+ uint32_t DOMAIN18_PG_CONFIG;
+ uint32_t DOMAIN19_PG_CONFIG;
+ uint32_t DOMAIN20_PG_CONFIG;
+ uint32_t DOMAIN21_PG_CONFIG;
uint32_t DOMAIN0_PG_STATUS;
uint32_t DOMAIN1_PG_STATUS;
uint32_t DOMAIN2_PG_STATUS;
@@ -241,6 +251,16 @@ struct dce_hwseq_registers {
uint32_t DOMAIN5_PG_STATUS;
uint32_t DOMAIN6_PG_STATUS;
uint32_t DOMAIN7_PG_STATUS;
+ uint32_t DOMAIN8_PG_STATUS;
+ uint32_t DOMAIN9_PG_STATUS;
+ uint32_t DOMAIN10_PG_STATUS;
+ uint32_t DOMAIN11_PG_STATUS;
+ uint32_t DOMAIN16_PG_STATUS;
+ uint32_t DOMAIN17_PG_STATUS;
+ uint32_t DOMAIN18_PG_STATUS;
+ uint32_t DOMAIN19_PG_STATUS;
+ uint32_t DOMAIN20_PG_STATUS;
+ uint32_t DOMAIN21_PG_STATUS;
uint32_t DIO_MEM_PWR_CTRL;
uint32_t DCCG_GATE_DISABLE_CNTL;
uint32_t DCCG_GATE_DISABLE_CNTL2;
@@ -262,6 +282,8 @@ struct dce_hwseq_registers {
uint32_t D2VGA_CONTROL;
uint32_t D3VGA_CONTROL;
uint32_t D4VGA_CONTROL;
+ uint32_t D5VGA_CONTROL;
+ uint32_t D6VGA_CONTROL;
uint32_t VGA_TEST_CONTROL;
/* MMHUB registers. read only. temporary hack */
uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
@@ -489,6 +511,26 @@ struct dce_hwseq_registers {
type DOMAIN6_POWER_GATE; \
type DOMAIN7_POWER_FORCEON; \
type DOMAIN7_POWER_GATE; \
+ type DOMAIN8_POWER_FORCEON; \
+ type DOMAIN8_POWER_GATE; \
+ type DOMAIN9_POWER_FORCEON; \
+ type DOMAIN9_POWER_GATE; \
+ type DOMAIN10_POWER_FORCEON; \
+ type DOMAIN10_POWER_GATE; \
+ type DOMAIN11_POWER_FORCEON; \
+ type DOMAIN11_POWER_GATE; \
+ type DOMAIN16_POWER_FORCEON; \
+ type DOMAIN16_POWER_GATE; \
+ type DOMAIN17_POWER_FORCEON; \
+ type DOMAIN17_POWER_GATE; \
+ type DOMAIN18_POWER_FORCEON; \
+ type DOMAIN18_POWER_GATE; \
+ type DOMAIN19_POWER_FORCEON; \
+ type DOMAIN19_POWER_GATE; \
+ type DOMAIN20_POWER_FORCEON; \
+ type DOMAIN20_POWER_GATE; \
+ type DOMAIN21_POWER_FORCEON; \
+ type DOMAIN21_POWER_GATE; \
type DOMAIN0_PGFSM_PWR_STATUS; \
type DOMAIN1_PGFSM_PWR_STATUS; \
type DOMAIN2_PGFSM_PWR_STATUS; \
@@ -497,6 +539,16 @@ struct dce_hwseq_registers {
type DOMAIN5_PGFSM_PWR_STATUS; \
type DOMAIN6_PGFSM_PWR_STATUS; \
type DOMAIN7_PGFSM_PWR_STATUS; \
+ type DOMAIN8_PGFSM_PWR_STATUS; \
+ type DOMAIN9_PGFSM_PWR_STATUS; \
+ type DOMAIN10_PGFSM_PWR_STATUS; \
+ type DOMAIN11_PGFSM_PWR_STATUS; \
+ type DOMAIN16_PGFSM_PWR_STATUS; \
+ type DOMAIN17_PGFSM_PWR_STATUS; \
+ type DOMAIN18_PGFSM_PWR_STATUS; \
+ type DOMAIN19_PGFSM_PWR_STATUS; \
+ type DOMAIN20_PGFSM_PWR_STATUS; \
+ type DOMAIN21_PGFSM_PWR_STATUS; \
type DCFCLK_GATE_DIS; \
type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
type VGA_TEST_ENABLE; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 366bc8c2c643..3e18ea84b1f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -645,7 +645,7 @@ static bool dce110_link_encoder_validate_hdmi_output(
return false;
/* DCE11 HW does not support 420 */
- if (!enc110->base.features.ycbcr420_supported &&
+ if (!enc110->base.features.hdmi_ycbcr420_supported &&
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index c47c81883d3c..cce0d18f91da 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -908,7 +908,6 @@ static void dce110_stream_encoder_dp_blank(
struct stream_encoder *enc)
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
- uint32_t retries = 0;
uint32_t reg1 = 0;
uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
@@ -926,30 +925,28 @@ static void dce110_stream_encoder_dp_blank(
* (2 = start of the next vertical blank) */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
/* Larger delay to wait until VBLANK - use max retry of
- * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
- * a little more because we may not trust delay accuracy.
- */
+ * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+ * a little more because we may not trust delay accuracy.
+ */
max_retries = DP_BLANK_MAX_RETRY * 150;
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
/* the encoder stops sending the video stream
- * at the start of the vertical blanking.
- * Poll for DP_VID_STREAM_STATUS == 0
- */
+ * at the start of the vertical blanking.
+ * Poll for DP_VID_STREAM_STATUS == 0
+ */
REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
0,
10, max_retries);
- ASSERT(retries <= max_retries);
-
/* Tell the DP encoder to ignore timing from CRTC, must be done after
- * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
- * complete, stream status will be stuck in video stream enabled state,
- * i.e. DP_VID_STREAM_STATUS stuck at 1.
- */
+ * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
+ * complete, stream status will be stuck in video stream enabled state,
+ * i.e. DP_VID_STREAM_STATUS stuck at 1.
+ */
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
}
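The retry budget in this hunk is worth spelling out: REG_WAIT polls DP_VID_STREAM_STATUS once every 10 us, and the comment's "10us*3000=30ms" implies max_retries = DP_BLANK_MAX_RETRY * 150 = 3000, a 30 ms ceiling that covers one 16.6 ms frame at 60 Hz with ample margin for timer inaccuracy. The deleted ASSERT(retries <= max_retries) was dead code: retries was initialized to 0 and never updated, so the assertion could never fire, which is why its declaration is removed as well.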
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 74c05e878807..87771676acac 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -105,74 +105,30 @@ bool dce100_enable_display_power_gating(
return false;
}
-static void dce100_pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context)
-{
- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
- pp_display_cfg->avail_mclk_switch_time_us =
- dce110_get_min_vblank_time_us(context);
- /*pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
- / MEMORY_TYPE_MULTIPLIER;*/
-
- dce110_fill_display_configs(context, pp_display_cfg);
-
- if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
- struct dm_pp_display_configuration)) != 0)
- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
- dc->prev_display_config = *pp_display_cfg;
-}
-
-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet
- */
-static uint32_t get_max_pixel_clock_for_all_paths(
- struct dc *dc,
- struct dc_state *context)
+void dce100_prepare_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
{
- uint32_t max_pix_clk = 0;
- int i;
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == NULL)
- continue;
-
- /* do not check under lay */
- if (pipe_ctx->top_pipe)
- continue;
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
- if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
- max_pix_clk =
- pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
- }
- return max_pix_clk;
+ dc->res_pool->clk_mgr->funcs->update_clocks(
+ dc->res_pool->clk_mgr,
+ context,
+ false);
}
-void dce100_set_bandwidth(
+void dce100_optimize_bandwidth(
struct dc *dc,
- struct dc_state *context,
- bool decrease_allowed)
+ struct dc_state *context)
{
- struct dc_clocks req_clks;
-
- req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
- req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
-
dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
- dc->res_pool->dccg->funcs->update_clocks(
- dc->res_pool->dccg,
- &req_clks,
- decrease_allowed);
-
- dce100_pplib_apply_display_requirements(dc, context);
+ dc->res_pool->clk_mgr->funcs->update_clocks(
+ dc->res_pool->clk_mgr,
+ context,
+ true);
}
-
/**************************************************************************/
void dce100_hw_sequencer_construct(struct dc *dc)
@@ -180,8 +136,7 @@ void dce100_hw_sequencer_construct(struct dc *dc)
dce110_hw_sequencer_construct(dc);
dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
- dc->hwss.set_bandwidth = dce100_set_bandwidth;
- dc->hwss.pplib_apply_display_requirements =
- dce100_pplib_apply_display_requirements;
+ dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
+ dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
}
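The rename is more than cosmetic: the single set_bandwidth(dc, context, decrease_allowed) entry point is split into a pair of hooks with a fixed calling order around mode programming. A sketch of the assumed usage (not taken verbatim from the driver):

	/* Before touching pipes: safe watermarks, clocks may only rise. */
	dc->hwss.prepare_bandwidth(dc, context);	/* update_clocks(..., false) */

	/* ... program front ends / apply the new context ... */

	/* After the new state is committed: clocks are allowed to drop. */
	dc->hwss.optimize_bandwidth(dc, context);	/* update_clocks(..., true) */

Note also that both hooks now take the whole dc_state instead of a precomputed dc_clocks, pushing the clock derivation (and the old pplib bookkeeping) down into the clk_mgr.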
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index c6ec0ed6ec3d..acd418515346 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -33,10 +33,9 @@ struct dc_state;
void dce100_hw_sequencer_construct(struct dc *dc);
-void dce100_set_bandwidth(
+void dce100_prepare_bandwidth(
struct dc *dc,
- struct dc_state *context,
- bool decrease_allowed);
+ struct dc_state *context);
bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
struct dc_bios *dcb,
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 14754a87156c..6ae51a5dfc04 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -36,11 +36,11 @@
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
+#include "dce/dce_clk_mgr.h"
#include "dce/dce_mem_input.h"
#include "dce/dce_ipp.h"
#include "dce/dce_transform.h"
#include "dce/dce_opp.h"
-#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
@@ -137,15 +137,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -722,8 +722,8 @@ static void destruct(struct dce110_resource_pool *pool)
dce_aud_destroy(&pool->base.audios[i]);
}
- if (pool->base.dccg != NULL)
- dce_dccg_destroy(&pool->base.dccg);
+ if (pool->base.clk_mgr != NULL)
+ dce_clk_mgr_destroy(&pool->base.clk_mgr);
if (pool->base.abm != NULL)
dce_abm_destroy(&pool->base.abm);
@@ -767,7 +767,7 @@ bool dce100_validate_bandwidth(
if (at_least_one_pipe) {
/* TODO implement when needed but for now hardcode max value*/
context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
context->bw.dce.dispclk_khz = 0;
context->bw.dce.yclk_khz = 0;
@@ -860,7 +860,6 @@ static bool construct(
struct dc_context *ctx = dc->ctx;
struct dc_firmware_info info;
struct dc_bios *bp;
- struct dm_pp_static_clock_info static_clk_info = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -908,11 +907,11 @@ static bool construct(
}
}
- pool->base.dccg = dce_dccg_create(ctx,
+ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.dccg == NULL) {
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -938,12 +937,6 @@ static bool construct(
goto res_create_fail;
}
- /* get static clock information for PPLIB or firmware, save
- * max_clock_state
- */
- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.dccg->max_clks_state =
- static_clk_info.max_clocks_state;
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
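The MEMORY_TYPE_MULTIPLIER -> MEMORY_TYPE_MULTIPLIER_CZ rename feeds the bandwidth formula YCLK = UMACLK * memory-type multiplier (see the comment retained in the dce112 hunk below). Assuming the Carrizo-class multiplier is 4, the hardcoded fallback above works out to yclk_khz = 250000 * 4 = 1,000,000 kHz, i.e. a 1 GHz effective memory clock.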
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 1f7f25013217..52d50e24a995 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -64,65 +64,37 @@ static const struct dce110_compressor_reg_offsets reg_offsets[] = {
static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
-enum fbc_idle_force {
- /* Bit 0 - Display registers updated */
- FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
-
- /* Bit 2 - FBC_GRPH_COMP_EN register updated */
- FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
- /* Bit 3 - FBC_SRC_SEL register updated */
- FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
- /* Bit 4 - FBC_MIN_COMPRESSION register updated */
- FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
- /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
- FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
- /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
- FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
- /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
- FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
-
- /* Bit 24 - Memory write to region 0 defined by MC registers. */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
- /* Bit 25 - Memory write to region 1 defined by MC registers */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
- /* Bit 26 - Memory write to region 2 defined by MC registers */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
- /* Bit 27 - Memory write to region 3 defined by MC registers. */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
-
- /* Bit 28 - Memory write from any client other than MCIF */
- FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
- /* Bit 29 - CG statics screen signal is inactive */
- FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
-};
-
-
static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
{
return 256 * ((pixels + 255) / 256);
}
-static void reset_lb_on_vblank(struct dc_context *ctx)
+static void reset_lb_on_vblank(struct compressor *compressor, uint32_t crtc_inst)
{
- uint32_t value, frame_count;
+ uint32_t value;
+ uint32_t frame_count;
+ uint32_t status_pos;
uint32_t retry = 0;
- uint32_t status_pos =
- dm_read_reg(ctx, mmCRTC_STATUS_POSITION);
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+
+ cp110->offsets = reg_offsets[crtc_inst];
+
+ status_pos = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION));
/* Only if CRTC is enabled and counter is moving we wait for one frame. */
- if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) {
+ if (status_pos != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION))) {
/* Resetting LB on VBlank */
- value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+ value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
- dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
+ dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
- frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
+ frame_count = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT));
for (retry = 10000; retry > 0; retry--) {
- if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
+ if (frame_count != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT)))
break;
udelay(10);
}
@@ -130,13 +102,11 @@ static void reset_lb_on_vblank(struct dc_context *ctx)
dm_error("Frame count did not increase for 100ms.\n");
/* Resetting LB on VBlank */
- value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+ value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
- dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
-
+ dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
}
-
}
static void wait_for_fbc_state_changed(
@@ -226,10 +196,10 @@ void dce110_compressor_enable_fbc(
uint32_t addr;
uint32_t value, misc_value;
-
addr = mmFBC_CNTL;
value = dm_read_reg(compressor->ctx, addr);
set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ /* params->inst is a valid HW CRTC instance, counting from 0 */
set_reg_field_value(
value,
params->inst,
@@ -238,8 +208,10 @@ void dce110_compressor_enable_fbc(
/* Keep track of enum controller_id FBC is attached to */
compressor->is_enabled = true;
- compressor->attached_inst = params->inst;
- cp110->offsets = reg_offsets[params->inst];
+ /* attached_inst is the SW CRTC instance, counting from 1;
+ * 0 (CONTROLLER_ID_UNDEFINED) means no CRTC is attached.
+ */
+ compressor->attached_inst = params->inst + CONTROLLER_ID_D0;
/* Toggle it as there is bug in HW */
set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
@@ -268,9 +240,10 @@ void dce110_compressor_enable_fbc(
void dce110_compressor_disable_fbc(struct compressor *compressor)
{
struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+ uint32_t crtc_inst = 0;
if (compressor->options.bits.FBC_SUPPORT) {
- if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ if (dce110_compressor_is_fbc_enabled_in_hw(compressor, &crtc_inst)) {
uint32_t reg_data;
/* Turn off compression */
reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
@@ -284,8 +257,10 @@ void dce110_compressor_disable_fbc(struct compressor *compressor)
wait_for_fbc_state_changed(cp110, false);
}
- /* Sync line buffer - dce100/110 only*/
- reset_lb_on_vblank(compressor->ctx);
+ /* Sync the line buffer the FBC was attached to (DCE100/110 only) */
+ if (crtc_inst > CONTROLLER_ID_UNDEFINED && crtc_inst < CONTROLLER_ID_D3)
+ reset_lb_on_vblank(compressor,
+ crtc_inst - CONTROLLER_ID_D0);
}
}
@@ -328,6 +303,8 @@ void dce110_compressor_program_compressed_surface_address_and_pitch(
uint32_t compressed_surf_address_low_part =
compressor->compr_surface_address.addr.low_part;
+ cp110->offsets = reg_offsets[params->inst];
+
/* Clear content first. */
dm_write_reg(
compressor->ctx,
@@ -410,13 +387,7 @@ void dce110_compressor_set_fbc_invalidation_triggers(
value = dm_read_reg(compressor->ctx, addr);
set_reg_field_value(
value,
- fbc_trigger |
- FBC_IDLE_FORCE_GRPH_COMP_EN |
- FBC_IDLE_FORCE_SRC_SEL_CHANGE |
- FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
- FBC_IDLE_FORCE_ALPHA_COMP_EN |
- FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
- FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ fbc_trigger,
FBC_IDLE_FORCE_CLEAR_MASK,
FBC_IDLE_FORCE_CLEAR_MASK);
dm_write_reg(compressor->ctx, addr, value);
@@ -549,7 +520,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor,
compressor->base.channel_interleave_size = 0;
compressor->base.dram_channels_num = 0;
compressor->base.lpt_channels_num = 0;
- compressor->base.attached_inst = 0;
+ compressor->base.attached_inst = CONTROLLER_ID_UNDEFINED;
compressor->base.is_enabled = false;
compressor->base.funcs = &dce110_compressor_funcs;
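The compressor rework distinguishes HW CRTC instances (counting from 0) from SW controller IDs (counting from CONTROLLER_ID_D0, with CONTROLLER_ID_UNDEFINED = 0 meaning "not attached"). The two conversions, as used in the enable and disable paths above:

	/* attach: record the SW controller ID, which is never 0 */
	compressor->attached_inst = params->inst + CONTROLLER_ID_D0;

	/* detach: recover the HW instance before resetting its line buffer */
	if (crtc_inst > CONTROLLER_ID_UNDEFINED && crtc_inst < CONTROLLER_ID_D3)
		reset_lb_on_vblank(compressor, crtc_inst - CONTROLLER_ID_D0);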
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index b75ede5f84f7..6349ba7bec7c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -548,14 +548,14 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
regamma_params->hw_points_num = hw_points;
- i = 1;
- for (k = 0; k < 16 && i < 16; k++) {
+ k = 0;
+ for (i = 1; i < 16; i++) {
if (seg_distr[k] != -1) {
regamma_params->arr_curve_points[k].segments_num = seg_distr[k];
regamma_params->arr_curve_points[i].offset =
regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
}
- i++;
+ k++;
}
if (seg_distr[k] != -1)
@@ -1085,7 +1085,6 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
link->dc->hwss.edp_backlight_control(link, true);
- stream->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL;
}
}
void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
@@ -1192,8 +1191,8 @@ static void build_audio_output(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
- state->dis_clk->funcs->get_dp_ref_clk_frequency(
- state->dis_clk);
+ state->dccg->funcs->get_dp_ref_clk_frequency(
+ state->dccg);
}
audio_output->pll_info.feed_back_divider =
@@ -1547,6 +1546,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
int i;
struct dc_link *edp_link_to_turnoff = NULL;
struct dc_link *edp_link = get_link_for_edp(dc);
+ struct dc_bios *bios = dc->ctx->dc_bios;
bool can_edp_fast_boot_optimize = false;
bool apply_edp_fast_boot_optimization = false;
@@ -1573,6 +1573,20 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
context->streams[i]->apply_edp_fast_boot_optimization = true;
apply_edp_fast_boot_optimization = true;
+
+ /* After resume from S4/S5 the VBIOS may have posted the eDP panel,
+ * so the previous dpms_off state no longer makes sense.
+ * Align the SW dpms_off state with the HW state by checking the
+ * VBIOS scratch register.
+ */
+ if (bios->funcs->is_active_display) {
+ const struct connector_device_tag_info *device_tag = &(edp_link->device_tag);
+
+ if (bios->funcs->is_active_display(bios,
+ context->streams[i]->signal,
+ device_tag))
+ context->streams[i]->dpms_off = false;
+ }
}
}
}
@@ -1736,51 +1750,29 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
if (events->force_trigger)
value |= 0x1;
- value |= 0x84;
+ if (num_pipes) {
+ struct dc *dc = pipe_ctx[0]->stream->ctx->dc;
+
+ if (dc->fbc_compressor)
+ value |= 0x84;
+ }
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
}
-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet
- */
-static uint32_t get_max_pixel_clock_for_all_paths(
- struct dc *dc,
- struct dc_state *context)
-{
- uint32_t max_pix_clk = 0;
- int i;
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == NULL)
- continue;
-
- /* do not check under lay */
- if (pipe_ctx->top_pipe)
- continue;
-
- if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
- max_pix_clk =
- pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
- }
-
- return max_pix_clk;
-}
-
/*
* Check if FBC can be enabled
*/
static bool should_enable_fbc(struct dc *dc,
- struct dc_state *context,
- uint32_t *pipe_idx)
+ struct dc_state *context,
+ uint32_t *pipe_idx)
{
uint32_t i;
struct pipe_ctx *pipe_ctx = NULL;
struct resource_context *res_ctx = &context->res_ctx;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
ASSERT(dc->fbc_compressor);
@@ -1795,14 +1787,28 @@ static bool should_enable_fbc(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (res_ctx->pipe_ctx[i].stream) {
+
pipe_ctx = &res_ctx->pipe_ctx[i];
- *pipe_idx = i;
- break;
+
+ if (!pipe_ctx)
+ continue;
+
+ /* fbc not applicable on underlay pipe */
+ if (pipe_ctx->pipe_idx != underlay_idx) {
+ *pipe_idx = i;
+ break;
+ }
}
}
- /* Pipe context should be found */
- ASSERT(pipe_ctx);
+ if (i == dc->res_pool->pipe_count)
+ return false;
+
+ if (!pipe_ctx->stream->sink)
+ return false;
+
+ if (!pipe_ctx->stream->sink->link)
+ return false;
/* Only supports eDP */
if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
@@ -1826,8 +1832,9 @@ static bool should_enable_fbc(struct dc *dc,
/*
* Enable FBC
*/
-static void enable_fbc(struct dc *dc,
- struct dc_state *context)
+static void enable_fbc(
+ struct dc *dc,
+ struct dc_state *context)
{
uint32_t pipe_idx = 0;
@@ -1837,10 +1844,9 @@ static void enable_fbc(struct dc *dc,
struct compressor *compr = dc->fbc_compressor;
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
-
params.source_view_width = pipe_ctx->stream->timing.h_addressable;
params.source_view_height = pipe_ctx->stream->timing.v_addressable;
-
+ params.inst = pipe_ctx->stream_res.tg->inst;
compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
compr->funcs->surface_address_and_pitch(compr, &params);
@@ -2055,10 +2061,10 @@ enum dc_status dce110_apply_ctx_to_hw(
return status;
}
- dcb->funcs->set_scratch_critical_state(dcb, false);
-
if (dc->fbc_compressor)
- enable_fbc(dc, context);
+ enable_fbc(dc, dc->current_state);
+
+ dcb->funcs->set_scratch_critical_state(dcb, false);
return DC_OK;
}
@@ -2291,7 +2297,7 @@ static void dce110_enable_per_frame_crtc_position_reset(
int i;
gsl_params.gsl_group = 0;
- gsl_params.gsl_master = grouped_pipes[0]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst;
+ gsl_params.gsl_master = 0;
for (i = 0; i < group_size; i++)
grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock(
@@ -2380,191 +2386,33 @@ static void init_hw(struct dc *dc)
}
-void dce110_fill_display_configs(
- const struct dc_state *context,
- struct dm_pp_display_configuration *pp_display_cfg)
-{
- int j;
- int num_cfgs = 0;
-
- for (j = 0; j < context->stream_count; j++) {
- int k;
-
- const struct dc_stream_state *stream = context->streams[j];
- struct dm_pp_single_disp_config *cfg =
- &pp_display_cfg->disp_configs[num_cfgs];
- const struct pipe_ctx *pipe_ctx = NULL;
-
- for (k = 0; k < MAX_PIPES; k++)
- if (stream == context->res_ctx.pipe_ctx[k].stream) {
- pipe_ctx = &context->res_ctx.pipe_ctx[k];
- break;
- }
-
- ASSERT(pipe_ctx != NULL);
-
- /* only notify active stream */
- if (stream->dpms_off)
- continue;
-
- num_cfgs++;
- cfg->signal = pipe_ctx->stream->signal;
- cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
- cfg->src_height = stream->src.height;
- cfg->src_width = stream->src.width;
- cfg->ddi_channel_mapping =
- stream->sink->link->ddi_channel_mapping.raw;
- cfg->transmitter =
- stream->sink->link->link_enc->transmitter;
- cfg->link_settings.lane_count =
- stream->sink->link->cur_link_settings.lane_count;
- cfg->link_settings.link_rate =
- stream->sink->link->cur_link_settings.link_rate;
- cfg->link_settings.link_spread =
- stream->sink->link->cur_link_settings.link_spread;
- cfg->sym_clock = stream->phy_pix_clk;
- /* Round v_refresh*/
- cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
- cfg->v_refresh /= stream->timing.h_total;
- cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
- / stream->timing.v_total;
- }
-
- pp_display_cfg->display_count = num_cfgs;
-}
-
-uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
-{
- uint8_t j;
- uint32_t min_vertical_blank_time = -1;
-
- for (j = 0; j < context->stream_count; j++) {
- struct dc_stream_state *stream = context->streams[j];
- uint32_t vertical_blank_in_pixels = 0;
- uint32_t vertical_blank_time = 0;
-
- vertical_blank_in_pixels = stream->timing.h_total *
- (stream->timing.v_total
- - stream->timing.v_addressable);
-
- vertical_blank_time = vertical_blank_in_pixels
- * 1000 / stream->timing.pix_clk_khz;
-
- if (min_vertical_blank_time > vertical_blank_time)
- min_vertical_blank_time = vertical_blank_time;
- }
-
- return min_vertical_blank_time;
-}
-
-static int determine_sclk_from_bounding_box(
- const struct dc *dc,
- int required_sclk)
-{
- int i;
-
- /*
- * Some asics do not give us sclk levels, so we just report the actual
- * required sclk
- */
- if (dc->sclk_lvls.num_levels == 0)
- return required_sclk;
-
- for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
- if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
- return dc->sclk_lvls.clocks_in_khz[i];
- }
- /*
- * even maximum level could not satisfy requirement, this
- * is unexpected at this stage, should have been caught at
- * validation time
- */
- ASSERT(0);
- return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
-}
-static void pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context)
+void dce110_prepare_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
{
- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
- pp_display_cfg->all_displays_in_sync =
- context->bw.dce.all_displays_in_sync;
- pp_display_cfg->nb_pstate_switch_disable =
- context->bw.dce.nbp_state_change_enable == false;
- pp_display_cfg->cpu_cc6_disable =
- context->bw.dce.cpuc_state_change_enable == false;
- pp_display_cfg->cpu_pstate_disable =
- context->bw.dce.cpup_state_change_enable == false;
- pp_display_cfg->cpu_pstate_separation_time =
- context->bw.dce.blackout_recovery_time_us;
-
- pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
- / MEMORY_TYPE_MULTIPLIER;
-
- pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
- dc,
- context->bw.dce.sclk_khz);
-
- pp_display_cfg->min_engine_clock_deep_sleep_khz
- = context->bw.dce.sclk_deep_sleep_khz;
-
- pp_display_cfg->avail_mclk_switch_time_us =
- dce110_get_min_vblank_time_us(context);
- /* TODO: dce11.2*/
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+ struct clk_mgr *dccg = dc->res_pool->clk_mgr;
- pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
- dce110_fill_display_configs(context, pp_display_cfg);
-
- /* TODO: is this still applicable?*/
- if (pp_display_cfg->display_count == 1) {
- const struct dc_crtc_timing *timing =
- &context->streams[0]->timing;
-
- pp_display_cfg->crtc_index =
- pp_display_cfg->disp_configs[0].pipe_idx;
- pp_display_cfg->line_time_in_us = timing->h_total * 1000
- / timing->pix_clk_khz;
- }
-
- if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
- struct dm_pp_display_configuration)) != 0)
- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
- dc->prev_display_config = *pp_display_cfg;
+ dccg->funcs->update_clocks(
+ dccg,
+ context,
+ false);
}
-static void dce110_set_bandwidth(
+void dce110_optimize_bandwidth(
struct dc *dc,
- struct dc_state *context,
- bool decrease_allowed)
+ struct dc_state *context)
{
- struct dc_clocks req_clks;
- struct dccg *dccg = dc->res_pool->dccg;
-
- req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
- req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+ struct clk_mgr *dccg = dc->res_pool->clk_mgr;
- if (decrease_allowed)
- dce110_set_displaymarks(dc, context);
- else
- dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
-
- if (dccg->funcs->update_dfs_bypass)
- dccg->funcs->update_dfs_bypass(
- dccg,
- dc,
- context,
- req_clks.dispclk_khz);
+ dce110_set_displaymarks(dc, context);
dccg->funcs->update_clocks(
dccg,
- &req_clks,
- decrease_allowed);
- pplib_apply_display_requirements(dc, context);
+ context,
+ true);
}
static void dce110_program_front_end_for_pipe(
@@ -2575,7 +2423,6 @@ static void dce110_program_front_end_for_pipe(
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
- unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
unsigned int i;
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2616,15 +2463,6 @@ static void dce110_program_front_end_for_pipe(
program_scaler(dc, pipe_ctx);
- /* fbc not applicable on Underlay pipe */
- if (dc->fbc_compressor && old_pipe->stream &&
- pipe_ctx->pipe_idx != underlay_idx) {
- if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
- dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
- else
- enable_fbc(dc, dc->current_state);
- }
-
mi->funcs->mem_input_program_surface_config(
mi,
plane_state->format,
@@ -2701,6 +2539,9 @@ static void dce110_apply_ctx_for_surface(
if (num_planes == 0)
return;
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -2743,6 +2584,9 @@ static void dce110_apply_ctx_for_surface(
(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
+
+ if (dc->fbc_compressor)
+ enable_fbc(dc, dc->current_state);
}
static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
@@ -2769,28 +2613,6 @@ static void dce110_wait_for_mpcc_disconnect(
/* do nothing*/
}
-static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
- enum dc_color_space colorspace,
- uint16_t *matrix)
-{
- int i;
- struct out_csc_color_matrix tbl_entry;
-
- if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
- == true) {
- enum dc_color_space color_space =
- pipe_ctx->stream->output_color_space;
-
- //uint16_t matrix[12];
- for (i = 0; i < 12; i++)
- tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
-
- tbl_entry.color_space = color_space;
- //tbl_entry.regval = matrix;
- pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.xfm, &tbl_entry);
- }
-}
-
void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -2839,13 +2661,8 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.xfm, attributes);
}
-static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
-
-static void optimize_shared_resources(struct dc *dc) {}
-
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
- .program_csc_matrix = program_csc_matrix,
.init_hw = init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
@@ -2868,7 +2685,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.enable_display_power_gating = dce110_enable_display_power_gating,
.disable_plane = dce110_power_down_fe,
.pipe_control_lock = dce_pipe_control_lock,
- .set_bandwidth = dce110_set_bandwidth,
+ .prepare_bandwidth = dce110_prepare_bandwidth,
+ .optimize_bandwidth = dce110_optimize_bandwidth,
.set_drr = set_drr,
.get_position = get_position,
.set_static_screen_control = set_static_screen_control,
@@ -2877,9 +2695,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.setup_stereo = NULL,
.set_avmute = dce110_set_avmute,
.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
- .ready_shared_resources = ready_shared_resources,
- .optimize_shared_resources = optimize_shared_resources,
- .pplib_apply_display_requirements = pplib_apply_display_requirements,
.edp_backlight_control = hwss_edp_backlight_control,
.edp_power_control = hwss_edp_power_control,
.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
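Two hunks in this file also reorder FBC handling: rather than toggling compression inside the per-pipe front-end programming, dce110_apply_ctx_for_surface() now brackets the whole update. The resulting shape, condensed:

	if (dc->fbc_compressor)		/* quiesce before programming */
		dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);

	/* ... program every pipe, then release the pipe locks ... */

	if (dc->fbc_compressor)		/* re-evaluate and re-enable */
		enable_fbc(dc, dc->current_state);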
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index d6db3dbd9015..cd3e36d52a52 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -40,7 +40,6 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc_state *context);
-
void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
@@ -64,11 +63,13 @@ void dce110_set_safe_displaymarks(
struct resource_context *res_ctx,
const struct resource_pool *pool);
-void dce110_fill_display_configs(
- const struct dc_state *context,
- struct dm_pp_display_configuration *pp_display_cfg);
+void dce110_prepare_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
-uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
+void dce110_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index e3624ca24574..e33d11785b1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -31,6 +31,7 @@
#include "resource.h"
#include "dce110/dce110_resource.h"
+#include "dce/dce_clk_mgr.h"
#include "include/irq_service_interface.h"
#include "dce/dce_audio.h"
#include "dce110/dce110_timing_generator.h"
@@ -45,7 +46,6 @@
#include "dce110/dce110_transform_v.h"
#include "dce/dce_opp.h"
#include "dce110/dce110_opp_v.h"
-#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -760,8 +760,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.dccg != NULL)
- dce_dccg_destroy(&pool->base.dccg);
+ if (pool->base.clk_mgr != NULL)
+ dce_clk_mgr_destroy(&pool->base.clk_mgr);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -1173,12 +1173,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
&clks);
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+ clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+ clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+ clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
}
@@ -1201,7 +1201,6 @@ static bool construct(
struct dc_context *ctx = dc->ctx;
struct dc_firmware_info info;
struct dc_bios *bp;
- struct dm_pp_static_clock_info static_clk_info = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -1257,11 +1256,11 @@ static bool construct(
}
}
- pool->base.dccg = dce110_dccg_create(ctx,
+ pool->base.clk_mgr = dce110_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.dccg == NULL) {
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1287,13 +1286,6 @@ static bool construct(
goto res_create_fail;
}
- /* get static clock information for PPLIB or firmware, save
- * max_clock_state
- */
- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.dccg->max_clks_state =
- static_clk_info.max_clocks_state;
-
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
@@ -1362,7 +1354,8 @@ static bool construct(
pool->base.sw_i2cs[i] = NULL;
}
- dc->fbc_compressor = dce110_compressor_create(ctx);
+ if (dc->config.fbc_support)
+ dc->fbc_compressor = dce110_compressor_create(ctx);
if (!underlay_create(ctx, &pool->base))
goto res_create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 3ce79c208ddf..969d4e72dc94 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -35,6 +35,7 @@
#include "irq/dce110/irq_service_dce110.h"
+#include "dce/dce_clk_mgr.h"
#include "dce/dce_mem_input.h"
#include "dce/dce_transform.h"
#include "dce/dce_link_encoder.h"
@@ -42,7 +43,6 @@
#include "dce/dce_audio.h"
#include "dce/dce_opp.h"
#include "dce/dce_ipp.h"
-#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_hwseq.h"
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -551,7 +551,8 @@ static struct transform *dce112_transform_create(
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
- .ycbcr420_supported = true,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = false,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -749,8 +750,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.dccg != NULL)
- dce_dccg_destroy(&pool->base.dccg);
+ if (pool->base.clk_mgr != NULL)
+ dce_clk_mgr_destroy(&pool->base.clk_mgr);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -1015,12 +1016,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
&clks);
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+ clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+ clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+ clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
return;
@@ -1056,12 +1057,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+ mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -1131,7 +1132,6 @@ static bool construct(
{
unsigned int i;
struct dc_context *ctx = dc->ctx;
- struct dm_pp_static_clock_info static_clk_info = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -1199,11 +1199,11 @@ static bool construct(
}
}
- pool->base.dccg = dce112_dccg_create(ctx,
+ pool->base.clk_mgr = dce112_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.dccg == NULL) {
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1229,13 +1229,6 @@ static bool construct(
goto res_create_fail;
}
- /* get static clock information for PPLIB or firmware, save
- * max_clock_state
- */
- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.dccg->max_clks_state =
- static_clk_info.max_clocks_state;
-
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 79ab5f9f9115..f12696674eb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -31,6 +31,7 @@
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dce120_resource.h"
+
#include "dce112/dce112_resource.h"
#include "dce110/dce110_resource.h"
@@ -39,7 +40,6 @@
#include "irq/dce120/irq_service_dce120.h"
#include "dce/dce_opp.h"
#include "dce/dce_clock_source.h"
-#include "dce/dce_clocks.h"
#include "dce/dce_ipp.h"
#include "dce/dce_mem_input.h"
@@ -47,6 +47,7 @@
#include "dce120/dce120_hw_sequencer.h"
#include "dce/dce_transform.h"
+#include "dce/dce_clk_mgr.h"
#include "dce/dce_audio.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
@@ -573,8 +574,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.dccg != NULL)
- dce_dccg_destroy(&pool->base.dccg);
+ if (pool->base.clk_mgr != NULL)
+ dce_clk_mgr_destroy(&pool->base.clk_mgr);
}
static void read_dce_straps(
@@ -606,7 +607,8 @@ static struct audio *create_audio(
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
- .ycbcr420_supported = true,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = false,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -834,12 +836,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+ mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -973,8 +975,8 @@ static bool construct(
}
}
- pool->base.dccg = dce120_dccg_create(ctx);
- if (pool->base.dccg == NULL) {
+ pool->base.clk_mgr = dce120_clk_mgr_create(ctx);
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto dccg_create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index 6c6a1a16af19..a60a90e68d91 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -76,6 +76,7 @@ void dce80_hw_sequencer_construct(struct dc *dc)
dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.pipe_control_lock = dce_pipe_control_lock;
- dc->hwss.set_bandwidth = dce100_set_bandwidth;
+ dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
+ dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index d68f951f9869..cdd1d6b7b9f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -37,14 +37,13 @@
#include "dce110/dce110_timing_generator.h"
#include "dce110/dce110_resource.h"
#include "dce80/dce80_timing_generator.h"
+#include "dce/dce_clk_mgr.h"
#include "dce/dce_mem_input.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
-#include "dce/dce_mem_input.h"
#include "dce/dce_ipp.h"
#include "dce/dce_transform.h"
#include "dce/dce_opp.h"
-#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
@@ -155,15 +154,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -779,8 +778,8 @@ static void destruct(struct dce110_resource_pool *pool)
}
}
- if (pool->base.dccg != NULL)
- dce_dccg_destroy(&pool->base.dccg);
+ if (pool->base.clk_mgr != NULL)
+ dce_clk_mgr_destroy(&pool->base.clk_mgr);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -793,7 +792,7 @@ bool dce80_validate_bandwidth(
{
/* TODO implement when needed but for now hardcode max value*/
context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
return true;
}
@@ -855,7 +854,6 @@ static bool dce80_construct(
struct dc_context *ctx = dc->ctx;
struct dc_firmware_info info;
struct dc_bios *bp;
- struct dm_pp_static_clock_info static_clk_info = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -918,11 +916,11 @@ static bool dce80_construct(
}
}
- pool->base.dccg = dce_dccg_create(ctx,
+ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.dccg == NULL) {
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -948,10 +946,6 @@ static bool dce80_construct(
goto res_create_fail;
}
- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.dccg->max_clks_state =
- static_clk_info.max_clocks_state;
-
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
@@ -1065,7 +1059,6 @@ static bool dce81_construct(
struct dc_context *ctx = dc->ctx;
struct dc_firmware_info info;
struct dc_bios *bp;
- struct dm_pp_static_clock_info static_clk_info = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -1128,11 +1121,11 @@ static bool dce81_construct(
}
}
- pool->base.dccg = dce_dccg_create(ctx,
+ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.dccg == NULL) {
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1158,10 +1151,6 @@ static bool dce81_construct(
goto res_create_fail;
}
- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.dccg->max_clks_state =
- static_clk_info.max_clocks_state;
-
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
@@ -1275,7 +1264,6 @@ static bool dce83_construct(
struct dc_context *ctx = dc->ctx;
struct dc_firmware_info info;
struct dc_bios *bp;
- struct dm_pp_static_clock_info static_clk_info = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -1334,11 +1322,11 @@ static bool dce83_construct(
}
}
- pool->base.dccg = dce_dccg_create(ctx,
+ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.dccg == NULL) {
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1364,10 +1352,6 @@ static bool dce83_construct(
goto res_create_fail;
}
- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.dccg->max_clks_state =
- static_clk_info.max_clocks_state;
-
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 032f872be89c..55f293c8a3c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -24,7 +24,7 @@
DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
- dcn10_hubp.o dcn10_mpc.o \
+ dcn10_hubp.o dcn10_mpc.o dcn10_clk_mgr.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
new file mode 100644
index 000000000000..54abedbf1b43
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn10_clk_mgr.h"
+
+#include "reg_helper.h"
+#include "core_types.h"
+
+#define TO_DCE_CLK_MGR(clocks)\
+ container_of(clocks, struct dce_clk_mgr, base)
+
+#define REG(reg) \
+ (clk_mgr_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
+
+#define CTX \
+ clk_mgr_dce->base.ctx
+#define DC_LOGGER \
+ clk_mgr->ctx->logger
+
+void dcn1_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
+ pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz;
+ pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
+ pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
+{
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+ bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz;
+ int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+ bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz;
+
+ /* increasing clock: current div is 0, requested div is 1 */
+ if (dispclk_increase) {
+ /* already divided by 2, no need to reach target clk with 2 steps*/
+ if (cur_dpp_div)
+ return new_clocks->dispclk_khz;
+
+ /* requested disp clk is at or below the max supported dpp clk,
+ * no need to reach the target clk in two steps.
+ */
+ if (new_clocks->dispclk_khz <= disp_clk_threshold)
+ return new_clocks->dispclk_khz;
+
+ /* requested dpp clk does not need divide-by-2; still within threshold */
+ if (!request_dpp_div)
+ return new_clocks->dispclk_khz;
+
+ } else {
+ /* decreasing clock: current dppclk is divided by 2,
+ * requested dppclk is not.
+ */
+
+ /* current dpp clk not divided by 2, no need to ramp*/
+ if (!cur_dpp_div)
+ return new_clocks->dispclk_khz;
+
+ /* current disp clk is lower than current maximum dpp clk,
+ * no need to ramp
+ */
+ if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold)
+ return new_clocks->dispclk_khz;
+
+ /* request dpp clk need to be divided by 2 */
+ if (request_dpp_div)
+ return new_clocks->dispclk_khz;
+ }
+
+ return disp_clk_threshold;
+}
+
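
The threshold decision above is easiest to see with concrete numbers. Below is a standalone sketch of the same rule in plain C; the values in main() are illustrative, not taken from hardware.

	#include <stdbool.h>
	#include <stdio.h>

	/* Standalone model of dcn1_determine_dppclk_threshold(): when raising
	 * dispclk past what the DPP can take undivided, stop at the threshold
	 * first, flip the DPP divider, then finish the ramp. */
	static int dppclk_threshold(int cur_disp, int cur_dpp,
				    int new_disp, int new_dpp, int max_dpp)
	{
		bool cur_div = cur_disp > cur_dpp;	/* divider already engaged */
		bool req_div = new_disp > new_dpp;	/* divider requested */

		if (new_disp > cur_disp) {		/* increasing */
			if (cur_div || new_disp <= max_dpp || !req_div)
				return new_disp;	/* safe in one step */
		} else {				/* decreasing */
			if (!cur_div || cur_disp <= max_dpp || req_div)
				return new_disp;
		}
		return max_dpp;				/* intermediate step needed */
	}

	int main(void)
	{
		/* raise 400 -> 1200 with a 600 max dpp clk: two steps via 600 */
		printf("%d\n", dppclk_threshold(400, 400, 1200, 600, 600));
		return 0;
	}
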
+static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
+{
+ struct dc *dc = clk_mgr->ctx->dc;
+ int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks);
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+ int i;
+
+ /* set disp clk to dpp clk threshold */
+ dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold);
+
+ /* update the requested dpp clk division option */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+ pipe_ctx->plane_res.dpp,
+ request_dpp_div,
+ true);
+ }
+
+ /* If the target clk differs from the dppclk threshold, set the target clock */
+ if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+ dce112_set_clock(clk_mgr, new_clocks->dispclk_khz);
+
+ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+ clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
+ clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+}
+
+static int get_active_display_cnt(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, display_count;
+
+ display_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_state *stream = context->streams[i];
+
+ /*
+ * Only count active streams and virtual streams.
+ * Virtual streams must be counted to work around the
+ * headless case: HPD does not fire when the system is
+ * in S0i2.
+ */
+ if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
+ display_count++;
+ }
+
+ return display_count;
+}
+
+static void notify_deep_sleep_dcfclk_to_smu(
+ struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz)
+{
+ int min_dcef_deep_sleep_clk_mhz; // minimum required DCEF deep-sleep clock in MHz
+ /*
+ * If the function pointer is not set up, this request is
+ * sent as part of pplib_apply_display_requirements instead,
+ * so just return.
+ */
+ if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk)
+ return;
+
+ min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up
+ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
+}
+
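
The kHz-to-MHz conversion above uses the standard integer ceiling-division idiom, so the SMU is never asked for less than the required deep-sleep clock. A minimal check with illustrative values:

	#include <assert.h>

	/* (khz + 999) / 1000 rounds up, never down. */
	static int khz_to_mhz_ceil(int khz)
	{
		return (khz + 999) / 1000;
	}

	int main(void)
	{
		assert(khz_to_mhz_ceil(300000) == 300);
		assert(khz_to_mhz_ceil(300001) == 301);	/* rounded up */
		return 0;
	}
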
+static void notify_hard_min_dcfclk_to_smu(
+ struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz)
+{
+ int min_dcf_clk_mhz; // minimum required DCF clock in MHz
+
+ /*
+ * If the function pointer is not set up, this request is
+ * sent as part of pplib_apply_display_requirements instead,
+ * so just return.
+ */
+ if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq)
+ return;
+
+ min_dcf_clk_mhz = min_dcf_clk_khz / 1000;
+
+ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz);
+}
+
+static void notify_hard_min_fclk_to_smu(
+ struct pp_smu_funcs_rv *pp_smu, int min_f_clk_khz)
+{
+ int min_f_clk_mhz; // minimum required F clock in MHz
+
+ /*
+ * If the function pointer is not set up, this request is
+ * sent as part of pplib_apply_display_requirements instead,
+ * so just return.
+ */
+ if (!pp_smu || !pp_smu->set_hard_min_fclk_by_freq)
+ return;
+
+ min_f_clk_mhz = min_f_clk_khz / 1000;
+
+ pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, min_f_clk_mhz);
+}
+
+static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dc *dc = clk_mgr->ctx->dc;
+ struct dc_clocks *new_clocks = &context->bw.dcn.clk;
+ struct pp_smu_display_requirement_rv *smu_req_cur =
+ &dc->res_pool->pp_smu_req;
+ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ uint32_t requested_dcf_clock_in_khz = 0;
+ bool send_request_to_increase = false;
+ bool send_request_to_lower = false;
+ int display_count;
+
+ bool enter_display_off = false;
+
+ display_count = get_active_display_cnt(dc, context);
+
+ if (display_count == 0)
+ enter_display_off = true;
+
+ if (enter_display_off == safe_to_lower) {
+ /*
+ * Notify the SMU of the number of active displays.
+ * If the function pointer is not set up, this message is
+ * sent as part of pplib_apply_display_requirements instead.
+ */
+ if (pp_smu->set_display_count)
+ pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
+ else
+ smu_req.display_count = display_count;
+
+ }
+
+ if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
+ || new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz
+ || new_clocks->fclk_khz > clk_mgr->clks.fclk_khz
+ || new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz)
+ send_request_to_increase = true;
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
+ clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ // F Clock
+ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
+ clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
+ smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000;
+
+ notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz);
+
+ send_request_to_lower = true;
+ }
+
+ //DCF Clock
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
+ clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000;
+
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
+ clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz / 1000;
+
+ send_request_to_lower = true;
+ }
+
+ /* raise dcfclk before dppclk to make sure we have
+ * enough voltage to run dppclk
+ */
+ if (send_request_to_increase) {
+ /*use dcfclk to request voltage*/
+ requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+
+ notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
+
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+ notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
+ dcn1_pplib_apply_display_requirements(dc, context);
+ }
+
+ /* dcn1 dppclk is tied to dispclk */
+ /* program dispclk even when unchanged (==) as a w/a for
+ * sleep/resume clock ramping issues
+ */
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)
+ || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
+ dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
+ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ if (!send_request_to_increase && send_request_to_lower) {
+ /*use dcfclk to request voltage*/
+ requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+
+ notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
+
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+ notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
+ dcn1_pplib_apply_display_requirements(dc, context);
+ }
+
+ *smu_req_cur = smu_req;
+}
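
Every write in dcn1_update_clocks() is gated through should_set_clock(), which lives in the shared DCE clock manager rather than in this file. Judging from the call sites, the rule is: increases always go through, decreases only once safe_to_lower is set. A sketch consistent with that usage:

	/* Sketch of the gating rule implied by the call sites above; the
	 * real helper is in dce_clk_mgr.c, not part of this hunk. */
	static bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
	{
		return (safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk;
	}
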
+static const struct clk_mgr_funcs dcn1_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dcn1_update_clocks
+};
+
+struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
+{
+ struct dc_debug_options *debug = &ctx->dc->debug;
+ struct dc_bios *bp = ctx->dc_bios;
+ struct dc_firmware_info fw_info = { { 0 } };
+ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+ if (clk_mgr_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ clk_mgr_dce->base.ctx = ctx;
+ clk_mgr_dce->base.funcs = &dcn1_funcs;
+
+ clk_mgr_dce->dfs_bypass_disp_clk = 0;
+
+ clk_mgr_dce->dprefclk_ss_percentage = 0;
+ clk_mgr_dce->dprefclk_ss_divider = 1000;
+ clk_mgr_dce->ss_on_dprefclk = false;
+
+ clk_mgr_dce->dprefclk_khz = 600000;
+ if (bp->integrated_info)
+ clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+ clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr_dce->dentist_vco_freq_khz == 0)
+ clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_mgr_dce->dfs_bypass_enabled = true;
+
+ dce_clock_read_ss_info(clk_mgr_dce);
+
+ return &clk_mgr_dce->base;
+}
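
A resource-construct call site for this creator is not part of this hunk; presumably it mirrors the dce80_construct() pattern shown earlier in the patch:

	/* Illustrative, modeled on the dce80_construct() hunks above. */
	pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
	if (pool->base.clk_mgr == NULL) {
		dm_error("DC: failed to create display clock!\n");
		BREAK_TO_DEBUGGER();
		goto res_create_fail;
	}
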
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
new file mode 100644
index 000000000000..a995eda443a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN10_CLK_MGR_H__
+#define __DCN10_CLK_MGR_H__
+
+#include "../dce/dce_clk_mgr.h"
+
+struct clk_bypass {
+ uint32_t dcfclk_bypass;
+ uint32_t dispclk_bypass;
+ uint32_t dprefclk_bypass;
+};
+
+void dcn1_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context);
+
+struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
+
+#endif //__DCN10_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 5d95a997fd9f..7469333a2c8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -71,39 +71,39 @@ void cm_helper_program_xfer_func(
unsigned int i = 0;
REG_SET_2(reg->start_cntl_b, 0,
- exp_region_start, params->arr_points[0].custom_float_x,
+ exp_region_start, params->corner_points[0].blue.custom_float_x,
exp_resion_start_segment, 0);
REG_SET_2(reg->start_cntl_g, 0,
- exp_region_start, params->arr_points[0].custom_float_x,
+ exp_region_start, params->corner_points[0].green.custom_float_x,
exp_resion_start_segment, 0);
REG_SET_2(reg->start_cntl_r, 0,
- exp_region_start, params->arr_points[0].custom_float_x,
+ exp_region_start, params->corner_points[0].red.custom_float_x,
exp_resion_start_segment, 0);
REG_SET(reg->start_slope_cntl_b, 0,
- field_region_linear_slope, params->arr_points[0].custom_float_slope);
+ field_region_linear_slope, params->corner_points[0].blue.custom_float_slope);
REG_SET(reg->start_slope_cntl_g, 0,
- field_region_linear_slope, params->arr_points[0].custom_float_slope);
+ field_region_linear_slope, params->corner_points[0].green.custom_float_slope);
REG_SET(reg->start_slope_cntl_r, 0,
- field_region_linear_slope, params->arr_points[0].custom_float_slope);
+ field_region_linear_slope, params->corner_points[0].red.custom_float_slope);
REG_SET(reg->start_end_cntl1_b, 0,
- field_region_end, params->arr_points[1].custom_float_x);
+ field_region_end, params->corner_points[1].blue.custom_float_x);
REG_SET_2(reg->start_end_cntl2_b, 0,
- field_region_end_slope, params->arr_points[1].custom_float_slope,
- field_region_end_base, params->arr_points[1].custom_float_y);
+ field_region_end_slope, params->corner_points[1].blue.custom_float_slope,
+ field_region_end_base, params->corner_points[1].blue.custom_float_y);
REG_SET(reg->start_end_cntl1_g, 0,
- field_region_end, params->arr_points[1].custom_float_x);
+ field_region_end, params->corner_points[1].green.custom_float_x);
REG_SET_2(reg->start_end_cntl2_g, 0,
- field_region_end_slope, params->arr_points[1].custom_float_slope,
- field_region_end_base, params->arr_points[1].custom_float_y);
+ field_region_end_slope, params->corner_points[1].green.custom_float_slope,
+ field_region_end_base, params->corner_points[1].green.custom_float_y);
REG_SET(reg->start_end_cntl1_r, 0,
- field_region_end, params->arr_points[1].custom_float_x);
+ field_region_end, params->corner_points[1].red.custom_float_x);
REG_SET_2(reg->start_end_cntl2_r, 0,
- field_region_end_slope, params->arr_points[1].custom_float_slope,
- field_region_end_base, params->arr_points[1].custom_float_y);
+ field_region_end_slope, params->corner_points[1].red.custom_float_slope,
+ field_region_end_base, params->corner_points[1].red.custom_float_y);
for (reg_region_cur = reg->region_start;
reg_region_cur <= reg->region_end;
@@ -127,7 +127,7 @@ void cm_helper_program_xfer_func(
bool cm_helper_convert_to_custom_float(
struct pwl_result_data *rgb_resulted,
- struct curve_points *arr_points,
+ struct curve_points3 *corner_points,
uint32_t hw_points_num,
bool fixpoint)
{
@@ -141,20 +141,53 @@ bool cm_helper_convert_to_custom_float(
fmt.mantissa_bits = 12;
fmt.sign = false;
- if (!convert_to_custom_float_format(arr_points[0].x, &fmt,
- &arr_points[0].custom_float_x)) {
+ /* corner_points[0] - beginning base, slope offset for R,G,B
+ * corner_points[1] - end base, slope offset for R,G,B
+ */
+ if (!convert_to_custom_float_format(corner_points[0].red.x, &fmt,
+ &corner_points[0].red.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].green.x, &fmt,
+ &corner_points[0].green.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].blue.x, &fmt,
+ &corner_points[0].blue.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(arr_points[0].offset, &fmt,
- &arr_points[0].custom_float_offset)) {
+ if (!convert_to_custom_float_format(corner_points[0].red.offset, &fmt,
+ &corner_points[0].red.custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].green.offset, &fmt,
+ &corner_points[0].green.custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].blue.offset, &fmt,
+ &corner_points[0].blue.custom_float_offset)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(arr_points[0].slope, &fmt,
- &arr_points[0].custom_float_slope)) {
+ if (!convert_to_custom_float_format(corner_points[0].red.slope, &fmt,
+ &corner_points[0].red.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].green.slope, &fmt,
+ &corner_points[0].green.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].blue.slope, &fmt,
+ &corner_points[0].blue.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
@@ -162,22 +195,59 @@ bool cm_helper_convert_to_custom_float(
fmt.mantissa_bits = 10;
fmt.sign = false;
- if (!convert_to_custom_float_format(arr_points[1].x, &fmt,
- &arr_points[1].custom_float_x)) {
+ if (!convert_to_custom_float_format(corner_points[1].red.x, &fmt,
+ &corner_points[1].red.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
-
- if (fixpoint == true)
- arr_points[1].custom_float_y = dc_fixpt_clamp_u0d14(arr_points[1].y);
- else if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
- &arr_points[1].custom_float_y)) {
+ if (!convert_to_custom_float_format(corner_points[1].green.x, &fmt,
+ &corner_points[1].green.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].blue.x, &fmt,
+ &corner_points[1].blue.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
- &arr_points[1].custom_float_slope)) {
+ if (fixpoint == true) {
+ corner_points[1].red.custom_float_y =
+ dc_fixpt_clamp_u0d14(corner_points[1].red.y);
+ corner_points[1].green.custom_float_y =
+ dc_fixpt_clamp_u0d14(corner_points[1].green.y);
+ corner_points[1].blue.custom_float_y =
+ dc_fixpt_clamp_u0d14(corner_points[1].blue.y);
+ } else {
+ if (!convert_to_custom_float_format(corner_points[1].red.y,
+ &fmt, &corner_points[1].red.custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].green.y,
+ &fmt, &corner_points[1].green.custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].blue.y,
+ &fmt, &corner_points[1].blue.custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ }
+
+ if (!convert_to_custom_float_format(corner_points[1].red.slope, &fmt,
+ &corner_points[1].red.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].green.slope, &fmt,
+ &corner_points[1].green.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].blue.slope, &fmt,
+ &corner_points[1].blue.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
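
The per-channel expansion triplicates each convert_to_custom_float_format() call. If the repetition ever becomes a maintenance burden, a loop over the channel members would shrink it; a hypothetical helper, with the struct layout assumed from the accesses above:

	/* Hypothetical helper, not part of this patch. Assumes struct
	 * curve_points3 holds red/green/blue curve_points members, as the
	 * accesses above imply. */
	static bool convert_corner_point(struct curve_points3 *cp,
			const struct custom_float_format *fmt)
	{
		struct curve_points *ch[3] = { &cp->red, &cp->green, &cp->blue };
		int i;

		for (i = 0; i < 3; i++) {
			if (!convert_to_custom_float_format(ch[i]->x, fmt,
					&ch[i]->custom_float_x))
				return false;
			if (!convert_to_custom_float_format(ch[i]->offset, fmt,
					&ch[i]->custom_float_offset))
				return false;
			if (!convert_to_custom_float_format(ch[i]->slope, fmt,
					&ch[i]->custom_float_slope))
				return false;
		}
		return true;
	}
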
@@ -242,15 +312,10 @@ bool cm_helper_translate_curve_to_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params, bool fixpoint)
{
- struct curve_points *arr_points;
+ struct curve_points3 *corner_points;
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
- struct fixed31_32 y_r;
- struct fixed31_32 y_g;
- struct fixed31_32 y_b;
- struct fixed31_32 y1_min;
- struct fixed31_32 y3_max;
int32_t region_start, region_end;
int32_t i;
@@ -259,16 +324,16 @@ bool cm_helper_translate_curve_to_hw_format(
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
- PERF_TRACE();
+ PERF_TRACE_CTX(output_tf->ctx);
- arr_points = lut_params->arr_points;
+ corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;
memset(lut_params, 0, sizeof(struct pwl_params));
memset(seg_distr, 0, sizeof(seg_distr));
- if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22) {
/* 32 segments
* segments are from 2^-25 to 2^7
*/
@@ -327,31 +392,37 @@ bool cm_helper_translate_curve_to_hw_format(
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
- arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ // All 3 color channels have the same x
+ corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
- arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
- dc_fixpt_from_int(region_end));
+ corner_points[0].green.x = corner_points[0].red.x;
+ corner_points[0].blue.x = corner_points[0].red.x;
- y_r = rgb_resulted[0].red;
- y_g = rgb_resulted[0].green;
- y_b = rgb_resulted[0].blue;
+ corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ dc_fixpt_from_int(region_end));
+ corner_points[1].green.x = corner_points[1].red.x;
+ corner_points[1].blue.x = corner_points[1].red.x;
- y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
+ corner_points[0].red.y = rgb_resulted[0].red;
+ corner_points[0].green.y = rgb_resulted[0].green;
+ corner_points[0].blue.y = rgb_resulted[0].blue;
- arr_points[0].y = y1_min;
- arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
- y_r = rgb_resulted[hw_points - 1].red;
- y_g = rgb_resulted[hw_points - 1].green;
- y_b = rgb_resulted[hw_points - 1].blue;
+ corner_points[0].red.slope = dc_fixpt_div(corner_points[0].red.y,
+ corner_points[0].red.x);
+ corner_points[0].green.slope = dc_fixpt_div(corner_points[0].green.y,
+ corner_points[0].green.x);
+ corner_points[0].blue.slope = dc_fixpt_div(corner_points[0].blue.y,
+ corner_points[0].blue.x);
/* see comment above, m_arrPoints[1].y should be the Y value for the
* region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1)
*/
- y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
-
- arr_points[1].y = y3_max;
-
- arr_points[1].slope = dc_fixpt_zero;
+ corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+ corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+ corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+ corner_points[1].red.slope = dc_fixpt_zero;
+ corner_points[1].green.slope = dc_fixpt_zero;
+ corner_points[1].blue.slope = dc_fixpt_zero;
if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
/* for PQ, we want to have a straight line from last HW X point,
@@ -360,9 +431,15 @@ bool cm_helper_translate_curve_to_hw_format(
const struct fixed31_32 end_value =
dc_fixpt_from_int(125);
- arr_points[1].slope = dc_fixpt_div(
- dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
- dc_fixpt_sub(end_value, arr_points[1].x));
+ corner_points[1].red.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+ dc_fixpt_sub(end_value, corner_points[1].red.x));
+ corner_points[1].green.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+ dc_fixpt_sub(end_value, corner_points[1].green.x));
+ corner_points[1].blue.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+ dc_fixpt_sub(end_value, corner_points[1].blue.x));
}
lut_params->hw_points_num = hw_points;
@@ -411,7 +488,7 @@ bool cm_helper_translate_curve_to_hw_format(
++i;
}
cm_helper_convert_to_custom_float(rgb_resulted,
- lut_params->arr_points,
+ lut_params->corner_points,
hw_points, fixpoint);
return true;
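
The PQ special case keeps extending the last segment toward a fixed endpoint: the slope is chosen so the line through (x_end, y_end) reaches y = 1.0 at x = 125, the end value hardcoded above. In plain floating point the same computation is:

	/* Plain-float model of the fixed-point math above: pick the slope
	 * so the extended segment passes through (125, 1.0) exactly. */
	static double pq_end_slope(double x_end, double y_end)
	{
		return (1.0 - y_end) / (125.0 - x_end);
	}
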
@@ -424,15 +501,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params)
{
- struct curve_points *arr_points;
+ struct curve_points3 *corner_points;
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
- struct fixed31_32 y_r;
- struct fixed31_32 y_g;
- struct fixed31_32 y_b;
- struct fixed31_32 y1_min;
- struct fixed31_32 y3_max;
int32_t region_start, region_end;
int32_t i;
@@ -441,9 +513,9 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
- PERF_TRACE();
+ PERF_TRACE_CTX(output_tf->ctx);
- arr_points = lut_params->arr_points;
+ corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;
@@ -489,31 +561,28 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
- arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
- arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ corner_points[0].green.x = corner_points[0].red.x;
+ corner_points[0].blue.x = corner_points[0].red.x;
+ corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_end));
+ corner_points[1].green.x = corner_points[1].red.x;
+ corner_points[1].blue.x = corner_points[1].red.x;
- y_r = rgb_resulted[0].red;
- y_g = rgb_resulted[0].green;
- y_b = rgb_resulted[0].blue;
-
- y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
-
- arr_points[0].y = y1_min;
- arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
- y_r = rgb_resulted[hw_points - 1].red;
- y_g = rgb_resulted[hw_points - 1].green;
- y_b = rgb_resulted[hw_points - 1].blue;
+ corner_points[0].red.y = rgb_resulted[0].red;
+ corner_points[0].green.y = rgb_resulted[0].green;
+ corner_points[0].blue.y = rgb_resulted[0].blue;
/* see comment above, m_arrPoints[1].y should be the Y value for the
* region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1)
*/
- y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
-
- arr_points[1].y = y3_max;
-
- arr_points[1].slope = dc_fixpt_zero;
+ corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+ corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+ corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+ corner_points[1].red.slope = dc_fixpt_zero;
+ corner_points[1].green.slope = dc_fixpt_zero;
+ corner_points[1].blue.slope = dc_fixpt_zero;
if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
/* for PQ, we want to have a straight line from last HW X point,
@@ -522,9 +591,15 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
const struct fixed31_32 end_value =
dc_fixpt_from_int(125);
- arr_points[1].slope = dc_fixpt_div(
- dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
- dc_fixpt_sub(end_value, arr_points[1].x));
+ corner_points[1].red.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+ dc_fixpt_sub(end_value, corner_points[1].red.x));
+ corner_points[1].green.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+ dc_fixpt_sub(end_value, corner_points[1].green.x));
+ corner_points[1].blue.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+ dc_fixpt_sub(end_value, corner_points[1].blue.x));
}
lut_params->hw_points_num = hw_points;
@@ -564,7 +639,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
++i;
}
cm_helper_convert_to_custom_float(rgb_resulted,
- lut_params->arr_points,
+ lut_params->corner_points,
hw_points, false);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 7a531b02871f..5ae4d69391a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -98,7 +98,7 @@ void cm_helper_program_xfer_func(
bool cm_helper_convert_to_custom_float(
struct pwl_result_data *rgb_resulted,
- struct curve_points *arr_points,
+ struct curve_points3 *corner_points,
uint32_t hw_points_num,
bool fixpoint);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 4254e7e1a509..c7d1e678ebf5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -100,7 +100,7 @@ bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);
- return true ? false : enable;
+ return enable ? true : false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 74132a1f3046..345af015d061 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -99,6 +99,14 @@ static unsigned int hubp1_get_underflow_status(struct hubp *hubp)
return hubp_underflow;
}
+
+void hubp1_clear_underflow(struct hubp *hubp)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1);
+}
+
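
HUBP_UNDERFLOW_STATUS is a sticky bit, so clearing it becomes an explicit hook instead of a side effect of the debug dump (the automatic clear is removed from dcn10_get_otg_states() later in this patch). A caller would presumably walk the pipes through the function table, guarded like the other hubp hooks:

	/* Illustrative: clear the sticky bit on every pipe; hubps[] is the
	 * pool array used elsewhere in dcn10. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct hubp *hubp = dc->res_pool->hubps[i];

		if (hubp && hubp->funcs->hubp_clear_underflow)
			hubp->funcs->hubp_clear_underflow(hubp);
	}
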
static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@@ -565,19 +573,6 @@ void hubp1_program_deadline(
REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
- if (REG(PREFETCH_SETTINS))
- REG_SET_2(PREFETCH_SETTINS, 0,
- DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
- VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
- else
- REG_SET_2(PREFETCH_SETTINGS, 0,
- DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
- VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
-
- REG_SET_2(VBLANK_PARAMETERS_0, 0,
- DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
- DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
-
REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
@@ -585,9 +580,6 @@ void hubp1_program_deadline(
REG_SET(VBLANK_PARAMETERS_1, 0,
REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
- REG_SET(VBLANK_PARAMETERS_3, 0,
- REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
-
if (REG(NOM_PARAMETERS_0))
REG_SET(NOM_PARAMETERS_0, 0,
DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
@@ -602,27 +594,13 @@ void hubp1_program_deadline(
REG_SET(NOM_PARAMETERS_5, 0,
REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
- REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
- REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
- REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
-
REG_SET_2(PER_LINE_DELIVERY, 0,
REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
- if (REG(PREFETCH_SETTINS_C))
- REG_SET(PREFETCH_SETTINS_C, 0,
- VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
- else
- REG_SET(PREFETCH_SETTINGS_C, 0,
- VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
-
REG_SET(VBLANK_PARAMETERS_2, 0,
REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
- REG_SET(VBLANK_PARAMETERS_4, 0,
- REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
-
if (REG(NOM_PARAMETERS_2))
REG_SET(NOM_PARAMETERS_2, 0,
DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
@@ -642,10 +620,6 @@ void hubp1_program_deadline(
QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
- REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
- MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
- QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
-
/* TTU - per luma/chroma */
/* Assumed surf0 is luma and 1 is chroma */
@@ -654,25 +628,15 @@ void hubp1_program_deadline(
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
- REG_SET(DCN_SURF0_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_l);
-
REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
- REG_SET(DCN_SURF1_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_c);
-
REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
- REG_SET(DCN_CUR0_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
}
static void hubp1_setup(
@@ -690,6 +654,48 @@ static void hubp1_setup(
hubp1_vready_workaround(hubp, pipe_dest);
}
+static void hubp1_setup_interdependent(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_SET_2(PREFETCH_SETTINS, 0,
+ DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+
+ REG_SET(PREFETCH_SETTINS_C, 0,
+ VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+
+ REG_SET_2(VBLANK_PARAMETERS_0, 0,
+ DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+
+ REG_SET(VBLANK_PARAMETERS_3, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+ REG_SET(VBLANK_PARAMETERS_4, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+ REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+
+ REG_SET(DCN_SURF0_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_l);
+ REG_SET(DCN_SURF1_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_c);
+ REG_SET(DCN_CUR0_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
+
+ REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
+ MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
+ QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+}
+
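
Pulling these registers out of hubp1_program_deadline() lets them be reprogrammed on pipes that are already active but whose deadline parameters shift when another pipe changes. The apply-ctx hunk later in this patch invokes it through the function table:

	hubp->funcs->hubp_setup_interdependent(hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
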
bool hubp1_is_flip_pending(struct hubp *hubp)
{
uint32_t flip_pending = 0;
@@ -1178,6 +1184,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
hubp1_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp1_setup,
+ .hubp_setup_interdependent = hubp1_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp1_set_vm_system_aperture_settings,
.hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
.set_blank = hubp1_set_blank,
@@ -1190,6 +1197,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_clk_cntl = hubp1_clk_cntl,
.hubp_vtg_sel = hubp1_vtg_sel,
.hubp_read_state = hubp1_read_state,
+ .hubp_clear_underflow = hubp1_clear_underflow,
.hubp_disable_control = hubp1_disable_control,
.hubp_get_underflow_status = hubp1_get_underflow_status,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 4890273b632b..62d4232e7796 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -251,6 +251,7 @@
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
@@ -435,6 +436,7 @@
type HUBP_NO_OUTSTANDING_REQ;\
type HUBP_VTG_SEL;\
type HUBP_UNDERFLOW_STATUS;\
+ type HUBP_UNDERFLOW_CLEAR;\
type NUM_PIPES;\
type NUM_BANKS;\
type PIPE_INTERLEAVE;\
@@ -739,6 +741,7 @@ void dcn10_hubp_construct(
const struct dcn_mi_mask *hubp_mask);
void hubp1_read_state(struct hubp *hubp);
+void hubp1_clear_underflow(struct hubp *hubp);
enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 193184affefb..0bd33a713836 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -45,6 +45,7 @@
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
#include "dc_link_dp.h"
+#include "dccg.h"
#define DC_LOGGER_INIT(logger)
@@ -786,7 +787,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
&dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
- if (hubp != NULL) {
+ if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
/* one pipe underflow, we will reset all the pipes*/
need_recover = true;
@@ -812,7 +813,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
- if (hubp != NULL)
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
hubp->funcs->set_hubp_blank_en(hubp, true);
}
}
@@ -825,7 +826,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_DISABLE=1*/
- if (hubp != NULL)
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
hubp->funcs->hubp_disable_control(hubp, true);
}
}
@@ -835,7 +836,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_DISABLE=0*/
- if (hubp != NULL)
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
hubp->funcs->hubp_disable_control(hubp, true);
}
}
@@ -847,7 +848,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
- if (hubp != NULL)
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
hubp->funcs->set_hubp_blank_en(hubp, true);
}
}
@@ -1126,7 +1127,7 @@ static void dcn10_init_hw(struct dc *dc)
enable_power_gating_plane(dc->hwseq, true);
- memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
+ memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
}
static void reset_hw_ctx_wrap(
@@ -1226,7 +1227,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
tf = plane_state->in_transfer_func;
if (plane_state->gamma_correction &&
- !plane_state->gamma_correction->is_identity
+ !dpp_base->ctx->dc->debug.always_use_regamma
+ && !plane_state->gamma_correction->is_identity
&& dce_use_lut(plane_state->format))
dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
@@ -1399,7 +1401,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
grouped_pipes[i]->stream_res.tg,
- grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
+ 0,
&grouped_pipes[i]->stream->triggered_crtc_reset);
DC_SYNC_INFO("Waiting for trigger\n");
@@ -1603,7 +1605,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
}
-static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
+void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
struct vm_system_aperture_param apt = { {{ 0 } } };
@@ -1703,33 +1705,22 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
}
-
-static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
+static void dcn10_program_output_csc(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
- uint16_t *matrix)
+ uint16_t *matrix,
+ int opp_id)
{
if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
- if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
- pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
+ if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
} else {
if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
}
}
-static void dcn10_program_output_csc(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum dc_color_space colorspace,
- uint16_t *matrix,
- int opp_id)
-{
- if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
- program_csc_matrix(pipe_ctx,
- colorspace,
- matrix);
-}
-
-static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state->visible)
return true;
@@ -1738,7 +1729,7 @@ static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
return false;
}
-static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state->visible)
return true;
@@ -1747,7 +1738,7 @@ static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
return false;
}
-static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state->visible)
return true;
@@ -1780,7 +1771,7 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
}
}
-static void dcn10_get_surface_visual_confirm_color(
+void dcn10_get_surface_visual_confirm_color(
const struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
@@ -1816,7 +1807,7 @@ static void dcn10_get_surface_visual_confirm_color(
}
}
-static void dcn10_get_hdr_visual_confirm_color(
+void dcn10_get_hdr_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
@@ -1943,10 +1934,6 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
-
-
- /* TODO: proper fix once fpga works */
-
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
dcn10_get_hdr_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
@@ -2026,8 +2013,6 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
bool per_pixel_alpha =
pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
- /* TODO: proper fix once fpga works */
-
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
/* scaler configuration */
@@ -2035,7 +2020,7 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
}
-static void update_dchubp_dpp(
+void update_dchubp_dpp(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -2052,16 +2037,22 @@ static void update_dchubp_dpp(
*/
if (plane_state->update_flags.bits.full_update) {
bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
- dc->res_pool->dccg->clks.dispclk_khz / 2;
+ dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
dpp,
should_divided_by_2,
true);
- dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
- dc->res_pool->dccg->clks.dispclk_khz / 2 :
- dc->res_pool->dccg->clks.dispclk_khz;
+ if (dc->res_pool->dccg)
+ dc->res_pool->dccg->funcs->update_dpp_dto(
+ dc->res_pool->dccg,
+ dpp->inst,
+ pipe_ctx->plane_res.bw.calc.dppclk_khz);
+ else
+ dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
+ dc->res_pool->clk_mgr->clks.dispclk_khz / 2 :
+ dc->res_pool->clk_mgr->clks.dispclk_khz;
}
/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -2077,6 +2068,10 @@ static void update_dchubp_dpp(
&pipe_ctx->ttu_regs,
&pipe_ctx->rq_regs,
&pipe_ctx->pipe_dlg_param);
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
}
size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport;
@@ -2182,7 +2177,7 @@ static void dcn10_blank_pixel_data(
}
}
-static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
{
struct fixed31_32 multiplier = dc_fixpt_from_fraction(
pipe_ctx->plane_state->sdr_white_level, 80);
@@ -2257,47 +2252,7 @@ static void program_all_pipe_in_tree(
}
}
-static void dcn10_pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context)
-{
- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
- pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
- pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
- pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
- pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
- dce110_fill_display_configs(context, pp_display_cfg);
-
- if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
- struct dm_pp_display_configuration)) != 0)
- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
- dc->prev_display_config = *pp_display_cfg;
-}
-
-static void optimize_shared_resources(struct dc *dc)
-{
- if (dc->current_state->stream_count == 0) {
- /* S0i2 message */
- dcn10_pplib_apply_display_requirements(dc, dc->current_state);
- }
-
- if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
- dcn_bw_notify_pplib_of_wm_ranges(dc);
-}
-
-static void ready_shared_resources(struct dc *dc, struct dc_state *context)
-{
- /* S0i2 message */
- if (dc->current_state->stream_count == 0 &&
- context->stream_count != 0)
- dcn10_pplib_apply_display_requirements(dc, context);
-}
-
-static struct pipe_ctx *find_top_pipe_for_stream(
+struct pipe_ctx *find_top_pipe_for_stream(
struct dc *dc,
struct dc_state *context,
const struct dc_stream_state *stream)
@@ -2387,6 +2342,32 @@ static void dcn10_apply_ctx_for_surface(
dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+ if (top_pipe_to_program->plane_state &&
+ top_pipe_to_program->plane_state->update_flags.bits.full_update)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ /* Skip inactive pipes and ones already updated */
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+ continue;
+
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+
+ pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
+ pipe_ctx->plane_res.hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+ continue;
+
+ dcn10_pipe_control_lock(dc, pipe_ctx, false);
+ }
+
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
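
Note the shape of the two loops just added: every other active pipe is locked and reprogrammed first, and only then are all the locks released, so the interdependent values latch together rather than pipe by pipe. Schematically (the helper names here are shorthand, not real functions):

	/* Schematic only; other_active_pipe(), lock(), unlock() and
	 * program_interdependent() stand in for the checks and calls in
	 * the loops above. */
	for (i = 0; i < pipe_count; i++)
		if (other_active_pipe(i)) {
			lock(i);
			program_interdependent(i);
		}
	for (i = 0; i < pipe_count; i++)
		if (other_active_pipe(i))
			unlock(i);	/* all updates latch together */
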
@@ -2398,10 +2379,9 @@ static void dcn10_apply_ctx_for_surface(
hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
-static void dcn10_set_bandwidth(
+static void dcn10_prepare_bandwidth(
struct dc *dc,
- struct dc_state *context,
- bool safe_to_lower)
+ struct dc_state *context)
{
if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
@@ -2410,12 +2390,39 @@ static void dcn10_set_bandwidth(
if (context->stream_count == 0)
context->bw.dcn.clk.phyclk_khz = 0;
- dc->res_pool->dccg->funcs->update_clocks(
- dc->res_pool->dccg,
- &context->bw.dcn.clk,
- safe_to_lower);
+ dc->res_pool->clk_mgr->funcs->update_clocks(
+ dc->res_pool->clk_mgr,
+ context,
+ false);
+ }
+
+ hubbub1_program_watermarks(dc->res_pool->hubbub,
+ &context->bw.dcn.watermarks,
+ dc->res_pool->ref_clock_inKhz / 1000,
+ true);
+
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+ dcn_bw_notify_pplib_of_wm_ranges(dc);
+
+ if (dc->debug.sanity_checks)
+ dcn10_verify_allow_pstate_change_high(dc);
+}
+
+static void dcn10_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (dc->debug.sanity_checks)
+ dcn10_verify_allow_pstate_change_high(dc);
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ if (context->stream_count == 0)
+ context->bw.dcn.clk.phyclk_khz = 0;
- dcn10_pplib_apply_display_requirements(dc, context);
+ dc->res_pool->clk_mgr->funcs->update_clocks(
+ dc->res_pool->clk_mgr,
+ context,
+ true);
}
hubbub1_program_watermarks(dc->res_pool->hubbub,
@@ -2423,6 +2430,9 @@ static void dcn10_set_bandwidth(
dc->res_pool->ref_clock_inKhz / 1000,
true);
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+ dcn_bw_notify_pplib_of_wm_ranges(dc);
+
if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
}
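
With this change set_bandwidth(safe_to_lower) becomes two explicit phases: prepare_bandwidth raises clocks and watermarks via update_clocks(..., false) before anything is programmed, and optimize_bandwidth lowers them via update_clocks(..., true) afterwards. The core presumably brackets surface programming with the two hooks:

	/* Illustrative ordering; program_surfaces() is a placeholder for
	 * the actual apply-ctx path. */
	dc->hwss.prepare_bandwidth(dc, context);	/* raise first: always safe */
	program_surfaces(dc, context);
	dc->hwss.optimize_bandwidth(dc, context);	/* then drop what's unused */

Raising before and lowering after means the hardware is never briefly under-clocked for the configuration being programmed.
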
@@ -2694,7 +2704,6 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
- .program_csc_matrix = program_csc_matrix,
.init_hw = dcn10_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
@@ -2721,7 +2730,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.disable_plane = dcn10_disable_plane,
.blank_pixel_data = dcn10_blank_pixel_data,
.pipe_control_lock = dcn10_pipe_control_lock,
- .set_bandwidth = dcn10_set_bandwidth,
+ .prepare_bandwidth = dcn10_prepare_bandwidth,
+ .optimize_bandwidth = dcn10_optimize_bandwidth,
.reset_hw_ctx_wrap = reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.set_drr = set_drr,
@@ -2731,11 +2741,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_avmute = dce110_set_avmute,
.log_hw_state = dcn10_log_hw_state,
.get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
- .ready_shared_resources = ready_shared_resources,
- .optimize_shared_resources = optimize_shared_resources,
- .pplib_apply_display_requirements =
- dcn10_pplib_apply_display_requirements,
.edp_backlight_control = hwss_edp_backlight_control,
.edp_power_control = hwss_edp_power_control,
.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 84d461e0ed3e..f8eea10e4c64 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -51,4 +51,34 @@ void dcn10_get_hw_state(
char *pBuf, unsigned int bufSize,
unsigned int mask);
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask);
+
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp);
+
+void set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
+
+void dcn10_get_surface_visual_confirm_color(
+ const struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
+void dcn10_get_hdr_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
+void update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+
+struct pipe_ctx *find_top_pipe_for_stream(
+ struct dc *dc,
+ struct dc_state *context,
+ const struct dc_stream_state *stream);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 64158900730f..cd469014baa3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -44,6 +44,7 @@
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
+#include "dcn10_clk_mgr.h"
static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
{
@@ -454,12 +455,6 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
remaining_buffer -= chars_printed;
pBuf += chars_printed;
-
- // Clear underflow for debug purposes
- // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
- // This function is called only from Windows or Diags test environment, hence it's safe to clear
- // it from here without affecting the original intent.
- tg->funcs->clear_optc_underflow(tg);
}
}
@@ -469,19 +464,75 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned int bufSize)
{
unsigned int chars_printed = 0;
+ unsigned int remaining_buffer = bufSize;
- chars_printed = snprintf_count(pBuf, bufSize, "dcfclk_khz,dcfclk_deep_sleep_khz,dispclk_khz,"
- "dppclk_khz,max_supported_dppclk_khz,fclk_khz,socclk_khz\n"
- "%d,%d,%d,%d,%d,%d,%d\n",
+ chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk,"
+ "dppclk,fclk,socclk\n"
+ "%d,%d,%d,%d,%d,%d\n",
dc->current_state->bw.dcn.clk.dcfclk_khz,
dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
dc->current_state->bw.dcn.clk.dispclk_khz,
dc->current_state->bw.dcn.clk.dppclk_khz,
- dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
dc->current_state->bw.dcn.clk.fclk_khz,
dc->current_state->bw.dcn.clk.socclk_khz);
- return chars_printed;
+ remaining_buffer -= chars_printed;
+ pBuf += chars_printed;
+
+ return bufSize - remaining_buffer;
+}
+
+static void dcn10_clear_otpc_underflow(struct dc *dc)
+{
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ for (i = 0; i < pool->timing_generator_count; i++) {
+ struct timing_generator *tg = pool->timing_generators[i];
+ struct dcn_otg_state s = {0};
+
+ optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+
+ if (s.otg_enabled & 1)
+ tg->funcs->clear_optc_underflow(tg);
+ }
+}
+
+static void dcn10_clear_hubp_underflow(struct dc *dc)
+{
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct hubp *hubp = pool->hubps[i];
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
+
+ hubp->funcs->hubp_read_state(hubp);
+
+ if (!s->blank_en)
+ hubp->funcs->hubp_clear_underflow(hubp);
+ }
+}
+
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask)
+{
+ /*
+ * Mask Format
+ * Bit 0 - 31: Status bit to clear
+ *
+ * Mask = 0x0 means clear all status bits
+ */
+ const unsigned int DC_HW_STATE_MASK_HUBP_UNDERFLOW = 0x1;
+ const unsigned int DC_HW_STATE_MASK_OTPC_UNDERFLOW = 0x2;
+
+ if (mask == 0x0)
+ mask = 0xFFFFFFFF;
+
+ if (mask & DC_HW_STATE_MASK_HUBP_UNDERFLOW)
+ dcn10_clear_hubp_underflow(dc);
+
+ if (mask & DC_HW_STATE_MASK_OTPC_UNDERFLOW)
+ dcn10_clear_otpc_underflow(dc);
}
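Given the mask convention documented above, callers reach this through the new clear_status_bits hw_sequencer hook; a hypothetical debug-path invocation:

	dc->hwss.clear_status_bits(dc, 0x1);        /* HUBP underflow only */
	dc->hwss.clear_status_bits(dc, 0x1 | 0x2);  /* HUBP and OTPC underflow */
	dc->hwss.clear_status_bits(dc, 0x0);        /* 0 expands to 0xFFFFFFFF: clear all */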
void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask)
@@ -491,16 +542,16 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
 * Bit 0 - 14: Hardware block mask
* Bit 15: 1 = Invariant Only, 0 = All
*/
- const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1;
- const unsigned int DC_HW_STATE_MASK_HUBP = 0x2;
- const unsigned int DC_HW_STATE_MASK_RQ = 0x4;
- const unsigned int DC_HW_STATE_MASK_DLG = 0x8;
- const unsigned int DC_HW_STATE_MASK_TTU = 0x10;
- const unsigned int DC_HW_STATE_MASK_CM = 0x20;
- const unsigned int DC_HW_STATE_MASK_MPCC = 0x40;
- const unsigned int DC_HW_STATE_MASK_OTG = 0x80;
- const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100;
- const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000;
+ const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1;
+ const unsigned int DC_HW_STATE_MASK_HUBP = 0x2;
+ const unsigned int DC_HW_STATE_MASK_RQ = 0x4;
+ const unsigned int DC_HW_STATE_MASK_DLG = 0x8;
+ const unsigned int DC_HW_STATE_MASK_TTU = 0x10;
+ const unsigned int DC_HW_STATE_MASK_CM = 0x20;
+ const unsigned int DC_HW_STATE_MASK_MPCC = 0x40;
+ const unsigned int DC_HW_STATE_MASK_OTG = 0x80;
+ const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100;
+ const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000;
unsigned int chars_printed = 0;
unsigned int remaining_buf_size = bufSize;
@@ -556,6 +607,9 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
remaining_buf_size -= chars_printed;
}
- if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0)
+ if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0) {
chars_printed = dcn10_get_clock_states(dc, pBuf, remaining_buf_size);
+ pBuf += chars_printed;
+ remaining_buf_size -= chars_printed;
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index ba6a8686062f..477ab9222216 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -589,7 +589,7 @@ static bool dcn10_link_encoder_validate_hdmi_output(
return false;
/* DCE11 HW does not support 420 */
- if (!enc10->base.features.ycbcr420_supported &&
+ if (!enc10->base.features.hdmi_ycbcr420_supported &&
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
return false;
@@ -606,8 +606,10 @@ bool dcn10_link_encoder_validate_dp_output(
const struct dcn10_link_encoder *enc10,
const struct dc_crtc_timing *crtc_timing)
{
- if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- return false;
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ if (!enc10->base.features.dp_ycbcr420_supported)
+ return false;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 54626682bab2..7c138615f17d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -87,9 +87,8 @@ static void optc1_disable_stereo(struct timing_generator *optc)
REG_SET(OTG_STEREO_CONTROL, 0,
OTG_STEREO_EN, 0);
- REG_SET_3(OTG_3D_STRUCTURE_CONTROL, 0,
+ REG_SET_2(OTG_3D_STRUCTURE_CONTROL, 0,
OTG_3D_STRUCTURE_EN, 0,
- OTG_3D_STRUCTURE_V_UPDATE_MODE, 0,
OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
}
@@ -274,10 +273,12 @@ void optc1_program_timing(
 * program the reg for interrupt position.
*/
vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
- if (vertical_line_start < 0) {
- ASSERT(0);
+ v_fp2 = 0;
+ if (vertical_line_start < 0)
+ v_fp2 = -vertical_line_start;
+ if (vertical_line_start < 0)
vertical_line_start = 0;
- }
+
REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
@@ -296,9 +297,6 @@ void optc1_program_timing(
if (patched_crtc_timing.flags.INTERLACE == 1)
field_num = 1;
}
- v_fp2 = 0;
- if (optc->dlg_otg_param.vstartup_start > asic_blank_end)
- v_fp2 = optc->dlg_otg_param.vstartup_start > asic_blank_end;
/* Interlace */
if (patched_crtc_timing.flags.INTERLACE == 1) {
@@ -337,9 +335,8 @@ void optc1_program_timing(
/* Enable stereo - only when we need to pack 3D frame. Other types
* of stereo handled in explicit call
*/
- h_div_2 = (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ?
- 1 : 0;
+ h_div_2 = optc1_is_two_pixels_per_containter(&patched_crtc_timing);
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_BY2, h_div_2);
@@ -362,20 +359,19 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab
static void optc1_unblank_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t vertical_interrupt_enable = 0;
-
- REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
- OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &vertical_interrupt_enable);
-
- /* temporary work around for vertical interrupt, once vertical interrupt enabled,
- * this check will be removed.
- */
- if (vertical_interrupt_enable)
- optc1_set_blank_data_double_buffer(optc, true);
REG_UPDATE_2(OTG_BLANK_CONTROL,
OTG_BLANK_DATA_EN, 0,
OTG_BLANK_DE_MODE, 0);
+
+ /* W/A for automated testing
+ * Automated testing will fail the underflow test, as there
+ * are sporadic underflows which occur during the OPTC blank
+ * sequence. As a w/a, clear underflow on unblank.
+ * This prevents the failure, but will not mask actual
+ * underflows that affect real use cases.
+ */
+ optc1_clear_optc_underflow(optc);
}
/**
@@ -1155,9 +1151,8 @@ static void optc1_enable_stereo(struct timing_generator *optc,
OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1);
if (flags->PROGRAM_STEREO)
- REG_UPDATE_3(OTG_3D_STRUCTURE_CONTROL,
+ REG_UPDATE_2(OTG_3D_STRUCTURE_CONTROL,
OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED,
- OTG_3D_STRUCTURE_V_UPDATE_MODE, flags->FRAME_PACKED,
OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED);
}
@@ -1425,3 +1420,9 @@ void dcn10_timing_generator_init(struct optc *optc1)
optc1->min_h_sync_width = 8;
optc1->min_v_sync_width = 1;
}
+
+bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+ return timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index c1b114209fe8..8bacf0b6e27e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -565,4 +565,6 @@ bool optc1_configure_crc(struct timing_generator *optc,
bool optc1_get_crc(struct timing_generator *optc,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
+bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index a71453a15ae3..5d4772dec0ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -28,23 +28,23 @@
#include "resource.h"
#include "include/irq_service_interface.h"
-#include "dcn10/dcn10_resource.h"
+#include "dcn10_resource.h"
-#include "dcn10/dcn10_ipp.h"
-#include "dcn10/dcn10_mpc.h"
+#include "dcn10_ipp.h"
+#include "dcn10_mpc.h"
#include "irq/dcn10/irq_service_dcn10.h"
-#include "dcn10/dcn10_dpp.h"
+#include "dcn10_dpp.h"
#include "dcn10_optc.h"
-#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
-#include "dcn10/dcn10_opp.h"
-#include "dcn10/dcn10_link_encoder.h"
-#include "dcn10/dcn10_stream_encoder.h"
-#include "dce/dce_clocks.h"
+#include "dcn10_opp.h"
+#include "dcn10_link_encoder.h"
+#include "dcn10_stream_encoder.h"
+#include "dcn10_clk_mgr.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
-#include "../virtual/virtual_stream_encoder.h"
+#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
#include "dcn10_hubp.h"
@@ -202,7 +202,6 @@ enum dcn10_clk_src_array_id {
#define MMHUB_SR(reg_name)\
.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
-
 /* macros to expand register list macro defined in HW object header file
* end *********************/
@@ -436,8 +435,8 @@ static const struct dcn_optc_mask tg_mask = {
TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
-
static const struct bios_registers bios_regs = {
+ NBIO_SR(BIOS_SCRATCH_0),
NBIO_SR(BIOS_SCRATCH_3),
NBIO_SR(BIOS_SCRATCH_6)
};
@@ -496,7 +495,6 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
-
static const struct resource_caps res_cap = {
.num_timing_generator = 4,
.num_opp = 4,
@@ -719,7 +717,8 @@ static struct timing_generator *dcn10_timing_generator_create(
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
- .ycbcr420_supported = true,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = false,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -949,8 +948,8 @@ static void destruct(struct dcn10_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.dccg != NULL)
- dce_dccg_destroy(&pool->base.dccg);
+ if (pool->base.clk_mgr != NULL)
+ dce_clk_mgr_destroy(&pool->base.clk_mgr);
kfree(pool->base.pp_smu);
}
@@ -1275,9 +1274,8 @@ static bool construct(
goto fail;
}
}
-
- pool->base.dccg = dcn1_dccg_create(ctx);
- if (pool->base.dccg == NULL) {
+ pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 6f9078f3c4d3..b8b5525a389a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -766,7 +766,6 @@ void enc1_stream_encoder_dp_blank(
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t retries = 0;
uint32_t reg1 = 0;
uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
@@ -803,8 +802,6 @@ void enc1_stream_encoder_dp_blank(
0,
10, max_retries);
- ASSERT(retries <= max_retries);
-
/* Tell the DP encoder to ignore timing from CRTC, must be done after
* the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
* complete, stream status will be stuck in video stream enabled state,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_event_log.h b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
index 34a701ca879e..65663f4d93e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_event_log.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
@@ -33,6 +33,7 @@
#define EVENT_LOG_AUX_REQ(ddc, type, action, address, len, data)
#define EVENT_LOG_AUX_REP(ddc, type, replyStatus, len, data)
+#define EVENT_LOG_CUST_MSG(tag, a, ...)
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index f2ea8452d48f..0029a39efb1c 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -55,10 +55,10 @@ struct pp_smu {
struct pp_smu_wm_set_range {
unsigned int wm_inst;
- uint32_t min_fill_clk_khz;
- uint32_t max_fill_clk_khz;
- uint32_t min_drain_clk_khz;
- uint32_t max_drain_clk_khz;
+ uint32_t min_fill_clk_mhz;
+ uint32_t max_fill_clk_mhz;
+ uint32_t min_drain_clk_mhz;
+ uint32_t max_drain_clk_mhz;
};
#define MAX_WATERMARK_SETS 4
@@ -77,15 +77,15 @@ struct pp_smu_display_requirement_rv {
*/
unsigned int display_count;
- /* PPSMC_MSG_SetHardMinFclkByFreq: khz
+ /* PPSMC_MSG_SetHardMinFclkByFreq: mhz
* FCLK will vary with DPM, but never below requested hard min
*/
- unsigned int hard_min_fclk_khz;
+ unsigned int hard_min_fclk_mhz;
- /* PPSMC_MSG_SetHardMinDcefclkByFreq: khz
+ /* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz
* fixed clock at requested freq, either from FCH bypass or DFS
*/
- unsigned int hard_min_dcefclk_khz;
+ unsigned int hard_min_dcefclk_mhz;
/* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz
* when DF is in cstate, dcf clock is further divided down
@@ -102,14 +102,20 @@ struct pp_smu_funcs_rv {
*/
void (*set_display_count)(struct pp_smu *pp, int count);
- /* which SMU message? are reader and writer WM separate SMU msg? */
+ /* Reader and writer WM sets are sent together as part of one table:
+ *
+ * PPSMC_MSG_SetDriverDramAddrHigh
+ * PPSMC_MSG_SetDriverDramAddrLow
+ * PPSMC_MSG_TransferTableDram2Smu
+ */
void (*set_wm_ranges)(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges);
/* PPSMC_MSG_SetHardMinDcfclkByFreq
* fixed clock at requested freq, either from FCH bypass or DFS
*/
- void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int khz);
+ void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int mhz);
/* PPSMC_MSG_SetMinDeepSleepDcfclk
* when DF is in cstate, dcf clock is further divided down
@@ -120,12 +126,12 @@ struct pp_smu_funcs_rv {
/* PPSMC_MSG_SetHardMinFclkByFreq
* FCLK will vary with DPM, but never below requested hard min
*/
- void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz);
+ void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int mhz);
/* PPSMC_MSG_SetHardMinSocclkByFreq
* Needed for DWB support
*/
- void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int khz);
+ void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int mhz);
/* PME w/a */
void (*set_pme_wa_enable)(struct pp_smu *pp);
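The kHz-to-MHz rename changes the contract of every hard-min setter at once. A hypothetical caller under the new convention (funcs and pp stand in for however the pp_smu_funcs_rv table is reached; they are placeholders, not names from this patch):

	/* Request a 400 MHz hard minimum FCLK; before this change the same
	 * request would have been passed as 400000 (kHz). */
	if (funcs->set_hard_min_fclk_by_freq)
		funcs->set_hard_min_fclk_by_freq(pp, 400);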
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 28128c02de00..1961cc6d9143 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -31,6 +31,8 @@
#define __DM_SERVICES_H__
+#include "amdgpu_dm_trace.h"
+
/* TODO: remove when DC is complete. */
#include "dm_services_types.h"
#include "logger_interface.h"
@@ -70,6 +72,7 @@ static inline uint32_t dm_read_reg_func(
}
#endif
value = cgs_read_register(ctx->cgs_device, address);
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
return value;
}
@@ -90,6 +93,7 @@ static inline void dm_write_reg_func(
}
#endif
cgs_write_register(ctx->cgs_device, address, value);
+ trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
static inline uint32_t dm_read_index_reg(
@@ -351,8 +355,12 @@ unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
/*
* performance tracing
*/
-void dm_perf_trace_timestamp(const char *func_name, unsigned int line);
-#define PERF_TRACE() dm_perf_trace_timestamp(__func__, __LINE__)
+#define PERF_TRACE() trace_amdgpu_dc_performance(CTX->perf_trace->read_count,\
+ CTX->perf_trace->write_count, &CTX->perf_trace->last_entry_read,\
+ &CTX->perf_trace->last_entry_write, __func__, __LINE__)
+#define PERF_TRACE_CTX(__CTX) trace_amdgpu_dc_performance(__CTX->perf_trace->read_count,\
+ __CTX->perf_trace->write_count, &__CTX->perf_trace->last_entry_read,\
+ &__CTX->perf_trace->last_entry_write, __func__, __LINE__)
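Both macros now feed the amdgpu_dc_performance tracepoint with running register-access counters rather than a bare timestamp. An illustrative (not in-tree) use of the explicit-context variant:

static void program_and_trace(struct dc_context *ctx)
{
	PERF_TRACE_CTX(ctx);	/* records read/write counts on entry */
	/* ... dm_read_reg()/dm_write_reg() work happens here ... */
	PERF_TRACE_CTX(ctx);	/* records them again; the delta is the cost */
}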
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index 2b83f922ac02..1af8c777b3ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -208,22 +208,20 @@ struct dm_bl_data_point {
/* Brightness level as effective value in range 0-255,
* corresponding to above percentage
*/
- uint8_t signalLevel;
+ uint8_t signal_level;
};
/* Total size of the structure should not exceed 256 bytes */
struct dm_acpi_atif_backlight_caps {
-
-
uint16_t size; /* Bytes 0-1 (2 bytes) */
 uint16_t flags; /* Bytes 2-3 (2 bytes) */
- uint8_t errorCode; /* Byte 4 */
- uint8_t acLevelPercentage; /* Byte 5 */
- uint8_t dcLevelPercentage; /* Byte 6 */
- uint8_t minInputSignal; /* Byte 7 */
- uint8_t maxInputSignal; /* Byte 8 */
- uint8_t numOfDataPoints; /* Byte 9 */
- struct dm_bl_data_point dataPoints[99]; /* Bytes 10-207 (198 bytes)*/
+ uint8_t error_code; /* Byte 4 */
+ uint8_t ac_level_percentage; /* Byte 5 */
+ uint8_t dc_level_percentage; /* Byte 6 */
+ uint8_t min_input_signal; /* Byte 7 */
+ uint8_t max_input_signal; /* Byte 8 */
+ uint8_t num_data_points; /* Byte 9 */
+ struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/
};
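The byte-offset comments imply a fixed 208-byte layout (2 + 2 + 6 + 2*99), within the documented 256-byte ceiling. An illustrative compile-time check, assuming dm_bl_data_point stays the two-byte pair defined above and no padding is introduced (every member after the two u16s is byte-aligned):

_Static_assert(sizeof(struct dm_acpi_atif_backlight_caps) == 208,
	       "ATIF backlight caps layout drifted from the byte-offset comments");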
enum dm_acpi_display_type {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index cbafce649e33..5dd04520ceca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -113,7 +113,8 @@ struct _vcs_dpi_soc_bounding_box_st {
int use_urgent_burst_bw;
double max_hscl_ratio;
double max_vscl_ratio;
- struct _vcs_dpi_voltage_scaling_st clock_limits[7];
+ unsigned int num_states;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[8];
};
struct _vcs_dpi_ip_params_st {
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index f20161c5706d..dada04296025 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -56,7 +56,6 @@ struct gpio_service *dal_gpio_service_create(
struct dc_context *ctx)
{
struct gpio_service *service;
-
uint32_t index_of_id;
service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
@@ -78,44 +77,33 @@ struct gpio_service *dal_gpio_service_create(
goto failure_1;
}
- /* allocate and initialize business storage */
+ /* allocate and initialize busyness storage */
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
index_of_id = 0;
service->ctx = ctx;
do {
uint32_t number_of_bits =
service->factory.number_of_pins[index_of_id];
+ uint32_t i = 0;
- uint32_t number_of_uints =
- (number_of_bits + bits_per_uint - 1) /
- bits_per_uint;
-
- uint32_t *slot;
-
- if (number_of_bits) {
- uint32_t index_of_uint = 0;
+ if (number_of_bits) {
+ service->busyness[index_of_id] =
+ kcalloc(number_of_bits, sizeof(char),
+ GFP_KERNEL);
- slot = kcalloc(number_of_uints,
- sizeof(uint32_t),
- GFP_KERNEL);
-
- if (!slot) {
+ if (!service->busyness[index_of_id]) {
BREAK_TO_DEBUGGER();
goto failure_2;
}
do {
- slot[index_of_uint] = 0;
-
- ++index_of_uint;
- } while (index_of_uint < number_of_uints);
- } else
- slot = NULL;
-
- service->busyness[index_of_id] = slot;
+ service->busyness[index_of_id][i] = 0;
+ ++i;
+ } while (i < number_of_bits);
+ } else {
+ service->busyness[index_of_id] = NULL;
+ }
++index_of_id;
} while (index_of_id < GPIO_ID_COUNT);
@@ -125,13 +113,8 @@ struct gpio_service *dal_gpio_service_create(
failure_2:
while (index_of_id) {
- uint32_t *slot;
-
--index_of_id;
-
- slot = service->busyness[index_of_id];
-
- kfree(slot);
+ kfree(service->busyness[index_of_id]);
}
failure_1:
@@ -169,9 +152,7 @@ void dal_gpio_service_destroy(
uint32_t index_of_id = 0;
do {
- uint32_t *slot = (*ptr)->busyness[index_of_id];
-
- kfree(slot);
+ kfree((*ptr)->busyness[index_of_id]);
++index_of_id;
} while (index_of_id < GPIO_ID_COUNT);
@@ -192,11 +173,7 @@ static bool is_pin_busy(
enum gpio_id id,
uint32_t en)
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
- const uint32_t *slot = service->busyness[id] + (en / bits_per_uint);
-
- return 0 != (*slot & (1 << (en % bits_per_uint)));
+ return service->busyness[id][en];
}
static void set_pin_busy(
@@ -204,10 +181,7 @@ static void set_pin_busy(
enum gpio_id id,
uint32_t en)
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
- service->busyness[id][en / bits_per_uint] |=
- (1 << (en % bits_per_uint));
+ service->busyness[id][en] = true;
}
static void set_pin_free(
@@ -215,10 +189,7 @@ static void set_pin_free(
enum gpio_id id,
uint32_t en)
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
- service->busyness[id][en / bits_per_uint] &=
- ~(1 << (en % bits_per_uint));
+ service->busyness[id][en] = false;
}
enum gpio_result dal_gpio_service_open(
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
index c7f3081f59cc..1d501a43d13b 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
@@ -36,10 +36,9 @@ struct gpio_service {
/*
* @brief
 * Busyness storage.
- * For each member of 'enum gpio_id',
- * store array of bits (packed into uint32_t slots),
- * index individual bit by 'en' value */
- uint32_t *busyness[GPIO_ID_COUNT];
+ * One byte for each member of 'enum gpio_id'.
+ */
+ char *busyness[GPIO_ID_COUNT];
};
enum gpio_result dal_gpio_service_open(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
index 39ee8eba3c31..d1656c9d50df 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
@@ -126,7 +126,7 @@ static inline struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw
static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2)
{
struct bw_fixed res;
- div64_u64_rem(arg1.value, arg2.value, &res.value);
+ div64_u64_rem(arg1.value, arg2.value, (uint64_t *)&res.value);
return res;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/compressor.h b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
index bcb18f5e1e60..7a147a9762a0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/compressor.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
@@ -77,6 +77,7 @@ struct compressor_funcs {
};
struct compressor {
struct dc_context *ctx;
+ /* CONTROLLER_ID_D0 + instance, CONTROLLER_ID_UNDEFINED = 0 */
uint32_t attached_inst;
bool is_enabled;
const struct compressor_funcs *funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index c1976c175b57..b168a5e9dd9d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -82,7 +82,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
/********** DAL Core*********************/
-#include "display_clock.h"
+#include "hw/clk_mgr.h"
#include "transform.h"
#include "dpp.h"
@@ -169,6 +169,7 @@ struct resource_pool {
unsigned int audio_count;
struct audio_support audio_support;
+ struct clk_mgr *clk_mgr;
struct dccg *dccg;
struct irq_service *irqs;
@@ -271,6 +272,17 @@ union bw_context {
struct dce_bw_output dce;
};
+/**
+ * struct dc_state - The full description of a state requested by a user
+ *
+ * @streams: Stream properties
+ * @stream_status: The planes on a given stream
+ * @res_ctx: Persistent state of resources
+ * @bw: The output from bandwidth and watermark calculations
+ * @pp_display_cfg: PowerPlay clocks and settings
+ * @dcn_bw_vars: non-stack memory to support bandwidth calculations
+ *
+ */
struct dc_state {
struct dc_stream_state *streams[MAX_PIPES];
struct dc_stream_status stream_status[MAX_PIPES];
@@ -278,7 +290,6 @@ struct dc_state {
struct resource_context res_ctx;
- /* The output from BW and WM calculations. */
union bw_context bw;
/* Note: these are big structures, do *not* put on stack! */
@@ -287,7 +298,7 @@ struct dc_state {
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
- struct dccg *dis_clk;
+ struct clk_mgr *dccg;
struct kref refcount;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index e688eb9b975c..ece954a40a8e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -31,8 +31,8 @@
#define __DCN_CALCS_H__
#include "bw_fixed.h"
-#include "display_clock.h"
#include "../dml/display_mode_lib.h"
+#include "hw/clk_mgr.h"
struct dc;
struct dc_state;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index a83a48494613..abc961c0906e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -47,12 +47,18 @@ struct abm_funcs {
bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
bool (*set_abm_immediate_disable)(struct abm *abm);
bool (*init_backlight)(struct abm *abm);
- bool (*set_backlight_level)(struct abm *abm,
- unsigned int backlight_level,
+
+ /* backlight_pwm_u16_16 is an unsigned 32-bit value:
+ * 16-bit integer + 16-bit fraction, where 1.0 is the max backlight value.
+ */
+ bool (*set_backlight_level_pwm)(struct abm *abm,
+ unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int controller_id,
bool use_smooth_brightness);
- unsigned int (*get_current_backlight_8_bit)(struct abm *abm);
+
+ unsigned int (*get_current_backlight)(struct abm *abm);
+ unsigned int (*get_target_backlight)(struct abm *abm);
};
#endif
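As a sketch of the 16.16 convention (illustrative helper, not part of this patch): 0x00010000 represents 1.0, i.e. maximum backlight, so a 0-255 user level maps in as follows.

static inline unsigned int user_level_to_pwm_u16_16(unsigned int level_0_255)
{
	/* 255 maps to exactly 1.0 (0x00010000); lower levels keep
	 * 16 fractional bits of precision. */
	return (level_0_255 << 16) / 255;
}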
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 689faa16c0ae..23a4b18e5fee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -23,41 +23,25 @@
*
*/
-#ifndef __DISPLAY_CLOCK_H__
-#define __DISPLAY_CLOCK_H__
+#ifndef __DAL_CLK_MGR_H__
+#define __DAL_CLK_MGR_H__
#include "dm_services_types.h"
#include "dc.h"
-/* Structure containing all state-dependent clocks
- * (dependent on "enum clocks_state") */
-struct state_dependent_clocks {
- int display_clk_khz;
- int pixel_clk_khz;
-};
-
-struct dccg {
+struct clk_mgr {
struct dc_context *ctx;
- const struct display_clock_funcs *funcs;
+ const struct clk_mgr_funcs *funcs;
- enum dm_pp_clocks_state max_clks_state;
- enum dm_pp_clocks_state cur_min_clks_state;
struct dc_clocks clks;
};
-struct display_clock_funcs {
- void (*update_clocks)(struct dccg *dccg,
- struct dc_clocks *new_clocks,
+struct clk_mgr_funcs {
+ void (*update_clocks)(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
bool safe_to_lower);
- int (*set_dispclk)(struct dccg *dccg,
- int requested_clock_khz);
-
- int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
- bool (*update_dfs_bypass)(struct dccg *dccg,
- struct dc *dc,
- struct dc_state *context,
- int requested_clock_khz);
+ int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
};
-#endif /* __DISPLAY_CLOCK_H__ */
+#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
new file mode 100644
index 000000000000..95a56d012626
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCCG_H__
+#define __DAL_DCCG_H__
+
+#include "dc_types.h"
+
+struct dccg {
+ struct dc_context *ctx;
+ const struct dccg_funcs *funcs;
+
+ int ref_dppclk;
+};
+
+struct dccg_funcs {
+ void (*update_dpp_dto)(struct dccg *dccg,
+ int dpp_inst,
+ int req_dppclk);
+};
+
+#endif //__DAL_DCCG_H__
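With global clocks moved to clk_mgr, the slimmed-down dccg object keeps only the per-pipe DPP DTO. A hypothetical caller, where dpp_inst and req_dppclk are placeholders for the pipe instance and requested DPP clock:

	if (dccg->funcs->update_dpp_dto)
		dccg->funcs->update_dpp_dto(dccg, dpp_inst, req_dppclk);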
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index 4550747fb61c..cb85eaa9857f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -32,6 +32,13 @@ enum dmcu_state {
DMCU_RUNNING = 1
};
+struct dmcu_version {
+ unsigned int date;
+ unsigned int month;
+ unsigned int year;
+ unsigned int interface_version;
+};
+
struct dmcu {
struct dc_context *ctx;
const struct dmcu_funcs *funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 334c48cdafdc..04c6989aac58 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -63,6 +63,11 @@ struct hubp_funcs {
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+ void (*hubp_setup_interdependent)(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+
void (*dcc_control)(struct hubp *hubp, bool enable,
bool independent_64b_blks);
void (*mem_program_viewport)(
@@ -121,6 +126,7 @@ struct hubp_funcs {
void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
void (*hubp_read_state)(struct hubp *hubp);
+ void (*hubp_clear_underflow)(struct hubp *hubp);
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index cf7433ebf91a..da85537a4488 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -53,6 +53,12 @@ struct curve_points {
uint32_t custom_float_slope;
};
+struct curve_points3 {
+ struct curve_points red;
+ struct curve_points green;
+ struct curve_points blue;
+};
+
struct pwl_result_data {
struct fixed31_32 red;
struct fixed31_32 green;
@@ -71,9 +77,17 @@ struct pwl_result_data {
uint32_t delta_blue_reg;
};
+/* arr_curve_points - regamma regions/segments specification
+ * arr_points - beginning and end point specified separately (only one on DCE)
+ * corner_points - beginning and end point for all 3 colors (DCN)
+ * rgb_resulted - final curve
+ */
struct pwl_params {
struct gamma_curve arr_curve_points[34];
- struct curve_points arr_points[2];
+ union {
+ struct curve_points arr_points[2];
+ struct curve_points3 corner_points[2];
+ };
struct pwl_result_data rgb_resulted[256 + 3];
uint32_t hw_points_num;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index e28e9770e0a3..c20fdcaac53b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -65,7 +65,8 @@ struct encoder_feature_support {
enum dc_color_depth max_hdmi_deep_color;
unsigned int max_hdmi_pixel_clock;
- bool ycbcr420_supported;
+ bool hdmi_ycbcr420_supported;
+ bool dp_ycbcr420_supported;
};
union dpcd_psr_configuration {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index da89c2edb07c..06df02ddff6a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -31,7 +31,7 @@
#include "dml/display_mode_structs.h"
struct dchub_init_data;
-struct cstate_pstate_watermarks_st {
+struct cstate_pstate_watermarks_st1 {
uint32_t cstate_exit_ns;
uint32_t cstate_enter_plus_exit_ns;
uint32_t pstate_change_ns;
@@ -40,7 +40,7 @@ struct cstate_pstate_watermarks_st {
struct dcn_watermarks {
uint32_t pte_meta_urgent_ns;
uint32_t urgent_ns;
- struct cstate_pstate_watermarks_st cstate_pstate;
+ struct cstate_pstate_watermarks_st1 cstate_pstate;
};
struct dcn_watermark_set {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 26f29d5da3d8..d6a85f48b6d1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,8 +32,6 @@
#include "inc/hw/link_encoder.h"
#include "core_status.h"
-#define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF
-
enum pipe_gating_control {
PIPE_GATING_CONTROL_DISABLE = 0,
PIPE_GATING_CONTROL_ENABLE,
@@ -87,11 +85,6 @@ struct hw_sequencer_funcs {
void (*program_gamut_remap)(
struct pipe_ctx *pipe_ctx);
- void (*program_csc_matrix)(
- struct pipe_ctx *pipe_ctx,
- enum dc_color_space colorspace,
- uint16_t *matrix);
-
void (*program_output_csc)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
@@ -177,10 +170,12 @@ struct hw_sequencer_funcs {
struct pipe_ctx *pipe_ctx,
bool blank);
- void (*set_bandwidth)(
+ void (*prepare_bandwidth)(
struct dc *dc,
- struct dc_state *context,
- bool safe_to_lower);
+ struct dc_state *context);
+ void (*optimize_bandwidth)(
+ struct dc *dc,
+ struct dc_state *context);
void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
int vmin, int vmax);
@@ -205,16 +200,12 @@ struct hw_sequencer_funcs {
void (*log_hw_state)(struct dc *dc,
struct dc_log_buffer_ctx *log_ctx);
void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask);
+ void (*clear_status_bits)(struct dc *dc, unsigned int mask);
void (*wait_for_mpcc_disconnect)(struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx);
- void (*ready_shared_resources)(struct dc *dc, struct dc_state *context);
- void (*optimize_shared_resources)(struct dc *dc);
- void (*pplib_apply_display_requirements)(
- struct dc *dc,
- struct dc_state *context);
void (*edp_power_control)(
struct dc_link *link,
bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 33b99e3ab10d..0086a2f1d21a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -30,9 +30,6 @@
#include "dal_asic_id.h"
#include "dm_pp_smu.h"
-/* TODO unhardcode, 4 for CZ*/
-#define MEMORY_TYPE_MULTIPLIER 4
-
enum dce_version resource_parse_asic_id(
struct hw_asic_id asic_id);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index cdcefd087487..479b77c2e89e 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -306,6 +306,18 @@ static struct fixed31_32 translate_from_linear_space(
a1);
}
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
+{
+ struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
+
+ return translate_from_linear_space(arg,
+ dc_fixpt_zero,
+ dc_fixpt_zero,
+ dc_fixpt_zero,
+ dc_fixpt_zero,
+ gamma);
+}
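With all four spline coefficients zero, translate_from_linear_space degenerates to a pure power law, so calculate_gamma22(x) is effectively x^(1/2.2) for positive x, and 0 at or below zero.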
+
static struct fixed31_32 translate_to_linear_space(
struct fixed31_32 arg,
struct fixed31_32 a0,
@@ -709,6 +721,175 @@ static void build_regamma(struct pwl_float_data_ex *rgb_regamma,
}
}
+static void hermite_spline_eetf(struct fixed31_32 input_x,
+ struct fixed31_32 max_display,
+ struct fixed31_32 min_display,
+ struct fixed31_32 max_content,
+ struct fixed31_32 *out_x)
+{
+ struct fixed31_32 min_lum_pq;
+ struct fixed31_32 max_lum_pq;
+ struct fixed31_32 max_content_pq;
+ struct fixed31_32 ks;
+ struct fixed31_32 E1;
+ struct fixed31_32 E2;
+ struct fixed31_32 E3;
+ struct fixed31_32 t;
+ struct fixed31_32 t2;
+ struct fixed31_32 t3;
+ struct fixed31_32 two;
+ struct fixed31_32 three;
+ struct fixed31_32 temp1;
+ struct fixed31_32 temp2;
+ struct fixed31_32 a = dc_fixpt_from_fraction(15, 10);
+ struct fixed31_32 b = dc_fixpt_from_fraction(5, 10);
+ struct fixed31_32 epsilon = dc_fixpt_from_fraction(1, 1000000); // dc_fixpt_epsilon is a bit too small
+
+ if (dc_fixpt_eq(max_content, dc_fixpt_zero)) {
+ *out_x = dc_fixpt_zero;
+ return;
+ }
+
+ compute_pq(input_x, &E1);
+ compute_pq(dc_fixpt_div(min_display, max_content), &min_lum_pq);
+ compute_pq(dc_fixpt_div(max_display, max_content), &max_lum_pq);
+ compute_pq(dc_fixpt_one, &max_content_pq); // always 1? DAL2 code is weird
+ a = dc_fixpt_div(dc_fixpt_add(dc_fixpt_one, b), max_content_pq); // (1+b)/maxContent
+ ks = dc_fixpt_sub(dc_fixpt_mul(a, max_lum_pq), b); // a * max_lum_pq - b
+
+ if (dc_fixpt_lt(E1, ks))
+ E2 = E1;
+ else if (dc_fixpt_le(ks, E1) && dc_fixpt_le(E1, dc_fixpt_one)) {
+ if (dc_fixpt_lt(epsilon, dc_fixpt_sub(dc_fixpt_one, ks)))
+ // t = (E1 - ks) / (1 - ks)
+ t = dc_fixpt_div(dc_fixpt_sub(E1, ks),
+ dc_fixpt_sub(dc_fixpt_one, ks));
+ else
+ t = dc_fixpt_zero;
+
+ two = dc_fixpt_from_int(2);
+ three = dc_fixpt_from_int(3);
+
+ t2 = dc_fixpt_mul(t, t);
+ t3 = dc_fixpt_mul(t2, t);
+ temp1 = dc_fixpt_mul(two, t3);
+ temp2 = dc_fixpt_mul(three, t2);
+
+ // (2t^3 - 3t^2 + 1) * ks
+ E2 = dc_fixpt_mul(ks, dc_fixpt_add(dc_fixpt_one,
+ dc_fixpt_sub(temp1, temp2)));
+
+ // (-2t^3 + 3t^2) * max_lum_pq
+ E2 = dc_fixpt_add(E2, dc_fixpt_mul(max_lum_pq,
+ dc_fixpt_sub(temp2, temp1)));
+
+ temp1 = dc_fixpt_mul(two, t2);
+ temp2 = dc_fixpt_sub(dc_fixpt_one, ks);
+
+ // (t^3 - 2t^2 + t) * (1-ks)
+ E2 = dc_fixpt_add(E2, dc_fixpt_mul(temp2,
+ dc_fixpt_add(t, dc_fixpt_sub(t3, temp1))));
+ } else
+ E2 = dc_fixpt_one;
+
+ temp1 = dc_fixpt_sub(dc_fixpt_one, E2);
+ temp2 = dc_fixpt_mul(temp1, temp1);
+ temp2 = dc_fixpt_mul(temp2, temp2);
+ // temp2 = (1-E2)^4
+
+ E3 = dc_fixpt_add(E2, dc_fixpt_mul(min_lum_pq, temp2));
+ compute_de_pq(E3, out_x);
+
+ *out_x = dc_fixpt_div(*out_x, dc_fixpt_div(max_display, max_content));
+}
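The shape above is the familiar hermite-spline EETF (BT.2390-style roll-off). Purely as a reading aid, in LaTeX, writing the code's ks, max_lum_pq, min_lum_pq as $k_s$, $L_{max}$, $L_{min}$:

k_s = a\,L_{max} - b, \quad a = \frac{1+b}{\mathrm{PQ}(1)}, \quad b = \tfrac{1}{2}

E_2 = \begin{cases}
  E_1, & E_1 < k_s \\
  (2t^3 - 3t^2 + 1)\,k_s + (-2t^3 + 3t^2)\,L_{max} + (t^3 - 2t^2 + t)(1 - k_s), & k_s \le E_1 \le 1,\ t = \frac{E_1 - k_s}{1 - k_s} \\
  1, & E_1 > 1
\end{cases}

E_3 = E_2 + L_{min}\,(1 - E_2)^4, \qquad \mathrm{out} = \frac{\mathrm{dePQ}(E_3)}{\text{max\_display}/\text{max\_content}}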
+
+static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x,
+ const struct freesync_hdr_tf_params *fs_params)
+{
+ uint32_t i;
+ struct pwl_float_data_ex *rgb = rgb_regamma;
+ const struct hw_x_point *coord_x = coordinate_x;
+ struct fixed31_32 scaledX = dc_fixpt_zero;
+ struct fixed31_32 scaledX1 = dc_fixpt_zero;
+ struct fixed31_32 max_display;
+ struct fixed31_32 min_display;
+ struct fixed31_32 max_content;
+ struct fixed31_32 min_content;
+ struct fixed31_32 clip = dc_fixpt_one;
+ struct fixed31_32 output;
+ bool use_eetf = false;
+ bool is_clipped = false;
+ struct fixed31_32 sdr_white_level;
+
+ if (fs_params == NULL || fs_params->max_content == 0 ||
+ fs_params->max_display == 0)
+ return false;
+
+ max_display = dc_fixpt_from_int(fs_params->max_display);
+ min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
+ max_content = dc_fixpt_from_int(fs_params->max_content);
+ min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
+ sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
+
+ if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
+ min_display = dc_fixpt_from_fraction(1, 10);
+ if (fs_params->max_display < 100) // cap at 100 at the top
+ max_display = dc_fixpt_from_int(100);
+
+ if (fs_params->min_content < fs_params->min_display)
+ use_eetf = true;
+ else
+ min_content = min_display;
+
+ if (fs_params->max_content > fs_params->max_display)
+ use_eetf = true;
+ else
+ max_content = max_display;
+
+ rgb += 32; // first 32 points have problems with fixed point, too small
+ coord_x += 32;
+ for (i = 32; i <= hw_points_num; i++) {
+ if (!is_clipped) {
+ if (use_eetf) {
+ /* max content is equal to 1 */
+ scaledX1 = dc_fixpt_div(coord_x->x,
+ dc_fixpt_div(max_content, sdr_white_level));
+ hermite_spline_eetf(scaledX1, max_display, min_display,
+ max_content, &scaledX);
+ } else
+ scaledX = dc_fixpt_div(coord_x->x,
+ dc_fixpt_div(max_display, sdr_white_level));
+
+ if (dc_fixpt_lt(scaledX, clip)) {
+ if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
+ output = dc_fixpt_zero;
+ else
+ output = calculate_gamma22(scaledX);
+
+ rgb->r = output;
+ rgb->g = output;
+ rgb->b = output;
+ } else {
+ is_clipped = true;
+ rgb->r = clip;
+ rgb->g = clip;
+ rgb->b = clip;
+ }
+ } else {
+ rgb->r = clip;
+ rgb->g = clip;
+ rgb->b = clip;
+ }
+
+ ++coord_x;
+ ++rgb;
+ }
+
+ return true;
+}
+
static void build_degamma(struct pwl_float_data_ex *curve,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x, bool is_2_4)
@@ -1356,7 +1537,8 @@ static bool map_regamma_hw_to_x_user(
#define _EXTRA_POINTS 3
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
- const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed)
+ const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+ const struct freesync_hdr_tf_params *fs_params)
{
struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
struct dividers dividers;
@@ -1374,7 +1556,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
/* we can use hardcoded curve for plain SRGB TF */
if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
output_tf->tf == TRANSFER_FUNCTION_SRGB &&
- (!mapUserRamp && ramp->type == GAMMA_RGB_256))
+ (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256)))
return true;
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
@@ -1424,6 +1606,12 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
MAX_HW_POINTS,
coordinates_x,
output_tf->sdr_ref_white_level);
+ } else if (tf == TRANSFER_FUNCTION_GAMMA22 &&
+ fs_params != NULL) {
+ build_freesync_hdr(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ fs_params);
} else {
tf_pts->end_exponent = 0;
tf_pts->x_point_at_y1_red = 1;
@@ -1573,7 +1761,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
struct pwl_float_data *rgb_user = NULL;
struct pwl_float_data_ex *curve = NULL;
- struct gamma_pixel *axix_x = NULL;
+ struct gamma_pixel *axis_x = NULL;
struct pixel_gamma_point *coeff = NULL;
enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
bool ret = false;
@@ -1599,10 +1787,10 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
GFP_KERNEL);
if (!curve)
goto curve_alloc_fail;
- axix_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axix_x),
+ axis_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axis_x),
GFP_KERNEL);
- if (!axix_x)
- goto axix_x_alloc_fail;
+ if (!axis_x)
+ goto axis_x_alloc_fail;
coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
GFP_KERNEL);
if (!coeff)
@@ -1615,7 +1803,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
tf = input_tf->tf;
build_evenly_distributed_points(
- axix_x,
+ axis_x,
ramp->num_entries,
dividers);
@@ -1640,7 +1828,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
tf_pts->x_point_at_y1_blue = 1;
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
- coordinates_x, axix_x, curve,
+ coordinates_x, axis_x, curve,
MAX_HW_POINTS, tf_pts,
mapUserRamp && ramp->type != GAMMA_CUSTOM);
if (ramp->type == GAMMA_CUSTOM)
@@ -1650,8 +1838,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
kvfree(coeff);
coeff_alloc_fail:
- kvfree(axix_x);
-axix_x_alloc_fail:
+ kvfree(axis_x);
+axis_x_alloc_fail:
kvfree(curve);
curve_alloc_fail:
kvfree(rgb_user);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 63ccb9c91224..a6e164df090a 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -73,12 +73,21 @@ struct regamma_lut {
};
};
+struct freesync_hdr_tf_params {
+ unsigned int sdr_white_level;
+ unsigned int min_content; // luminance in 1/10000 nits
+ unsigned int max_content; // luminance in nits
+ unsigned int min_display; // luminance in 1/10000 nits
+ unsigned int max_display; // luminance in nits
+};
+
void setup_x_points_distribution(void);
void precompute_pq(void);
void precompute_de_pq(void);
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
- const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed);
+ const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+ const struct freesync_hdr_tf_params *fs_params);
bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp);
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 4018c7180d00..1544ed3f1747 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,6 +37,8 @@
#define RENDER_TIMES_MAX_COUNT 10
/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
#define BTR_EXIT_MARGIN 2000
+/* Threshold to exit fixed refresh rate */
+#define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4
/* Number of consecutive frames to check before entering/exiting fixed refresh*/
#define FIXED_REFRESH_ENTER_FRAME_COUNT 5
#define FIXED_REFRESH_EXIT_FRAME_COUNT 5
@@ -257,40 +259,14 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
-
- /* Exit Fixed Refresh mode */
- } else if (in_out_vrr->fixed.fixed_active) {
-
- in_out_vrr->fixed.frame_counter++;
-
- if (in_out_vrr->fixed.frame_counter >
- FIXED_REFRESH_EXIT_FRAME_COUNT) {
- in_out_vrr->fixed.frame_counter = 0;
- in_out_vrr->fixed.fixed_active = false;
- }
}
} else if (last_render_time_in_us > max_render_time_in_us) {
/* Enter Below the Range */
- if (!in_out_vrr->btr.btr_active &&
- in_out_vrr->btr.btr_enabled) {
- in_out_vrr->btr.btr_active = true;
-
- /* Enter Fixed Refresh mode */
- } else if (!in_out_vrr->fixed.fixed_active &&
- !in_out_vrr->btr.btr_enabled) {
- in_out_vrr->fixed.frame_counter++;
-
- if (in_out_vrr->fixed.frame_counter >
- FIXED_REFRESH_ENTER_FRAME_COUNT) {
- in_out_vrr->fixed.frame_counter = 0;
- in_out_vrr->fixed.fixed_active = true;
- }
- }
+ in_out_vrr->btr.btr_active = true;
}
/* BTR set to "not active" so disengage */
if (!in_out_vrr->btr.btr_active) {
- in_out_vrr->btr.btr_active = false;
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
@@ -375,7 +351,12 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
bool update = false;
unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
- if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+ // Compute the exit refresh rate and exit frame duration
+ unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us)
+ + (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ));
+ unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz;
+
+ if (last_render_time_in_us < exit_frame_duration_in_us) {
/* Exit Fixed Refresh mode */
if (in_out_vrr->fixed.fixed_active) {
in_out_vrr->fixed.frame_counter++;
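A worked example of the exit test above: with max_render_time_in_us = 25000 (a 40 Hz lower bound), exit_refresh_rate_in_milli_hz = 1000000000/25000 + 1000*4 = 44000, so exit_frame_duration_in_us = 1000000000/44000 ≈ 22727. Fixed refresh is left only once frames render faster than about 22.7 ms (≈ 44 Hz), which provides the 4 Hz hysteresis the margin is named for.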
@@ -627,12 +608,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
struct dc_info_packet *infopacket)
{
- if (app_tf != transfer_func_unknown) {
+ if (app_tf != TRANSFER_FUNC_UNKNOWN) {
infopacket->valid = true;
infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active]
- if (app_tf == transfer_func_gamma_22) {
+ if (app_tf == TRANSFER_FUNC_GAMMA_22) {
infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]
}
}
@@ -707,11 +688,11 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
return;
switch (packet_type) {
- case packet_type_fs2:
+ case PACKET_TYPE_FS2:
build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
break;
- case packet_type_vrr:
- case packet_type_fs1:
+ case PACKET_TYPE_VRR:
+ case PACKET_TYPE_FS1:
default:
build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 786b34380f85..5b1c9a4c7643 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -26,15 +26,13 @@
#ifndef MOD_INFO_PACKET_H_
#define MOD_INFO_PACKET_H_
-struct info_packet_inputs {
- const struct dc_stream_state *pStream;
-};
+#include "mod_shared.h"
-struct info_packets {
- struct dc_info_packet *pVscInfoPacket;
-};
+// Forward declarations
+struct dc_stream_state;
+struct dc_info_packet;
-void mod_build_infopackets(struct info_packet_inputs *inputs,
- struct info_packets *info_packets);
+void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet);
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
index 238c431ae483..1bd02c0ac30c 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -23,27 +23,26 @@
*
*/
-
#ifndef MOD_SHARED_H_
#define MOD_SHARED_H_
enum color_transfer_func {
- transfer_func_unknown,
- transfer_func_srgb,
- transfer_func_bt709,
- transfer_func_pq2084,
- transfer_func_pq2084_interim,
- transfer_func_linear_0_1,
- transfer_func_linear_0_125,
- transfer_func_dolbyvision,
- transfer_func_gamma_22,
- transfer_func_gamma_26
+ TRANSFER_FUNC_UNKNOWN,
+ TRANSFER_FUNC_SRGB,
+ TRANSFER_FUNC_BT709,
+ TRANSFER_FUNC_PQ2084,
+ TRANSFER_FUNC_PQ2084_INTERIM,
+ TRANSFER_FUNC_LINEAR_0_1,
+ TRANSFER_FUNC_LINEAR_0_125,
+ TRANSFER_FUNC_GAMMA_22,
+ TRANSFER_FUNC_GAMMA_26
};
enum vrr_packet_type {
- packet_type_vrr,
- packet_type_fs1,
- packet_type_fs2
+ PACKET_TYPE_VRR,
+ PACKET_TYPE_FS1,
+ PACKET_TYPE_FS2
};
+
#endif /* MOD_SHARED_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index ff8bfb9b43b0..db06fab2ad5c 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -25,6 +25,10 @@
#include "mod_info_packet.h"
#include "core_types.h"
+#include "dc_types.h"
+#include "mod_shared.h"
+
+#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
enum ColorimetryRGBDP {
ColorimetryRGB_DP_sRGB = 0,
@@ -41,7 +45,7 @@ enum ColorimetryYCCDP {
ColorimetryYCC_DP_ITU2020YCbCr = 7,
};
-static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet)
{
unsigned int vscPacketRevision = 0;
@@ -159,7 +163,7 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
* DPCD register is exposed in the new Extended Receiver Capability field for DPCD Rev. 1.4
* (and higher). When MISC1. bit 6. is Set to 1, a Source device uses a VSC SDP to indicate
* the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and
- * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become “don’t care”).)
+ * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").)
*/
if (vscPacketRevision == 0x5) {
/* Secondary-data Packet ID = 0 */
@@ -320,10 +324,3 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
-void mod_build_infopackets(struct info_packet_inputs *inputs,
- struct info_packets *info_packets)
-{
- if (info_packets->pVscInfoPacket != NULL)
- mod_build_vsc_infopacket(inputs->pStream, info_packets->pVscInfoPacket);
-}
-
diff --git a/drivers/gpu/drm/amd/display/modules/power/Makefile b/drivers/gpu/drm/amd/display/modules/power/Makefile
new file mode 100644
index 000000000000..87851f892a52
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/Makefile
@@ -0,0 +1,31 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'power' sub-module of DAL.
+#
+
+MOD_POWER = power_helpers.o
+
+AMD_DAL_MOD_POWER = $(addprefix $(AMDDALPATH)/modules/power/,$(MOD_POWER))
+#$(info ************ DAL POWER MODULE MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MOD_POWER) \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
new file mode 100644
index 000000000000..00f63b7dd32f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -0,0 +1,326 @@
+/* Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "power_helpers.h"
+#include "dc/inc/hw/dmcu.h"
+
+#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
+
+/* Possible Min Reduction config from least aggressive to most aggressive
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12
+ * 100 98.0 94.1 94.1 85.1 80.3 75.3 69.4 60.0 57.6 50.2 49.8 40.0 %
+ */
+static const unsigned char min_reduction_table[13] = {
+0xff, 0xfa, 0xf0, 0xf0, 0xd9, 0xcd, 0xc0, 0xb1, 0x99, 0x93, 0x80, 0x82, 0x66};
+
+/* Possible Max Reduction configs from least aggressive to most aggressive
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12
+ * 96.1 89.8 85.1 80.3 69.4 64.7 64.7 50.2 39.6 30.2 30.2 30.2 19.6 %
+ */
+static const unsigned char max_reduction_table[13] = {
+0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32};
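+/* Each table entry encodes a fraction of full brightness as
+ * percent = 100 * entry / 255; e.g. 0xfa -> 250 / 255 ~= 98.0% in the min
+ * table and 0x32 -> 50 / 255 ~= 19.6% in the max table, matching the
+ * percentages listed above.
+ */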
+
+/* Predefined ABM configuration sets. We may have different configuration sets
+ * in order to satisfy different power/quality requirements.
+ */
+static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_level] = {
+/* ABM Level 1, ABM Level 2, ABM Level 3, ABM Level 4 */
+{ 2, 5, 7, 8 }, /* Default - Medium aggressiveness */
+{ 2, 5, 8, 11 }, /* Alt #1 - Increased aggressiveness */
+{ 0, 2, 4, 8 }, /* Alt #2 - Minimal aggressiveness */
+{ 3, 6, 10, 12 }, /* Alt #3 - Super aggressiveness */
+};
+
+#define NUM_AMBI_LEVEL 5
+#define NUM_AGGR_LEVEL 4
+#define NUM_POWER_FN_SEGS 8
+#define NUM_BL_CURVE_SEGS 16
+
+/* NOTE: iRAM is 256B in size */
+struct iram_table_v_2 {
+ /* flags */
+ uint16_t flags; /* 0x00 U16 */
+
+ /* parameters for ABM2.0 algorithm */
+ uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */
+ uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */
+ uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */
+ uint8_t bright_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */
+ uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x52 U2.6 */
+ uint8_t dark_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x66 U2.6 */
+ uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x7a U0.8 */
+ uint8_t deviation_gain; /* 0x7f U0.8 */
+
+ /* parameters for crgb conversion */
+ uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
+ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
+ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
+
+ /* parameters for custom curve */
+ /* thresholds for brightness --> backlight */
+ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
+ /* offsets for brightness --> backlight */
+ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
+
+ /* For reading PSR State directly from IRAM */
+ uint8_t psr_state; /* 0xf0 */
+ uint8_t dmcu_interface_version; /* 0xf1 */
+ uint8_t dmcu_date_version_year_b0; /* 0xf2 */
+ uint8_t dmcu_date_version_year_b1; /* 0xf3 */
+ uint8_t dmcu_date_version_month; /* 0xf4 */
+ uint8_t dmcu_date_version_day; /* 0xf5 */
+ uint8_t dmcu_state; /* 0xf6 */
+
+ uint16_t blRampReduction; /* 0xf7 */
+ uint16_t blRampStart; /* 0xf9 */
+ uint8_t dummy5; /* 0xfb */
+ uint8_t dummy6; /* 0xfc */
+ uint8_t dummy7; /* 0xfd */
+ uint8_t dummy8; /* 0xfe */
+ uint8_t dummy9; /* 0xff */
+};
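+
+/* Layout check: each 5x4 gain table above occupies 20 (0x14) bytes, so
+ * min_reduction at offset 0x02 runs to 0x16 where max_reduction begins,
+ * and dummy9 at 0xff fills the last byte of the 256-byte iRAM window.
+ */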
+
+static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
+{
+ return (uint16_t)(backlight_8bit * 0x101);
+}
+
+static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
+ struct iram_table_v_2 *table)
+{
+ unsigned int i;
+ unsigned int num_entries = NUM_BL_CURVE_SEGS;
+ unsigned int query_input_8bit;
+ unsigned int query_output_8bit;
+ unsigned int lut_index;
+
+ table->backlight_thresholds[0] = 0;
+ table->backlight_offsets[0] = params.backlight_lut_array[0];
+ table->backlight_thresholds[num_entries-1] = 0xFFFF;
+ table->backlight_offsets[num_entries-1] =
+ params.backlight_lut_array[params.backlight_lut_array_size - 1];
+
+ /* Set up all brightness levels between 0% and 100% (exclusive).
+ * Fills the brightness-to-backlight transform table. The custom
+ * backlight curve maps brightness to backlight as a set of
+ * thresholds and a set of offsets that together approximate the
+ * curve with 16 uniformly spaced linear segments. Each
+ * threshold/offset is a 16-bit entry (U16.0, matching the struct
+ * layout above).
+ */
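+ /* Worked example, assuming i = 1 with NUM_BL_CURVE_SEGS = 16:
+ * query_input_8bit = DIV_ROUNDUP(1 * 256, 16) = (256 + 8) / 16 = 16
+ * (note the local DIV_ROUNDUP macro rounds to nearest, not up), and
+ * backlight_8_to_16(16) = 16 * 0x101 = 0x1010 -- the 8-bit code is
+ * replicated into both bytes to span the full 16-bit range.
+ */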
+ for (i = 1; i+1 < num_entries; i++) {
+ query_input_8bit = DIV_ROUNDUP((i * 256), num_entries);
+
+ lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
+ ASSERT(lut_index < params.backlight_lut_array_size);
+ query_output_8bit = params.backlight_lut_array[lut_index] >> 8;
+
+ table->backlight_thresholds[i] =
+ backlight_8_to_16(query_input_8bit);
+ table->backlight_offsets[i] =
+ backlight_8_to_16(query_output_8bit);
+ }
+}
+
+bool dmcu_load_iram(struct dmcu *dmcu,
+ struct dmcu_iram_parameters params)
+{
+ struct iram_table_v_2 ram_table;
+ unsigned int set = params.set;
+
+ if (dmcu == NULL)
+ return false;
+
+ if (!dmcu->funcs->is_dmcu_initialized(dmcu))
+ return true;
+
+ memset(&ram_table, 0, sizeof(ram_table));
+
+ ram_table.flags = 0x0;
+ ram_table.deviation_gain = 0xb3;
+
+ ram_table.blRampReduction =
+ cpu_to_be16(params.backlight_ramping_reduction);
+ ram_table.blRampStart =
+ cpu_to_be16(params.backlight_ramping_start);
+
+ ram_table.min_reduction[0][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.min_reduction[1][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.min_reduction[2][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.min_reduction[3][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.min_reduction[4][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[0][0] = max_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[1][0] = max_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[2][0] = max_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[3][0] = max_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[4][0] = max_reduction_table[abm_config[set][0]];
+
+ ram_table.min_reduction[0][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.min_reduction[1][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.min_reduction[2][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.min_reduction[3][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.min_reduction[4][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[0][1] = max_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[1][1] = max_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[2][1] = max_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[3][1] = max_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[4][1] = max_reduction_table[abm_config[set][1]];
+
+ ram_table.min_reduction[0][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.min_reduction[1][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.min_reduction[2][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.min_reduction[3][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.min_reduction[4][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[0][2] = max_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[1][2] = max_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[2][2] = max_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[3][2] = max_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[4][2] = max_reduction_table[abm_config[set][2]];
+
+ ram_table.min_reduction[0][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.min_reduction[1][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.min_reduction[2][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.min_reduction[3][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.min_reduction[4][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[0][3] = max_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[1][3] = max_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[2][3] = max_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[3][3] = max_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[4][3] = max_reduction_table[abm_config[set][3]];
+
+ ram_table.bright_pos_gain[0][0] = 0x20;
+ ram_table.bright_pos_gain[0][1] = 0x20;
+ ram_table.bright_pos_gain[0][2] = 0x20;
+ ram_table.bright_pos_gain[0][3] = 0x20;
+ ram_table.bright_pos_gain[1][0] = 0x20;
+ ram_table.bright_pos_gain[1][1] = 0x20;
+ ram_table.bright_pos_gain[1][2] = 0x20;
+ ram_table.bright_pos_gain[1][3] = 0x20;
+ ram_table.bright_pos_gain[2][0] = 0x20;
+ ram_table.bright_pos_gain[2][1] = 0x20;
+ ram_table.bright_pos_gain[2][2] = 0x20;
+ ram_table.bright_pos_gain[2][3] = 0x20;
+ ram_table.bright_pos_gain[3][0] = 0x20;
+ ram_table.bright_pos_gain[3][1] = 0x20;
+ ram_table.bright_pos_gain[3][2] = 0x20;
+ ram_table.bright_pos_gain[3][3] = 0x20;
+ ram_table.bright_pos_gain[4][0] = 0x20;
+ ram_table.bright_pos_gain[4][1] = 0x20;
+ ram_table.bright_pos_gain[4][2] = 0x20;
+ ram_table.bright_pos_gain[4][3] = 0x20;
+ ram_table.bright_neg_gain[0][1] = 0x00;
+ ram_table.bright_neg_gain[0][2] = 0x00;
+ ram_table.bright_neg_gain[0][3] = 0x00;
+ ram_table.bright_neg_gain[1][0] = 0x00;
+ ram_table.bright_neg_gain[1][1] = 0x00;
+ ram_table.bright_neg_gain[1][2] = 0x00;
+ ram_table.bright_neg_gain[1][3] = 0x00;
+ ram_table.bright_neg_gain[2][0] = 0x00;
+ ram_table.bright_neg_gain[2][1] = 0x00;
+ ram_table.bright_neg_gain[2][2] = 0x00;
+ ram_table.bright_neg_gain[2][3] = 0x00;
+ ram_table.bright_neg_gain[3][0] = 0x00;
+ ram_table.bright_neg_gain[3][1] = 0x00;
+ ram_table.bright_neg_gain[3][2] = 0x00;
+ ram_table.bright_neg_gain[3][3] = 0x00;
+ ram_table.bright_neg_gain[4][0] = 0x00;
+ ram_table.bright_neg_gain[4][1] = 0x00;
+ ram_table.bright_neg_gain[4][2] = 0x00;
+ ram_table.bright_neg_gain[4][3] = 0x00;
+ ram_table.dark_pos_gain[0][0] = 0x00;
+ ram_table.dark_pos_gain[0][1] = 0x00;
+ ram_table.dark_pos_gain[0][2] = 0x00;
+ ram_table.dark_pos_gain[0][3] = 0x00;
+ ram_table.dark_pos_gain[1][0] = 0x00;
+ ram_table.dark_pos_gain[1][1] = 0x00;
+ ram_table.dark_pos_gain[1][2] = 0x00;
+ ram_table.dark_pos_gain[1][3] = 0x00;
+ ram_table.dark_pos_gain[2][0] = 0x00;
+ ram_table.dark_pos_gain[2][1] = 0x00;
+ ram_table.dark_pos_gain[2][2] = 0x00;
+ ram_table.dark_pos_gain[2][3] = 0x00;
+ ram_table.dark_pos_gain[3][0] = 0x00;
+ ram_table.dark_pos_gain[3][1] = 0x00;
+ ram_table.dark_pos_gain[3][2] = 0x00;
+ ram_table.dark_pos_gain[3][3] = 0x00;
+ ram_table.dark_pos_gain[4][0] = 0x00;
+ ram_table.dark_pos_gain[4][1] = 0x00;
+ ram_table.dark_pos_gain[4][2] = 0x00;
+ ram_table.dark_pos_gain[4][3] = 0x00;
+ ram_table.dark_neg_gain[0][0] = 0x00;
+ ram_table.dark_neg_gain[0][1] = 0x00;
+ ram_table.dark_neg_gain[0][2] = 0x00;
+ ram_table.dark_neg_gain[0][3] = 0x00;
+ ram_table.dark_neg_gain[1][0] = 0x00;
+ ram_table.dark_neg_gain[1][1] = 0x00;
+ ram_table.dark_neg_gain[1][2] = 0x00;
+ ram_table.dark_neg_gain[1][3] = 0x00;
+ ram_table.dark_neg_gain[2][0] = 0x00;
+ ram_table.dark_neg_gain[2][1] = 0x00;
+ ram_table.dark_neg_gain[2][2] = 0x00;
+ ram_table.dark_neg_gain[2][3] = 0x00;
+ ram_table.dark_neg_gain[3][0] = 0x00;
+ ram_table.dark_neg_gain[3][1] = 0x00;
+ ram_table.dark_neg_gain[3][2] = 0x00;
+ ram_table.dark_neg_gain[3][3] = 0x00;
+ ram_table.dark_neg_gain[4][0] = 0x00;
+ ram_table.dark_neg_gain[4][1] = 0x00;
+ ram_table.dark_neg_gain[4][2] = 0x00;
+ ram_table.dark_neg_gain[4][3] = 0x00;
+ ram_table.iir_curve[0] = 0x65;
+ ram_table.iir_curve[1] = 0x65;
+ ram_table.iir_curve[2] = 0x65;
+ ram_table.iir_curve[3] = 0x65;
+ ram_table.iir_curve[4] = 0x65;
+ ram_table.crgb_thresh[0] = cpu_to_be16(0x13b6);
+ ram_table.crgb_thresh[1] = cpu_to_be16(0x1648);
+ ram_table.crgb_thresh[2] = cpu_to_be16(0x18e3);
+ ram_table.crgb_thresh[3] = cpu_to_be16(0x1b41);
+ ram_table.crgb_thresh[4] = cpu_to_be16(0x1d46);
+ ram_table.crgb_thresh[5] = cpu_to_be16(0x1f21);
+ ram_table.crgb_thresh[6] = cpu_to_be16(0x2167);
+ ram_table.crgb_thresh[7] = cpu_to_be16(0x2384);
+ ram_table.crgb_offset[0] = cpu_to_be16(0x2999);
+ ram_table.crgb_offset[1] = cpu_to_be16(0x3999);
+ ram_table.crgb_offset[2] = cpu_to_be16(0x4666);
+ ram_table.crgb_offset[3] = cpu_to_be16(0x5999);
+ ram_table.crgb_offset[4] = cpu_to_be16(0x6333);
+ ram_table.crgb_offset[5] = cpu_to_be16(0x7800);
+ ram_table.crgb_offset[6] = cpu_to_be16(0x8c00);
+ ram_table.crgb_offset[7] = cpu_to_be16(0xa000);
+ ram_table.crgb_slope[0] = cpu_to_be16(0x3147);
+ ram_table.crgb_slope[1] = cpu_to_be16(0x2978);
+ ram_table.crgb_slope[2] = cpu_to_be16(0x23a2);
+ ram_table.crgb_slope[3] = cpu_to_be16(0x1f55);
+ ram_table.crgb_slope[4] = cpu_to_be16(0x1c63);
+ ram_table.crgb_slope[5] = cpu_to_be16(0x1a0f);
+ ram_table.crgb_slope[6] = cpu_to_be16(0x178d);
+ ram_table.crgb_slope[7] = cpu_to_be16(0x15ab);
+
+ fill_backlight_transform_table(
+ params, &ram_table);
+
+ return dmcu->funcs->load_iram(
+ dmcu, 0, (char *)(&ram_table), sizeof(ram_table));
+}
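
A hedged usage sketch for the new helper: fill struct dmcu_iram_parameters with a backlight LUT and an abm_config set index, then load the iRAM image. The 16-entry linear LUT is an assumption for illustration; only dmcu_load_iram() and the parameter struct come from this patch:

	static bool example_program_abm(struct dmcu *dmcu)
	{
		/* 16-bit backlight codes; a linear curve stands in for
		 * panel-specific calibration data.
		 */
		static unsigned int lut[16];
		struct dmcu_iram_parameters params;
		unsigned int i;

		for (i = 0; i < 16; i++)
			lut[i] = i * 0xFFFF / 15;

		params.backlight_lut_array = lut;
		params.backlight_lut_array_size = 16;
		params.backlight_ramping_reduction = 0;	/* no ramping */
		params.backlight_ramping_start = 0;
		params.set = 0;	/* default (medium aggressiveness) row */

		return dmcu_load_iram(dmcu, params);
	}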
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
new file mode 100644
index 000000000000..da5df00fedce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -0,0 +1,47 @@
+/* Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MODULES_POWER_POWER_HELPERS_H_
+#define MODULES_POWER_POWER_HELPERS_H_
+
+#include "dc/inc/hw/dmcu.h"
+
+
+enum abm_defines {
+ abm_defines_max_level = 4,
+ abm_defines_max_config = 4,
+};
+
+struct dmcu_iram_parameters {
+ unsigned int *backlight_lut_array;
+ unsigned int backlight_lut_array_size;
+ unsigned int backlight_ramping_reduction;
+ unsigned int backlight_ramping_start;
+ unsigned int set;
+};
+
+bool dmcu_load_iram(struct dmcu *dmcu,
+ struct dmcu_iram_parameters params);
+
+#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index 9b9699fc433f..c72cbfe8f684 100644
--- a/drivers/gpu/drm/amd/include/amd_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -52,6 +52,30 @@ struct atif_sbios_requests {
u8 backlight_level; /* panel backlight level (0-255) */
} __packed;
+struct atif_qbtc_arguments {
+ u16 size; /* structure size in bytes (includes size field) */
+ u8 requested_display; /* which display is requested */
+} __packed;
+
+#define ATIF_QBTC_MAX_DATA_POINTS 99
+
+struct atif_qbtc_data_point {
+ u8 luminance; /* luminance in percent */
+ u8 input_signal; /* input signal in range 0-255 */
+} __packed;
+
+struct atif_qbtc_output {
+ u16 size; /* structure size in bytes (includes size field) */
+ u16 flags; /* all zeroes */
+ u8 error_code; /* error code */
+ u8 ac_level; /* default brightness on AC power */
+ u8 dc_level; /* default brightness on DC power */
+ u8 min_input_signal; /* min input signal in range 0-255 */
+ u8 max_input_signal; /* max input signal in range 0-255 */
+ u8 number_of_points; /* number of data points */
+ struct atif_qbtc_data_point data_points[ATIF_QBTC_MAX_DATA_POINTS];
+} __packed;
+
#define ATIF_NOTIFY_MASK 0x3
#define ATIF_NOTIFY_NONE 0
#define ATIF_NOTIFY_81 1
@@ -126,26 +150,18 @@ struct atcs_pref_req_output {
* DWORD - supported functions bit vector
*/
/* Notifications mask */
-# define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED (1 << 0)
-# define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED (1 << 1)
# define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2)
# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3)
# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4)
-# define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED (1 << 5)
-# define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED (1 << 6)
# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7)
# define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8)
+# define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED (1 << 12)
/* supported functions vector */
# define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0)
# define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1)
-# define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED (1 << 2)
-# define ATIF_GET_LID_STATE_SUPPORTED (1 << 3)
-# define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED (1 << 4)
-# define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED (1 << 5)
-# define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED (1 << 6)
-# define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED (1 << 7)
# define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12)
-# define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED (1 << 14)
+# define ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED (1 << 15)
+# define ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED (1 << 16)
# define ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED (1 << 20)
#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1
/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
@@ -170,6 +186,10 @@ struct atcs_pref_req_output {
* n (0xd0-0xd9) is specified in notify command code.
* bit 2:
* 1 - lid changes not reported through int10
+ * bit 3:
+ * 1 - system bios controls overclocking
+ * bit 4:
+ * 1 - enable overclocking
*/
#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2
/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
@@ -177,28 +197,23 @@ struct atcs_pref_req_output {
* OUTPUT:
* WORD - structure size in bytes (includes size field)
* DWORD - pending sbios requests
- * BYTE - panel expansion mode
+ * BYTE - reserved (all zeroes)
* BYTE - thermal state: target gfx controller
* BYTE - thermal state: state id (0: exit state, non-0: state)
* BYTE - forced power state: target gfx controller
- * BYTE - forced power state: state id
+ * BYTE - forced power state: state id (0: forced state, non-0: state)
* BYTE - system power source
* BYTE - panel backlight level (0-255)
+ * BYTE - GPU package power limit: target gfx controller
+ * DWORD - GPU package power limit: value (24:8 fractional format, Watts)
*/
/* pending sbios requests */
-# define ATIF_DISPLAY_SWITCH_REQUEST (1 << 0)
-# define ATIF_EXPANSION_MODE_CHANGE_REQUEST (1 << 1)
# define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2)
# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3)
# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4)
-# define ATIF_DISPLAY_CONF_CHANGE_REQUEST (1 << 5)
-# define ATIF_PX_GFX_SWITCH_REQUEST (1 << 6)
# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7)
# define ATIF_DGPU_DISPLAY_EVENT (1 << 8)
-/* panel expansion mode */
-# define ATIF_PANEL_EXPANSION_DISABLE 0
-# define ATIF_PANEL_EXPANSION_FULL 1
-# define ATIF_PANEL_EXPANSION_ASPECT 2
+# define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST (1 << 12)
/* target gfx controller */
# define ATIF_TARGET_GFX_SINGLE 0
# define ATIF_TARGET_GFX_PX_IGPU 1
@@ -208,76 +223,6 @@ struct atcs_pref_req_output {
# define ATIF_POWER_SOURCE_DC 2
# define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3
# define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4
-#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS 0x3
-/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
- * ARG1:
- * WORD - structure size in bytes (includes size field)
- * WORD - selected displays
- * WORD - connected displays
- * OUTPUT:
- * WORD - structure size in bytes (includes size field)
- * WORD - selected displays
- */
-# define ATIF_LCD1 (1 << 0)
-# define ATIF_CRT1 (1 << 1)
-# define ATIF_TV (1 << 2)
-# define ATIF_DFP1 (1 << 3)
-# define ATIF_CRT2 (1 << 4)
-# define ATIF_LCD2 (1 << 5)
-# define ATIF_DFP2 (1 << 7)
-# define ATIF_CV (1 << 8)
-# define ATIF_DFP3 (1 << 9)
-# define ATIF_DFP4 (1 << 10)
-# define ATIF_DFP5 (1 << 11)
-# define ATIF_DFP6 (1 << 12)
-#define ATIF_FUNCTION_GET_LID_STATE 0x4
-/* ARG0: ATIF_FUNCTION_GET_LID_STATE
- * ARG1: none
- * OUTPUT:
- * WORD - structure size in bytes (includes size field)
- * BYTE - lid state (0: open, 1: closed)
- *
- * GET_LID_STATE only works at boot and resume, for general lid
- * status, use the kernel provided status
- */
-#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS 0x5
-/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
- * ARG1: none
- * OUTPUT:
- * WORD - structure size in bytes (includes size field)
- * BYTE - 0
- * BYTE - TV standard
- */
-# define ATIF_TV_STD_NTSC 0
-# define ATIF_TV_STD_PAL 1
-# define ATIF_TV_STD_PALM 2
-# define ATIF_TV_STD_PAL60 3
-# define ATIF_TV_STD_NTSCJ 4
-# define ATIF_TV_STD_PALCN 5
-# define ATIF_TV_STD_PALN 6
-# define ATIF_TV_STD_SCART_RGB 9
-#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS 0x6
-/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
- * ARG1:
- * WORD - structure size in bytes (includes size field)
- * BYTE - 0
- * BYTE - TV standard
- * OUTPUT: none
- */
-#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS 0x7
-/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
- * ARG1: none
- * OUTPUT:
- * WORD - structure size in bytes (includes size field)
- * BYTE - panel expansion mode
- */
-#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS 0x8
-/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
- * ARG1:
- * WORD - structure size in bytes (includes size field)
- * BYTE - panel expansion mode
- * OUTPUT: none
- */
#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD
/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
* ARG1:
@@ -286,21 +231,43 @@ struct atcs_pref_req_output {
* BYTE - current temperature (degrees Celsius)
* OUTPUT: none
*/
-#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES 0xF
-/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
- * ARG1: none
+#define ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS 0x10
+/* ARG0: ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - requested display
* OUTPUT:
- * WORD - number of gfx devices
- * WORD - device structure size in bytes (excludes device size field)
- * DWORD - flags \
- * WORD - bus number } repeated structure
- * WORD - device number /
+ * WORD - structure size in bytes (includes size field)
+ * WORD - flags (currently all 16 bits are reserved)
+ * BYTE - error code (on failure, disregard all below fields)
+ * BYTE - AC level (default brightness in percent when machine has full power)
+ * BYTE - DC level (default brightness in percent when machine is on battery)
+ * BYTE - min input signal, in range 0-255, corresponding to 0% backlight
+ * BYTE - max input signal, in range 0-255, corresponding to 100% backlight
+ * BYTE - number of reported data points
+ * BYTE - luminance level in percent \ repeated structure
+ * BYTE - input signal in range 0-255 / does not have entries for 0% and 100%
+ */
+/* requested display */
+# define ATIF_QBTC_REQUEST_LCD1 0
+# define ATIF_QBTC_REQUEST_CRT1 1
+# define ATIF_QBTC_REQUEST_DFP1 3
+# define ATIF_QBTC_REQUEST_CRT2 4
+# define ATIF_QBTC_REQUEST_LCD2 5
+# define ATIF_QBTC_REQUEST_DFP2 7
+# define ATIF_QBTC_REQUEST_DFP3 9
+# define ATIF_QBTC_REQUEST_DFP4 10
+# define ATIF_QBTC_REQUEST_DFP5 11
+# define ATIF_QBTC_REQUEST_DFP6 12
+/* error code */
+# define ATIF_QBTC_ERROR_CODE_SUCCESS 0
+# define ATIF_QBTC_ERROR_CODE_FAILURE 1
+# define ATIF_QBTC_ERROR_CODE_DEVICE_NOT_SUPPORTED 2
+#define ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION 0x11
+/* ARG0: ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION
+ * ARG1: none
+ * OUTPUT: none
*/
-/* flags */
-# define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE (1 << 0)
-# define ATIF_XGP_PORT (1 << 1)
-# define ATIF_VGA_ENABLED_GRAPHICS_DEVICE (1 << 2)
-# define ATIF_XGP_PORT_IN_DOCK (1 << 3)
#define ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION 0x15
/* ARG0: ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION
* ARG1: none
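
The QBTC output above reports the transfer curve as (luminance %, input signal) pairs excluding the 0% and 100% endpoints. A hedged consumer sketch, assuming strictly increasing luminance points and linear interpolation (neither is mandated by this interface):

	static u8 example_qbtc_signal(const struct atif_qbtc_output *o, u8 lum)
	{
		int i;

		if (o->number_of_points == 0 ||
		    lum <= o->data_points[0].luminance)
			return o->min_input_signal;

		for (i = 0; i + 1 < o->number_of_points; i++) {
			const struct atif_qbtc_data_point *a = &o->data_points[i];
			const struct atif_qbtc_data_point *b = &o->data_points[i + 1];

			if (lum <= b->luminance)	/* segment a -> b */
				return a->input_signal +
				       (b->input_signal - a->input_signal) *
				       (lum - a->luminance) /
				       (b->luminance - a->luminance);
		}
		return o->max_input_signal;
	}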
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 2083c308007c..470d7b89071a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -133,6 +133,10 @@ enum PP_FEATURE_MASK {
PP_AVFS_MASK = 0x40000,
};
+enum DC_FEATURE_MASK {
+ DC_FBC_MASK = 0x1,
+};
+
/**
* struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
*/
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
new file mode 100644
index 000000000000..8f515875a34d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_0_OFFSET_HEADER
+#define _mmhub_9_4_0_OFFSET_HEADER
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+// base address: 0x6a040
+#define mmMC_VM_XGMI_LFB_CNTL 0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE 0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
new file mode 100644
index 000000000000..0a6b072d191e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_0_SH_MASK_HEADER
+#define _mmhub_9_4_0_SH_MASK_HEADER
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
+
+#endif
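
The shift/mask pairs follow the standard decode idiom; a minimal sketch, assuming reg holds a raw MC_VM_XGMI_LFB_CNTL read:

	u32 pf_lfb_region = (reg & MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK) >>
			    MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT;
	u32 pf_max_region = (reg & MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK) >>
			    MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT;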
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index d2e7c0fa96c2..8eb0bb241210 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1325,7 +1325,7 @@ struct atom_smu_info_v3_3 {
struct atom_common_table_header table_header;
uint8_t smuip_min_ver;
uint8_t smuip_max_ver;
- uint8_t smu_rsd1;
+ uint8_t waflclk_ss_mode;
uint8_t gpuclk_ss_mode;
uint16_t sclk_ss_percentage;
uint16_t sclk_ss_rate_10hz;
@@ -1355,7 +1355,10 @@ struct atom_smu_info_v3_3 {
uint32_t syspll3_1_vco_freq_10khz;
uint32_t bootup_fclk_10khz;
uint32_t bootup_waflclk_10khz;
- uint32_t reserved[3];
+ uint32_t smu_info_caps;
+ uint16_t waflclk_ss_percentage; // in units of 0.001%
+ uint16_t smuinitoffset;
+ uint32_t reserved;
};
/*
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 64ecffd52126..8154d67388cc 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -188,8 +188,8 @@ struct tile_config {
*/
#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
#define ALLOC_MEM_FLAGS_GTT (1 << 1)
-#define ALLOC_MEM_FLAGS_USERPTR (1 << 2) /* TODO */
-#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3) /* TODO */
+#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
+#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
/*
* Allocation flags attributes/access options.
@@ -205,20 +205,6 @@ struct tile_config {
/**
* struct kfd2kgd_calls
*
- * @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
- * The buffer can be used for mqds, hpds, kernel queue, fence and runlists
- *
- * @free_gtt_mem: Frees a buffer that was allocated on the gart aperture
- *
- * @get_local_mem_info: Retrieves information about GPU local memory
- *
- * @get_gpu_clock_counter: Retrieves GPU clock counter
- *
- * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
- *
- * @alloc_pasid: Allocate a PASID
- * @free_pasid: Free a PASID
- *
* @program_sh_mem_settings: A function that should initiate the memory
* properties such as main aperture memory type (cache / non cached) and
* secondary aperture base address, size and memory type.
@@ -255,64 +241,16 @@ struct tile_config {
*
* @get_tile_config: Returns GPU-specific tiling mode information
*
- * @get_cu_info: Retrieves activated cu info
- *
- * @get_vram_usage: Returns current VRAM usage
- *
- * @create_process_vm: Create a VM address space for a given process and GPU
- *
- * @destroy_process_vm: Destroy a VM
- *
- * @get_process_page_dir: Get physical address of a VM page directory
- *
* @set_vm_context_page_table_base: Program page table base for a VMID
*
- * @alloc_memory_of_gpu: Allocate GPUVM memory
- *
- * @free_memory_of_gpu: Free GPUVM memory
- *
- * @map_memory_to_gpu: Map GPUVM memory into a specific VM address
- * space. Allocates and updates page tables and page directories as
- * needed. This function may return before all page table updates have
- * completed. This allows multiple map operations (on multiple GPUs)
- * to happen concurrently. Use sync_memory to synchronize with all
- * pending updates.
- *
- * @unmap_memor_to_gpu: Unmap GPUVM memory from a specific VM address space
- *
- * @sync_memory: Wait for pending page table updates to complete
- *
- * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access
- * Pins the BO, maps it to kernel address space. Such BOs are never evicted.
- * The kernel virtual address remains valid until the BO is freed.
- *
- * @restore_process_bos: Restore all BOs that belong to the
- * process. This is intended for restoring memory mappings after a TTM
- * eviction.
- *
* @invalidate_tlbs: Invalidate TLBs for a specific PASID
*
* @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID
*
- * @submit_ib: Submits an IB to the engine specified by inserting the
- * IB to the corresponding ring (ring type). The IB is executed with the
- * specified VMID in a user mode context.
- *
- * @get_vm_fault_info: Return information about a recent VM fault on
- * GFXv7 and v8. If multiple VM faults occurred since the last call of
- * this function, it will return information about the first of those
- * faults. On GFXv9 VM fault information is fully contained in the IH
- * packet and this function is not needed.
- *
* @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
* IH ring entry. This function allows the KFD ISR to get the VMID
* from the fault status register as early as possible.
*
- * @gpu_recover: let kgd reset gpu after kfd detect CPC hang
- *
- * @set_compute_idle: Indicates that compute is idle on a device. This
- * can be used to change power profiles depending on compute activity.
- *
* @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled
*
* This structure contains function pointers to services that the kgd driver
@@ -320,21 +258,6 @@ struct tile_config {
*
*/
struct kfd2kgd_calls {
- int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr, bool mqd_gfx9);
-
- void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
-
- void (*get_local_mem_info)(struct kgd_dev *kgd,
- struct kfd_local_mem_info *mem_info);
- uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
-
- uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
-
- int (*alloc_pasid)(unsigned int bits);
- void (*free_pasid)(unsigned int pasid);
-
/* Register access functions */
void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
@@ -398,49 +321,11 @@ struct kfd2kgd_calls {
uint64_t va, uint32_t vmid);
int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
- void (*get_cu_info)(struct kgd_dev *kgd,
- struct kfd_cu_info *cu_info);
- uint64_t (*get_vram_usage)(struct kgd_dev *kgd);
-
- int (*create_process_vm)(struct kgd_dev *kgd, unsigned int pasid, void **vm,
- void **process_info, struct dma_fence **ef);
- int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp,
- unsigned int pasid, void **vm, void **process_info,
- struct dma_fence **ef);
- void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
- void (*release_process_vm)(struct kgd_dev *kgd, void *vm);
- uint64_t (*get_process_page_dir)(void *vm);
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base);
- int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
- uint64_t size, void *vm,
- struct kgd_mem **mem, uint64_t *offset,
- uint32_t flags);
- int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
- int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
- void *vm);
- int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
- void *vm);
- int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
- int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem,
- void **kptr, uint64_t *size);
- int (*restore_process_bos)(void *process_info, struct dma_fence **ef);
-
int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);
-
- int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
- uint32_t vmid, uint64_t gpu_addr,
- uint32_t *ib_cmd, uint32_t ib_len);
-
- int (*get_vm_fault_info)(struct kgd_dev *kgd,
- struct kfd_vm_fault_info *info);
uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
-
- void (*gpu_recover)(struct kgd_dev *kgd);
-
- void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
-
uint64_t (*get_hive_id)(struct kgd_dev *kgd);
};
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 980e696989b1..1479ea1dc3e7 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -276,6 +276,10 @@ struct amd_pm_funcs {
struct amd_pp_simple_clock_info *clocks);
int (*notify_smu_enable_pwe)(void *handle);
int (*enable_mgpu_fan_boost)(void *handle);
+ int (*set_active_display_count)(void *handle, uint32_t count);
+ int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
+ int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
+ int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
};
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index d6aa1d414320..9bc27f468d5b 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -300,7 +300,7 @@ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
return -EINVAL;
if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -387,7 +387,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
return 0;
if (hwmgr->hwmgr_func->get_sclk == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -405,7 +405,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
return 0;
if (hwmgr->hwmgr_func->get_mclk == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -422,7 +422,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
return;
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
mutex_lock(&hwmgr->smu_lock);
@@ -438,7 +438,7 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
return;
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
mutex_lock(&hwmgr->smu_lock);
@@ -505,7 +505,7 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
return;
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
mutex_lock(&hwmgr->smu_lock);
@@ -522,7 +522,7 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
return 0;
if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -540,7 +540,7 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
return -EINVAL;
if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -558,7 +558,7 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
return -EINVAL;
if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -594,7 +594,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
return -EINVAL;
if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -720,12 +720,12 @@ static int pp_dpm_force_clock_level(void *handle,
return -EINVAL;
if (hwmgr->hwmgr_func->force_clock_level == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("force clock level is for dpm manual mode only.\n");
+ pr_debug("force clock level is for dpm manual mode only.\n");
return -EINVAL;
}
@@ -745,7 +745,7 @@ static int pp_dpm_print_clock_levels(void *handle,
return -EINVAL;
if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -763,7 +763,7 @@ static int pp_dpm_get_sclk_od(void *handle)
return -EINVAL;
if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -781,7 +781,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
return -EINVAL;
if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -800,7 +800,7 @@ static int pp_dpm_get_mclk_od(void *handle)
return -EINVAL;
if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -818,7 +818,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
return -EINVAL;
if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -878,7 +878,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
return -EINVAL;
if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return snprintf(buf, PAGE_SIZE, "\n");
}
@@ -894,12 +894,12 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
return ret;
if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return ret;
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("power profile setting is for manual dpm mode only.\n");
+ pr_debug("power profile setting is for manual dpm mode only.\n");
return ret;
}
@@ -917,7 +917,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
return -EINVAL;
if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
@@ -935,7 +935,7 @@ static int pp_dpm_switch_power_profile(void *handle,
return -EINVAL;
if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
@@ -972,7 +972,7 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
return -EINVAL;
if (hwmgr->hwmgr_func->set_power_limit == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
@@ -1072,7 +1072,7 @@ static int pp_get_current_clocks(void *handle,
&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
if (ret) {
- pr_info("Error in phm_get_clock_info \n");
+ pr_debug("Error in phm_get_clock_info \n");
mutex_unlock(&hwmgr->smu_lock);
return -EINVAL;
}
@@ -1212,7 +1212,7 @@ static int pp_dpm_powergate_mmhub(void *handle)
return -EINVAL;
if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -1227,7 +1227,7 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate)
return 0;
if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -1242,7 +1242,7 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
return;
if (hwmgr->hwmgr_func->powergate_acp == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
@@ -1257,7 +1257,7 @@ static void pp_dpm_powergate_sdma(void *handle, bool gate)
return;
if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
@@ -1303,7 +1303,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
return -EINVAL;
if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
@@ -1332,6 +1332,78 @@ static int pp_enable_mgpu_fan_boost(void *handle)
return 0;
}
+static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
+ pr_debug("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
+ pr_debug("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
+ pr_debug("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_active_display_count(void *handle, uint32_t count)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_set_active_display_count(hwmgr, count);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1378,4 +1450,8 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
+ .set_active_display_count = pp_set_active_display_count,
+ .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
+ .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
+ .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
};
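
A hedged sketch of a display-side caller driving the four new hooks through the table; the handle and clock values are illustrative, and the 10 kHz clock units follow convert_10k_to_mhz() in the smu10 handlers below:

	static void example_notify_display_state(const struct amd_pm_funcs *funcs,
						 void *pp_handle)
	{
		if (funcs->set_active_display_count)
			funcs->set_active_display_count(pp_handle, 2);
		if (funcs->set_min_deep_sleep_dcefclk)
			funcs->set_min_deep_sleep_dcefclk(pp_handle, 40000);
		if (funcs->set_hard_min_dcefclk_by_freq)
			funcs->set_hard_min_dcefclk_by_freq(pp_handle, 60000);
		if (funcs->set_hard_min_fclk_by_freq)
			funcs->set_hard_min_fclk_by_freq(pp_handle, 80000);
	}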
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 85119c2bdcc8..333b9b845971 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -286,8 +286,8 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
if (display_config == NULL)
return -EINVAL;
- if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
- hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
+ if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
+ hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
for (index = 0; index < display_config->num_path_including_non_display; index++) {
if (display_config->displays[index].controller_id != 0)
@@ -478,3 +478,44 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
}
+
+int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_active_display_count)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
+}
+
+int phm_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
+}
+
+int phm_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
+}
+
+int phm_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index dd18cb710391..f95c5f50eb0f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -216,12 +216,12 @@ static inline uint32_t convert_10k_to_mhz(uint32_t clock)
return (clock + 99) / 100;
}
-static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->need_min_deep_sleep_dcefclk &&
- smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
+ smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
@@ -230,6 +230,34 @@ static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
return 0;
}
+static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (smu10_data->dcf_actual_hard_min_freq &&
+ smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
+ smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinDcefclkByFreq,
+ smu10_data->dcf_actual_hard_min_freq);
+ }
+ return 0;
+}
+
+static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (smu10_data->f_actual_hard_min_freq &&
+ smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
+ smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ smu10_data->f_actual_hard_min_freq);
+ }
+ return 0;
+}
+
static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
@@ -1206,7 +1234,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.get_max_high_clocks = smu10_get_max_high_clocks,
.read_sensor = smu10_read_sensor,
.set_active_display_count = smu10_set_active_display_count,
- .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
+ .set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk,
.dynamic_state_management_enable = smu10_enable_dpm_tasks,
.power_off_asic = smu10_power_off_asic,
.asic_setup = smu10_setup_asic_task,
@@ -1217,6 +1245,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.display_clock_voltage_request = smu10_display_clock_voltage_request,
.powergate_gfx = smu10_gfx_off_control,
.powergate_sdma = smu10_powergate_sdma,
+ .set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
+ .set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index ed35ec0341e6..3958729d6265 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -269,7 +269,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
hwmgr->dyn_state.mvdd_dependency_on_mclk);
PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 MVDD table from dependancy table.",
+ "Failed to retrieve SVI2 MVDD table from dependency table.",
return result;);
}
@@ -288,7 +288,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
hwmgr->dyn_state.vddci_dependency_on_mclk);
PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDCI table from dependancy table.",
+ "Failed to retrieve SVI2 VDDCI table from dependency table.",
return result);
}
@@ -317,7 +317,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
table_info->vddc_lookup_table);
PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
+ "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
}
tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
@@ -2859,7 +2859,10 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+ if (hwmgr->is_kicker)
+ switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+ else
+ switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
break;
case CHIP_VEGAM:
switch_limit_us = 30;
@@ -4219,9 +4222,17 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
if (tmp & (1 << 23)) {
data->mem_latency_high = MEM_LATENCY_HIGH;
data->mem_latency_low = MEM_LATENCY_LOW;
+ if ((hwmgr->chip_id == CHIP_POLARIS10) ||
+ (hwmgr->chip_id == CHIP_POLARIS11) ||
+ (hwmgr->chip_id == CHIP_POLARIS12))
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
} else {
data->mem_latency_high = 330;
data->mem_latency_low = 330;
+ if ((hwmgr->chip_id == CHIP_POLARIS10) ||
+ (hwmgr->chip_id == CHIP_POLARIS11) ||
+ (hwmgr->chip_id == CHIP_POLARIS12))
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
}
return 0;
@@ -4525,12 +4536,12 @@ static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
struct smu7_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.sclk_table);
- int value;
+ int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+ int golden_value = golden_sclk_table->dpm_levels
+ [golden_sclk_table->count - 1].value;
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
@@ -4567,12 +4578,12 @@ static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
struct smu7_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mclk_table);
- int value;
+ int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+ int golden_value = golden_mclk_table->dpm_levels
+ [golden_mclk_table->count - 1].value;
- value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
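
The overdrive getters are reworked into a subtract-then-scale form using DIV_ROUND_UP, so the percentage rounds up instead of truncating toward zero. A worked example with made-up DPM values:

	/* DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d). */
	int value = 6150, golden_value = 6000;

	value -= golden_value;				/* 150             */
	value = DIV_ROUND_UP(value * 100, golden_value);/* 15000/6000 -> 3 */
	/* Plain integer division would have yielded 2. */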
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 5e19f5977eb1..d138ddae563d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -967,7 +967,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_CAP(PHM_PlatformCaps_TDRamping) ||
PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
mutex_lock(&adev->grbm_idx_mutex);
value = 0;
value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
@@ -1014,13 +1014,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
"Failed to enable DPM DIDT.", goto error);
}
mutex_unlock(&adev->grbm_idx_mutex);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
return 0;
error:
mutex_unlock(&adev->grbm_idx_mutex);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return result;
}
@@ -1034,7 +1034,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
PP_CAP(PHM_PlatformCaps_TDRamping) ||
PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
result = smu7_enable_didt(hwmgr, false);
PP_ASSERT_WITH_CODE((result == 0),
@@ -1046,12 +1046,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == result),
"Failed to disable DPM DIDT.", goto error);
}
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
return 0;
error:
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return result;
}
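
All direct calls through adev->gfx.rlc.funcs are replaced by the amdgpu_gfx_rlc_enter/exit_safe_mode() helpers, presumably so the indirection and any guarding live in one place. A hedged sketch of what such a wrapper plausibly looks like; this is not the actual amdgpu implementation:

	static void rlc_enter_safe_mode_sketch(struct amdgpu_device *adev)
	{
		/* Guarding and state tracking assumed, not verified. */
		if (adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->enter_safe_mode)
			adev->gfx.rlc.funcs->enter_safe_mode(adev);
	}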
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index fef111ddb736..553a203ac47c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -1228,17 +1228,14 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
- if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
- smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
+ if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
- }
return 0;
}
static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
- smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
return smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_UVDPowerON,
@@ -1995,6 +1992,7 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
.power_state_set = smu8_set_power_state_tasks,
.dynamic_state_management_disable = smu8_disable_dpm_tasks,
.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
+ .update_nbdpm_pstate = smu8_nbdpm_pstate_enable_disable,
.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 99a33c33a32c..101c09b212ad 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -713,20 +713,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
table->WatermarkRow[1][i].MinClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+ 1000));
table->WatermarkRow[1][i].MaxClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+ 1000));
table->WatermarkRow[1][i].MinUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
table->WatermarkRow[1][i].MaxUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
table->WatermarkRow[1][i].WmSetting = (uint8_t)
wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
}
@@ -734,20 +734,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
table->WatermarkRow[0][i].MinClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+ 1000));
table->WatermarkRow[0][i].MaxClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+ 1000));
table->WatermarkRow[0][i].MinUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
table->WatermarkRow[0][i].MaxUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
table->WatermarkRow[0][i].WmSetting = (uint8_t)
wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
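
The watermark fix is purely about cast precedence: in the old form the (uint16_t) cast bound to the kHz value alone, truncating it to 16 bits before the divide by 1000, which corrupts any clock at or above 65536 kHz (~65.5 MHz). Moving the division inside the cast's operand divides first. A standalone illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t khz = 600000;	/* 600 MHz */

		/* old: truncate to 16 bits, then divide */
		uint16_t bad  = (uint16_t)(khz) / 1000;	/* 10176/1000 = 10 */
		/* new: divide, then truncate */
		uint16_t good = (uint16_t)(khz / 1000);	/* 600             */

		printf("%u %u\n", bad, good);
		return 0;
	}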
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 8c4db86bb4b7..e2bc6e0c229f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4522,15 +4522,13 @@ static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega10_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
- int value;
-
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels
- [golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels
+ int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+ int golden_value = golden_sclk_table->dpm_levels
[golden_sclk_table->count - 1].value;
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
+
return value;
}
@@ -4575,16 +4573,13 @@ static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
- int value;
-
- value = (mclk_table->dpm_levels
- [mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels
- [golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels
+ int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+ int golden_value = golden_mclk_table->dpm_levels
[golden_mclk_table->count - 1].value;
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
+
return value;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 2d88abf97e7b..6f26cb241ecc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -937,7 +937,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
@@ -962,7 +962,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -971,11 +971,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
vega10_didt_set_mask(hwmgr, false);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -988,7 +988,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
@@ -1007,7 +1007,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
if (PP_CAP(PHM_PlatformCaps_GCEDC))
@@ -1024,11 +1024,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
uint32_t data;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
vega10_didt_set_mask(hwmgr, false);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
@@ -1049,7 +1049,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
@@ -1070,7 +1070,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -1079,11 +1079,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
vega10_didt_set_mask(hwmgr, false);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -1097,7 +1097,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
@@ -1118,7 +1118,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
@@ -1138,11 +1138,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
uint32_t data;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
vega10_didt_set_mask(hwmgr, false);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
@@ -1160,7 +1160,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
int result;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
mutex_lock(&adev->grbm_idx_mutex);
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
@@ -1173,7 +1173,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, false);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 74bc37308dc0..54364444ecd1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -2243,12 +2243,12 @@ static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega12_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
- int value;
+ int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+ int golden_value = golden_sclk_table->dpm_levels
+ [golden_sclk_table->count - 1].value;
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
@@ -2264,16 +2264,13 @@ static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega12_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
- int value;
-
- value = (mclk_table->dpm_levels
- [mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels
- [golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels
+ int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+ int golden_value = golden_mclk_table->dpm_levels
[golden_mclk_table->count - 1].value;
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
+
return value;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 57143d51e3ee..2679d1240fa1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -75,7 +75,17 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
- data->registry_data.disallowed_features = 0x0;
+ /*
+ * Disable the following features for now:
+ * GFXCLK DS
+ * SOCLK DS
+ * LCLK DS
+ * DCEFCLK DS
+ * FCLK DS
+ * MP1CLK DS
+ * MP0CLK DS
+ */
+ data->registry_data.disallowed_features = 0xE0041C00;
data->registry_data.od_state_in_dc_support = 0;
data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
@@ -120,6 +130,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disable_auto_wattman = 1;
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
+ data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
data->registry_data.auto_wattman_threshold = 50;
data->registry_data.gfxoff_controlled_by_driver = 1;
data->gfxoff_allowed = false;
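
The new fclk_gfxclk_ratio default, 0x3F6CCCCD, reads as an IEEE-754 single-precision bit pattern: it decodes to 0.925f, which matches the common PPSMC convention of passing a float to the SMC by its raw bits in a uint32_t argument. That reading is an inference from the value, not something the patch states; a quick check:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint32_t raw = 0x3F6CCCCD;
		float f;

		memcpy(&f, &raw, sizeof(f));	/* reinterpret the bits */
		printf("%f\n", f);		/* prints 0.925000 */
		return 0;
	}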
@@ -829,6 +840,28 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
return 0;
}
+static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DPM_UCLK].enabled)
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetUclkFastSwitch,
+ 1);
+
+ return 0;
+}
+
+static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFclkGfxClkRatio,
+ data->registry_data.fclk_gfxclk_ratio);
+}
+
static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data =
@@ -1290,12 +1323,13 @@ static int vega20_get_sclk_od(
&(data->dpm_table.gfx_table);
struct vega20_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
- int value;
+ int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+ int golden_value = golden_sclk_table->dpm_levels
+ [golden_sclk_table->count - 1].value;
/* od percentage */
- value = DIV_ROUND_UP((sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 100,
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value);
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
@@ -1335,12 +1369,13 @@ static int vega20_get_mclk_od(
&(data->dpm_table.mem_table);
struct vega20_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
- int value;
+ int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+ int golden_value = golden_mclk_table->dpm_levels
+ [golden_mclk_table->count - 1].value;
/* od percentage */
- value = DIV_ROUND_UP((mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 100,
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value);
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
@@ -1532,6 +1567,16 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"[EnableDPMTasks] Failed to enable all smu features!",
return result);
+ result = vega20_notify_smc_display_change(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableDPMTasks] Failed to notify smc display change!",
+ return result);
+
+ result = vega20_send_clock_ratio(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableDPMTasks] Failed to send clock ratio!",
+ return result);
+
/* Initialize UVD/VCE powergating state */
vega20_init_powergate_state(hwmgr);
@@ -1972,19 +2017,6 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
return ret;
}
-static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr,
- bool has_disp)
-{
- struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
-
- if (data->smu_features[GNLD_DPM_UCLK].enabled)
- return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 1 : 0);
-
- return 0;
-}
-
int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock_req)
{
@@ -2044,13 +2076,6 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
struct pp_display_clock_request clock_req;
int ret = 0;
- if ((hwmgr->display_config->num_display > 1) &&
- !hwmgr->display_config->multi_monitor_in_sync &&
- !hwmgr->display_config->nb_pstate_switch_disable)
- vega20_notify_smc_display_change(hwmgr, false);
- else
- vega20_notify_smc_display_change(hwmgr, true);
-
min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
@@ -2742,7 +2767,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
case PP_MCLK:
@@ -2759,7 +2784,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
case PP_PCIE:
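
The "* 10" in the active-level markers implies a unit mismatch: clocks.data[i].clocks_in_khz is in kHz, while the current clock read back into `now` is apparently in 10 kHz units, so the comparison has to scale it. With illustrative numbers:

	uint32_t now = 135000;		/* 10 kHz units, i.e. 1350 MHz */
	uint32_t level_khz = 1350000;	/* 1350 MHz expressed in kHz   */

	/* level_khz == now * 10, so this level gets the '*' marker */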
@@ -3441,109 +3466,64 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
/* init/fini related */
- .backend_init =
- vega20_hwmgr_backend_init,
- .backend_fini =
- vega20_hwmgr_backend_fini,
- .asic_setup =
- vega20_setup_asic_task,
- .power_off_asic =
- vega20_power_off_asic,
- .dynamic_state_management_enable =
- vega20_enable_dpm_tasks,
- .dynamic_state_management_disable =
- vega20_disable_dpm_tasks,
+ .backend_init = vega20_hwmgr_backend_init,
+ .backend_fini = vega20_hwmgr_backend_fini,
+ .asic_setup = vega20_setup_asic_task,
+ .power_off_asic = vega20_power_off_asic,
+ .dynamic_state_management_enable = vega20_enable_dpm_tasks,
+ .dynamic_state_management_disable = vega20_disable_dpm_tasks,
/* power state related */
- .apply_clocks_adjust_rules =
- vega20_apply_clocks_adjust_rules,
- .pre_display_config_changed =
- vega20_pre_display_configuration_changed_task,
- .display_config_changed =
- vega20_display_configuration_changed_task,
+ .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
+ .pre_display_config_changed = vega20_pre_display_configuration_changed_task,
+ .display_config_changed = vega20_display_configuration_changed_task,
.check_smc_update_required_for_display_configuration =
vega20_check_smc_update_required_for_display_configuration,
.notify_smc_display_config_after_ps_adjustment =
vega20_notify_smc_display_config_after_ps_adjustment,
/* export to DAL */
- .get_sclk =
- vega20_dpm_get_sclk,
- .get_mclk =
- vega20_dpm_get_mclk,
- .get_dal_power_level =
- vega20_get_dal_power_level,
- .get_clock_by_type_with_latency =
- vega20_get_clock_by_type_with_latency,
- .get_clock_by_type_with_voltage =
- vega20_get_clock_by_type_with_voltage,
- .set_watermarks_for_clocks_ranges =
- vega20_set_watermarks_for_clocks_ranges,
- .display_clock_voltage_request =
- vega20_display_clock_voltage_request,
- .get_performance_level =
- vega20_get_performance_level,
+ .get_sclk = vega20_dpm_get_sclk,
+ .get_mclk = vega20_dpm_get_mclk,
+ .get_dal_power_level = vega20_get_dal_power_level,
+ .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
+ .get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
+ .set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
+ .display_clock_voltage_request = vega20_display_clock_voltage_request,
+ .get_performance_level = vega20_get_performance_level,
/* UMD pstate, profile related */
- .force_dpm_level =
- vega20_dpm_force_dpm_level,
- .get_power_profile_mode =
- vega20_get_power_profile_mode,
- .set_power_profile_mode =
- vega20_set_power_profile_mode,
+ .force_dpm_level = vega20_dpm_force_dpm_level,
+ .get_power_profile_mode = vega20_get_power_profile_mode,
+ .set_power_profile_mode = vega20_set_power_profile_mode,
/* od related */
- .set_power_limit =
- vega20_set_power_limit,
- .get_sclk_od =
- vega20_get_sclk_od,
- .set_sclk_od =
- vega20_set_sclk_od,
- .get_mclk_od =
- vega20_get_mclk_od,
- .set_mclk_od =
- vega20_set_mclk_od,
- .odn_edit_dpm_table =
- vega20_odn_edit_dpm_table,
+ .set_power_limit = vega20_set_power_limit,
+ .get_sclk_od = vega20_get_sclk_od,
+ .set_sclk_od = vega20_set_sclk_od,
+ .get_mclk_od = vega20_get_mclk_od,
+ .set_mclk_od = vega20_set_mclk_od,
+ .odn_edit_dpm_table = vega20_odn_edit_dpm_table,
/* for sysfs to retrieve/set gfxclk/memclk */
- .force_clock_level =
- vega20_force_clock_level,
- .print_clock_levels =
- vega20_print_clock_levels,
- .read_sensor =
- vega20_read_sensor,
+ .force_clock_level = vega20_force_clock_level,
+ .print_clock_levels = vega20_print_clock_levels,
+ .read_sensor = vega20_read_sensor,
/* powergate related */
- .powergate_uvd =
- vega20_power_gate_uvd,
- .powergate_vce =
- vega20_power_gate_vce,
+ .powergate_uvd = vega20_power_gate_uvd,
+ .powergate_vce = vega20_power_gate_vce,
/* thermal related */
- .start_thermal_controller =
- vega20_start_thermal_controller,
- .stop_thermal_controller =
- vega20_thermal_stop_thermal_controller,
- .get_thermal_temperature_range =
- vega20_get_thermal_temperature_range,
- .register_irq_handlers =
- smu9_register_irq_handlers,
- .disable_smc_firmware_ctf =
- vega20_thermal_disable_alert,
+ .start_thermal_controller = vega20_start_thermal_controller,
+ .stop_thermal_controller = vega20_thermal_stop_thermal_controller,
+ .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
+ .register_irq_handlers = smu9_register_irq_handlers,
+ .disable_smc_firmware_ctf = vega20_thermal_disable_alert,
/* fan control related */
- .get_fan_speed_percent =
- vega20_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent =
- vega20_fan_ctrl_set_fan_speed_percent,
- .get_fan_speed_info =
- vega20_fan_ctrl_get_fan_speed_info,
- .get_fan_speed_rpm =
- vega20_fan_ctrl_get_fan_speed_rpm,
- .set_fan_speed_rpm =
- vega20_fan_ctrl_set_fan_speed_rpm,
- .get_fan_control_mode =
- vega20_get_fan_control_mode,
- .set_fan_control_mode =
- vega20_set_fan_control_mode,
+ .get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent,
+ .set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent,
+ .get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
+ .get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
+ .set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
+ .get_fan_control_mode = vega20_get_fan_control_mode,
+ .set_fan_control_mode = vega20_set_fan_control_mode,
/* smu memory related */
- .notify_cac_buffer_info =
- vega20_notify_cac_buffer_info,
- .enable_mgpu_fan_boost =
- vega20_enable_mgpu_fan_boost,
+ .notify_cac_buffer_info = vega20_notify_cac_buffer_info,
+ .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
};
int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index 56fe6a0d42e8..25faaa5c5b10 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -328,6 +328,7 @@ struct vega20_registry_data {
uint8_t disable_auto_wattman;
uint32_t auto_wattman_debug;
uint32_t auto_wattman_sample_period;
+ uint32_t fclk_gfxclk_ratio;
uint8_t auto_wattman_threshold;
uint8_t log_avfs_param;
uint8_t enable_enginess;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 54fd0125d9cf..f4dab979a3a1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -463,5 +463,8 @@ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr);
+
+extern int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count);
+
#endif /* _HARDWARE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index e5a60aa44b5d..0d298a0409f5 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -28,7 +28,6 @@
#include "hardwaremanager.h"
#include "hwmgr_ppt.h"
#include "ppatomctrl.h"
-#include "hwmgr_ppt.h"
#include "power_state.h"
#include "smu_helper.h"
@@ -310,7 +309,7 @@ struct pp_hwmgr_func {
int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
- int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*set_min_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range);
int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr,
uint32_t virtual_addr_low,
@@ -318,6 +317,9 @@ struct pp_hwmgr_func {
uint32_t mc_addr_low,
uint32_t mc_addr_hi,
uint32_t size);
+ int (*update_nbdpm_pstate)(struct pp_hwmgr *hwmgr,
+ bool enable,
+ bool lock);
int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range);
int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf);
@@ -330,6 +332,8 @@ struct pp_hwmgr_func {
int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate);
int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
+ int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
};
struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
index 65eb630bfea3..94bf7b649c20 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
@@ -40,10 +40,6 @@
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
index 62f36ba2435b..d11d6a797ce4 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -395,6 +395,9 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
+#define PPSMC_MSG_EnableFFC ((uint16_t) 0x307)
+#define PPSMC_MSG_DisableFFC ((uint16_t) 0x308)
+
#define PPSMC_MSG_EnableDpmDidt ((uint16_t) 0x309)
#define PPSMC_MSG_DisableDpmDidt ((uint16_t) 0x30A)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index 45d64a81e945..4f63a736ea0e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -105,7 +105,8 @@
#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B
#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C
#define PPSMC_MSG_WaflTest 0x4D
-// Unused ID 0x4E to 0x50
+#define PPSMC_MSG_SetFclkGfxClkRatio 0x4E
+// Unused ID 0x4F to 0x50
#define PPSMC_MSG_AllowGfxOff 0x51
#define PPSMC_MSG_DisallowGfxOff 0x52
#define PPSMC_MSG_GetPptLimit 0x53
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 872d3824337b..b3e06e498834 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -44,7 +44,6 @@
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
-#include "ppatomctrl.h"
#include "atombios.h"
#include "pppcielanes.h"
@@ -1529,8 +1528,21 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
efuse = efuse >> 24;
if (hwmgr->chip_id == CHIP_POLARIS10) {
- min = 1000;
- max = 2300;
+ if (hwmgr->is_kicker) {
+ min = 1200;
+ max = 2500;
+ } else {
+ min = 1000;
+ max = 2300;
+ }
+ } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+ if (hwmgr->is_kicker) {
+ min = 900;
+ max = 2100;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
} else {
min = 1100;
max = 2100;
@@ -1627,6 +1639,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
int result = 0;
@@ -1647,6 +1660,59 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
if (0 == result) {
+ if (((adev->pdev->device == 0x67ef) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe5))) ||
+ ((adev->pdev->device == 0x67ff) &&
+ ((adev->pdev->revision == 0xcf) ||
+ (adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+ if ((adev->pdev->device == 0x67ef && adev->pdev->revision == 0xe5) ||
+ (adev->pdev->device == 0x67ff && adev->pdev->revision == 0xef)) {
+ if ((avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 == 0xEA522DD3) &&
+ (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 == 0x5645A) &&
+ (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 == 0x33F9E) &&
+ (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 == 0xFFFFC5CC) &&
+ (avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 == 0x1B1A) &&
+ (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b == 0xFFFFFCED)) {
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF718F1D4;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x323FD;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0x1E455;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
+ avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x23;
+ }
+ }
+ } else if (hwmgr->chip_id == CHIP_POLARIS12 && !hwmgr->is_kicker) {
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF6B024DD;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x3005E;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0x18A5F;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0x315;
+ avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFED1;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x3B;
+ } else if (((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe3) ||
+ (adev->pdev->revision == 0xe4) ||
+ (adev->pdev->revision == 0xe5) ||
+ (adev->pdev->revision == 0xe7) ||
+ (adev->pdev->revision == 0xef))) ||
+ ((adev->pdev->device == 0x6fdf) &&
+ ((adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF843B66B;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x59CB5;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0xFFFF287F;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
+ avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFF23;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x58;
+ }
+ }
+
+ if (0 == result) {
table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index d0eb8ab50148..d111dd4e03d7 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -29,7 +29,6 @@
#include "rv_ppsmc.h"
#include "smu10_driver_if.h"
#include "smu10.h"
-#include "ppatomctrl.h"
#include "pp_debug.h"
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 09b844ec3eab..e2787e14a500 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -61,9 +62,13 @@ static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
mmSMU_MP1_SRBM2P_ARG_0);
}
-static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
+/* Send a message to the SMC, and wait for its response. */
+static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
{
int result = 0;
+ ktime_t t_start;
+ s64 elapsed_us;
if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
@@ -74,28 +79,31 @@ static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
/* Read the last message to SMU, to report actual cause */
uint32_t val = cgs_read_register(hwmgr->device,
mmSMU_MP1_SRBM2P_MSG_0);
- pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
- pr_err("SMU still servicing msg (0x%04x)\n", val);
+ pr_err("%s(0x%04x) aborted; SMU still servicing msg (0x%04x)\n",
+ __func__, msg, val);
return result;
}
+ t_start = ktime_get();
+
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
- return 0;
+ result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
+ SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+
+ elapsed_us = ktime_us_delta(ktime_get(), t_start);
+
+ WARN(result, "%s(0x%04x, %#x) timed out after %lld us\n",
+ __func__, msg, parameter, elapsed_us);
+
+ return result;
}
-/* Send a message to the SMC, and wait for its response.*/
static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- int result = 0;
-
- result = smu8_send_msg_to_smc_async(hwmgr, msg);
- if (result != 0)
- return result;
-
- return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
- SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+ return smu8_send_msg_to_smc_with_parameter(hwmgr, msg, 0);
}
static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
@@ -135,17 +143,6 @@ static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
return result;
}
-static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
-{
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
-
- return smu8_send_msg_to_smc(hwmgr, msg);
-}
-
static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
uint32_t firmware)
{
@@ -737,6 +734,10 @@ static int smu8_start_smu(struct pp_hwmgr *hwmgr)
cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
+ pr_info("smu version %02d.%02d.%02d\n",
+ ((hwmgr->smu_version >> 16) & 0xFF),
+ ((hwmgr->smu_version >> 8) & 0xFF),
+ (hwmgr->smu_version & 0xFF));
adev->pm.fw_version = hwmgr->smu_version >> 8;
return smu8_request_smu_load_fw(hwmgr);
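
The smu8 refactor folds the old async send path into a single synchronous smu8_send_msg_to_smc_with_parameter() and times the response wait with ktime, so a timeout WARN can report how long it actually waited. The elapsed-microseconds idiom in isolation, with a hypothetical callee:

	ktime_t t_start = ktime_get();
	int result = do_timed_work();		/* hypothetical */
	s64 elapsed_us = ktime_us_delta(ktime_get(), t_start);

	WARN(result, "work failed after %lld us\n", elapsed_us);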
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 99d5e4f98f49..a6edd5df33b0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 9f71512b2510..1e69300f6175 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -40,7 +40,6 @@
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
-#include "ppatomctrl.h"
#include "atombios.h"
#include "pppcielanes.h"
diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h
index e8fcf3ab1d9a..90ef76b19f8a 100644
--- a/drivers/gpu/drm/arc/arcpgu.h
+++ b/drivers/gpu/drm/arc/arcpgu.h
@@ -20,7 +20,6 @@
struct arcpgu_drm_private {
void __iomem *regs;
struct clk *clk;
- struct drm_fbdev_cma *fbdev;
struct drm_framebuffer *fb;
struct drm_crtc crtc;
struct drm_plane *plane;
@@ -43,8 +42,5 @@ static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
int arc_pgu_setup_crtc(struct drm_device *dev);
int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np);
int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np);
-struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int num_crtc,
- unsigned int max_conn_count);
#endif
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 965cda48dc13..62f51f70606d 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -158,8 +158,6 @@ static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
.mode_valid = arc_pgu_crtc_mode_valid,
- .mode_set = drm_helper_crtc_mode_set,
- .mode_set_base = drm_helper_crtc_mode_set_base,
.mode_set_nofb = arc_pgu_crtc_mode_set_nofb,
.atomic_begin = arc_pgu_crtc_atomic_begin,
.atomic_enable = arc_pgu_crtc_atomic_enable,
@@ -186,7 +184,6 @@ static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
static void arc_pgu_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index f067de4e1e82..206a76abf771 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -25,16 +26,8 @@
#include "arcpgu.h"
#include "arcpgu_regs.h"
-static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
-{
- struct arcpgu_drm_private *arcpgu = dev->dev_private;
-
- drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
-}
-
static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = arcpgu_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -51,13 +44,6 @@ static void arcpgu_setup_mode_config(struct drm_device *drm)
DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops);
-static void arcpgu_lastclose(struct drm_device *drm)
-{
- struct arcpgu_drm_private *arcpgu = drm->dev_private;
-
- drm_fbdev_cma_restore_mode(arcpgu->fbdev);
-}
-
static int arcpgu_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
@@ -113,27 +99,14 @@ static int arcpgu_load(struct drm_device *drm)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- arcpgu->fbdev = drm_fbdev_cma_init(drm, 16,
- drm->mode_config.num_connector);
- if (IS_ERR(arcpgu->fbdev)) {
- ret = PTR_ERR(arcpgu->fbdev);
- arcpgu->fbdev = NULL;
- return -ENODEV;
- }
-
platform_set_drvdata(pdev, drm);
return 0;
}
static int arcpgu_unload(struct drm_device *drm)
{
- struct arcpgu_drm_private *arcpgu = drm->dev_private;
-
- if (arcpgu->fbdev) {
- drm_fbdev_cma_fini(arcpgu->fbdev);
- arcpgu->fbdev = NULL;
- }
drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
return 0;
@@ -167,7 +140,6 @@ static int arcpgu_debugfs_init(struct drm_minor *minor)
static struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = arcpgu_lastclose,
.name = "arcpgu",
.desc = "ARC PGU Controller",
.date = "20160219",
@@ -210,13 +182,15 @@ static int arcpgu_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
+ drm_fbdev_generic_setup(drm, 16);
+
return 0;
err_unload:
arcpgu_unload(drm);
err_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -227,7 +201,7 @@ static int arcpgu_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
arcpgu_unload(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return 0;
}
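
arcpgu drops its private drm_fbdev_cma plumbing (the fbdev pointer, lastclose handler, and output_poll_changed hook) in favor of the generic fbdev emulation, which the DRM core manages on its own once the device is registered. The resulting probe tail is the now-standard shape; the error label here is illustrative:

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto err_unload;

	/* generic fbdev replaces drm_fbdev_cma_init() and friends */
	drm_fbdev_generic_setup(drm, 16);	/* preferred bpp */

	return 0;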
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 7aad7dd80d8c..b9bed1138fa3 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -77,12 +77,18 @@ static const struct malidp_format_id malidp500_de_formats[] = {
{ DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
{ DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
{ DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \
- { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
+ { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }, \
+ { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}
static const struct malidp_format_id malidp550_de_formats[] = {
MALIDP_COMMON_FORMATS,
};
+static const struct malidp_format_id malidp650_de_formats[] = {
+ MALIDP_COMMON_FORMATS,
+ { DRM_FORMAT_X0L0, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 4)},
+};
+
static const struct malidp_layer malidp500_layers[] = {
/* id, base address, fb pointer address base, stride offset,
* yuv2rgb matrix offset, mmu control register offset, rotation_features
@@ -630,6 +636,8 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
case DRM_FORMAT_BGR565:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_X0L0:
+ case DRM_FORMAT_X0L2:
bytes_per_col = 32;
break;
/* 16 lines at 1.5 bytes per pixel */
@@ -905,8 +913,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
MALIDP550_DC_IRQ_SE,
.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
},
- .pixel_formats = malidp550_de_formats,
- .n_pixel_formats = ARRAY_SIZE(malidp550_de_formats),
+ .pixel_formats = malidp650_de_formats,
+ .n_pixel_formats = ARRAY_SIZE(malidp650_de_formats),
.bus_align_bytes = 16,
},
.query_hw = malidp650_query_hw,
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 837a24d56675..c9a6d3e0cada 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -398,6 +398,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
struct drm_framebuffer *fb;
u16 pixel_alpha = state->pixel_blend_mode;
int i, ret;
+ unsigned int block_w, block_h;
if (!state->crtc || !state->fb)
return 0;
@@ -413,13 +414,26 @@ static int malidp_de_plane_check(struct drm_plane *plane,
ms->n_planes = fb->format->num_planes;
for (i = 0; i < ms->n_planes; i++) {
u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);
- if (fb->pitches[i] & (alignment - 1)) {
+
+ if ((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
+ & (alignment - 1)) {
DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
fb->pitches[i], i);
return -EINVAL;
}
}
+ block_w = drm_format_info_block_width(fb->format, 0);
+ block_h = drm_format_info_block_height(fb->format, 0);
+ if (fb->width % block_w || fb->height % block_h) {
+ DRM_DEBUG_KMS("Buffer width/height needs to be a multiple of tile sizes");
+ return -EINVAL;
+ }
+ if ((state->src_x >> 16) % block_w || (state->src_y >> 16) % block_h) {
+ DRM_DEBUG_KMS("Plane src_x/src_y needs to be a multiple of tile sizes");
+ return -EINVAL;
+ }
+
if ((state->crtc_w > mp->hwdev->max_line_size) ||
(state->crtc_h > mp->hwdev->max_line_size) ||
(state->crtc_w < mp->hwdev->min_line_size) ||
@@ -492,10 +506,18 @@ static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
num_strides = (mp->hwdev->hw->features &
MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
- for (i = 0; i < num_strides; ++i)
- malidp_hw_write(mp->hwdev, pitches[i],
+ /*
+ * The drm convention for pitch is that it needs to cover width * cpp,
+ * but our hardware wants the pitch/stride to cover all rows included
+ * in a tile.
+ */
+ for (i = 0; i < num_strides; ++i) {
+ unsigned int block_h = drm_format_info_block_height(mp->base.state->fb->format, i);
+
+ malidp_hw_write(mp->hwdev, pitches[i] * block_h,
mp->layer->base +
mp->layer->stride_offset + i * 4);
+ }
}
static const s16
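
malidp now validates that framebuffer dimensions and source offsets are multiples of the format's block (tile) size, and programs the stride registers as pitch * block_height because the hardware expects a stride covering every row of a tile. Concretely, assuming a hypothetical format with 2x2 blocks:

	/* block_w = block_h = 2 (assumed for illustration) */
	if (fb->width % 2 || fb->height % 2)
		return -EINVAL;		/* not tile-aligned */

	/* a DRM pitch of 1024 bytes is programmed as 1024 * 2 = 2048,
	 * covering both rows of each 2-line tile */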
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 69dab82a3771..bf589c53b908 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
+static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary);
+ kfree(ap);
+}
+
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ ast_kick_out_firmware_fb(pdev);
+
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index e6c4cd3dc50e..bfc65040dfcb 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -104,8 +104,6 @@ struct ast_private {
int fb_mtrr;
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 5e77d456d9bb..7c6ac3cadb6b 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -568,6 +568,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
}
ast_bo_unreserve(bo);
+ ast_set_offset_reg(crtc);
ast_set_start_address_crt1(crtc, (u32)gpu_addr);
return 0;
@@ -1254,7 +1255,7 @@ static int ast_cursor_move(struct drm_crtc *crtc,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
/* dummy write to fire HWC */
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
+ ast_show_cursor(crtc);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index fe354ebf374d..c168d62fe8f9 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -36,63 +36,6 @@ ast_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct ast_private, ttm.bdev);
}
-static int
-ast_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void
-ast_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int ast_ttm_global_init(struct ast_private *ast)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- global_ref = &ast->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &ast_ttm_mem_global_init;
- global_ref->release = &ast_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- ast->ttm.bo_global_ref.mem_glob =
- ast->ttm.mem_global_ref.object;
- global_ref = &ast->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&ast->ttm.mem_global_ref);
- return r;
- }
- return 0;
-}
-
-static void
-ast_ttm_global_release(struct ast_private *ast)
-{
- if (ast->ttm.mem_global_ref.release == NULL)
- return;
-
- drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
- drm_global_item_unref(&ast->ttm.mem_global_ref);
- ast->ttm.mem_global_ref.release = NULL;
-}
-
-
static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct ast_bo *bo;
@@ -232,12 +175,7 @@ int ast_mm_init(struct ast_private *ast)
struct drm_device *dev = ast->dev;
struct ttm_bo_device *bdev = &ast->ttm.bdev;
- ret = ast_ttm_global_init(ast);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&ast->ttm.bdev,
- ast->ttm.bo_global_ref.ref.object,
&ast_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -268,8 +206,6 @@ void ast_mm_fini(struct ast_private *ast)
ttm_bo_device_release(&ast->ttm.bdev);
- ast_ttm_global_release(ast);
-
arch_phys_wc_del(ast->fb_mtrr);
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 9e34bce089d0..96f4082671fe 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -364,9 +364,7 @@ static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
.mode_valid = atmel_hlcdc_crtc_mode_valid,
- .mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
- .mode_set_base = drm_helper_crtc_mode_set_base,
.atomic_check = atmel_hlcdc_crtc_atomic_check,
.atomic_begin = atmel_hlcdc_crtc_atomic_begin,
.atomic_flush = atmel_hlcdc_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 843cac222e60..034a91112098 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -556,7 +556,6 @@ error:
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = atmel_hlcdc_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = atmel_hlcdc_dc_atomic_commit,
};
@@ -658,8 +657,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
platform_set_drvdata(pdev, dev);
- drm_fb_cma_fbdev_init(dev, 24, 0);
-
drm_kms_helper_poll_init(dev);
return 0;
@@ -678,7 +675,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
- drm_fb_cma_fbdev_fini(dev);
flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
drm_atomic_helper_shutdown(dev);
@@ -727,7 +723,6 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = drm_fb_helper_lastclose,
.irq_handler = atmel_hlcdc_dc_irq_handler,
.irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
.irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
@@ -763,19 +758,21 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
ret = atmel_hlcdc_dc_load(ddev);
if (ret)
- goto err_unref;
+ goto err_put;
ret = drm_dev_register(ddev, 0);
if (ret)
goto err_unload;
+ drm_fbdev_generic_setup(ddev, 24);
+
return 0;
err_unload:
atmel_hlcdc_dc_unload(ddev);
-err_unref:
- drm_dev_unref(ddev);
+err_put:
+ drm_dev_put(ddev);
return ret;
}
@@ -786,7 +783,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
atmel_hlcdc_dc_unload(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return 0;
}
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index e7a69077e45a..fb38c8b857b5 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -66,6 +66,7 @@ struct bochs_device {
u16 yres_virtual;
u32 stride;
u32 bpp;
+ struct edid *edid;
/* drm */
struct drm_device *dev;
@@ -76,8 +77,6 @@ struct bochs_device {
/* ttm */
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
bool initialized;
} ttm;
@@ -126,6 +125,7 @@ void bochs_hw_setmode(struct bochs_device *bochs,
const struct drm_format_info *format);
void bochs_hw_setbase(struct bochs_device *bochs,
int x, int y, u64 addr);
+int bochs_hw_load_edid(struct bochs_device *bochs);
/* bochs_mm.c */
int bochs_mm_init(struct bochs_device *bochs);
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index cacff73a64ab..c90a0d492fd5 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -69,6 +69,35 @@ static void bochs_hw_set_little_endian(struct bochs_device *bochs)
#define bochs_hw_set_native_endian(_b) bochs_hw_set_little_endian(_b)
#endif
+static int bochs_get_edid_block(void *data, u8 *buf,
+ unsigned int block, size_t len)
+{
+ struct bochs_device *bochs = data;
+ size_t i, start = block * EDID_LENGTH;
+
+ if (start + len > 0x400 /* vga register offset */)
+ return -1;
+
+ for (i = 0; i < len; i++) {
+ buf[i] = readb(bochs->mmio + start + i);
+ }
+ return 0;
+}
+
+int bochs_hw_load_edid(struct bochs_device *bochs)
+{
+ if (!bochs->mmio)
+ return -1;
+
+ kfree(bochs->edid);
+ bochs->edid = drm_do_get_edid(&bochs->connector,
+ bochs_get_edid_block, bochs);
+ if (bochs->edid == NULL)
+ return -1;
+
+ return 0;
+}
+
int bochs_hw_init(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
@@ -164,6 +193,7 @@ void bochs_hw_fini(struct drm_device *dev)
if (bochs->fb_map)
iounmap(bochs->fb_map);
pci_release_regions(dev->pdev);
+ kfree(bochs->edid);
}
void bochs_hw_setmode(struct bochs_device *bochs,
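
bochs_hw_load_edid() above leans on the drm_do_get_edid() callback contract: the block-read callback receives a block index plus a byte count, copies len bytes starting at block * EDID_LENGTH (128 bytes per block), and returns non-zero to abort the probe. The blob lives at offset 0 of the MMIO BAR, which is why the read is bounded at 0x400, where the VGA registers begin; the cached EDID is freed both on reload and in bochs_hw_fini(). For reference, the helper's prototype from drm_edid.h:

struct edid *drm_do_get_edid(struct drm_connector *connector,
			     int (*get_edid_block)(void *data, u8 *buf,
						   unsigned int block,
						   size_t len),
			     void *data);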
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 9bc5b438aefd..f87c284dd93d 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -213,10 +213,17 @@ static void bochs_encoder_init(struct drm_device *dev)
static int bochs_connector_get_modes(struct drm_connector *connector)
{
- int count;
+ struct bochs_device *bochs =
+ container_of(connector, struct bochs_device, connector);
+ int count = 0;
+
+ if (bochs->edid)
+ count = drm_add_edid_modes(connector, bochs->edid);
- count = drm_add_modes_noedid(connector, 8192, 8192);
- drm_set_preferred_mode(connector, defx, defy);
+ if (!count) {
+ count = drm_add_modes_noedid(connector, 8192, 8192);
+ drm_set_preferred_mode(connector, defx, defy);
+ }
return count;
}
@@ -271,6 +278,13 @@ static void bochs_connector_init(struct drm_device *dev)
drm_connector_helper_add(connector,
&bochs_connector_connector_helper_funcs);
drm_connector_register(connector);
+
+ bochs_hw_load_edid(bochs);
+ if (bochs->edid) {
+ DRM_INFO("Found EDID data blob.\n");
+ drm_connector_attach_edid_property(connector);
+ drm_connector_update_edid_property(connector, bochs->edid);
+ }
}
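
Taken together, the bochs hunks wire QEMU's EDID blob into KMS: the EDID is loaded and cached at connector-init time and exposed through the standard EDID property, and get_modes() prefers the cached blob, falling back to the old synthetic mode list only when no EDID was found. Consolidated from the diff above:

static int bochs_connector_get_modes(struct drm_connector *connector)
{
	struct bochs_device *bochs =
		container_of(connector, struct bochs_device, connector);
	int count = 0;

	if (bochs->edid)
		count = drm_add_edid_modes(connector, bochs->edid);

	if (!count) {
		/* no EDID: advertise everything up to 8192x8192 */
		count = drm_add_modes_noedid(connector, 8192, 8192);
		drm_set_preferred_mode(connector, defx, defy);
	}
	return count;
}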
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index a61c1ecb2bdc..0980411e41bf 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -16,61 +16,6 @@ static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct bochs_device, ttm.bdev);
}
-static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int bochs_ttm_global_init(struct bochs_device *bochs)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- global_ref = &bochs->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &bochs_ttm_mem_global_init;
- global_ref->release = &bochs_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- bochs->ttm.bo_global_ref.mem_glob =
- bochs->ttm.mem_global_ref.object;
- global_ref = &bochs->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&bochs->ttm.mem_global_ref);
- return r;
- }
-
- return 0;
-}
-
-static void bochs_ttm_global_release(struct bochs_device *bochs)
-{
- if (bochs->ttm.mem_global_ref.release == NULL)
- return;
-
- drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
- drm_global_item_unref(&bochs->ttm.mem_global_ref);
- bochs->ttm.mem_global_ref.release = NULL;
-}
-
-
static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct bochs_bo *bo;
@@ -208,12 +153,7 @@ int bochs_mm_init(struct bochs_device *bochs)
struct ttm_bo_device *bdev = &bochs->ttm.bdev;
int ret;
- ret = bochs_ttm_global_init(bochs);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&bochs->ttm.bdev,
- bochs->ttm.bo_global_ref.ref.object,
&bochs_bo_driver,
bochs->dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -240,7 +180,6 @@ void bochs_mm_fini(struct bochs_device *bochs)
return;
ttm_bo_device_release(&bochs->ttm.bdev);
- bochs_ttm_global_release(bochs);
bochs->ttm.initialized = false;
}
@@ -414,7 +353,7 @@ int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -454,6 +393,6 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
bo = gem_to_bochs_bo(obj);
*offset = bochs_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
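
The bochs_mm.c hunk is one instance of another tree-wide cleanup in this pull: TTM now manages its memory-accounting and BO global state internally, so the per-driver drm_global_reference boilerplate (roughly 55 lines each, deleted here and in the ast and cirrus hunks in this section) goes away, and ttm_bo_device_init() loses its ttm_bo_global argument. The same hunk also renames drm_gem_object_unreference_unlocked() to drm_gem_object_put_unlocked(). After the cleanup, init and teardown reduce to approximately this (a sketch using the bochs names, with the trailing need_dma32 argument assumed unchanged from before):

int bochs_mm_init(struct bochs_device *bochs)
{
	/* no bochs_ttm_global_init() and no ttm_bo_global argument */
	return ttm_bo_device_init(&bochs->ttm.bdev, &bochs_bo_driver,
				  bochs->dev->anon_inode->i_mapping,
				  DRM_FILE_PAGE_OFFSET, true);
}

void bochs_mm_fini(struct bochs_device *bochs)
{
	ttm_bo_device_release(&bochs->ttm.bdev);
	/* bochs_ttm_global_release() is gone as well */
}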
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 9eeb8ef0b174..2fee47b0d50b 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -95,6 +95,7 @@ config DRM_SII902X
depends on OF
select DRM_KMS_HELPER
select REGMAP_I2C
+ select I2C_MUX
---help---
Silicon Image sii902x bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 2f21d3b6850b..753e96129ab7 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1219,12 +1219,12 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
* plat_data->attach() returns, that's why we record the connector
* pointer after the platform attach.
*/
- if (dp->plat_data->attach) {
- ret = dp->plat_data->attach(dp->plat_data, bridge, connector);
- if (ret) {
- DRM_ERROR("Failed at platform attch func\n");
- return ret;
- }
+ if (dp->plat_data->attach) {
+ ret = dp->plat_data->attach(dp->plat_data, bridge, connector);
+ if (ret) {
+ DRM_ERROR("Failed at platform attach func\n");
+ return ret;
+ }
}
if (dp->plat_data->panel) {
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index e59a13542333..bfa902013aa4 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1,4 +1,6 @@
/*
+ * Copyright (C) 2018 Renesas Electronics
+ *
* Copyright (C) 2016 Atmel
* Bo Shen <voice.shen@atmel.com>
*
@@ -21,6 +23,7 @@
*/
#include <linux/gpio/consumer.h>
+#include <linux/i2c-mux.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -86,8 +89,49 @@ struct sii902x {
struct drm_bridge bridge;
struct drm_connector connector;
struct gpio_desc *reset_gpio;
+ struct i2c_mux_core *i2cmux;
};
+static int sii902x_read_unlocked(struct i2c_client *i2c, u8 reg, u8 *val)
+{
+ union i2c_smbus_data data;
+ int ret;
+
+ ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_READ, reg, I2C_SMBUS_BYTE_DATA, &data);
+
+ if (ret < 0)
+ return ret;
+
+ *val = data.byte;
+ return 0;
+}
+
+static int sii902x_write_unlocked(struct i2c_client *i2c, u8 reg, u8 val)
+{
+ union i2c_smbus_data data;
+
+ data.byte = val;
+
+ return __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA,
+ &data);
+}
+
+static int sii902x_update_bits_unlocked(struct i2c_client *i2c, u8 reg, u8 mask,
+ u8 val)
+{
+ int ret;
+ u8 status;
+
+ ret = sii902x_read_unlocked(i2c, reg, &status);
+ if (ret)
+ return ret;
+ status &= ~mask;
+ status |= val & mask;
+ return sii902x_write_unlocked(i2c, reg, status);
+}
+
static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge)
{
return container_of(bridge, struct sii902x, bridge);
@@ -135,41 +179,11 @@ static const struct drm_connector_funcs sii902x_connector_funcs = {
static int sii902x_get_modes(struct drm_connector *connector)
{
struct sii902x *sii902x = connector_to_sii902x(connector);
- struct regmap *regmap = sii902x->regmap;
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- struct device *dev = &sii902x->i2c->dev;
- unsigned long timeout;
- unsigned int retries;
- unsigned int status;
struct edid *edid;
- int num = 0;
- int ret;
-
- ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
- SII902X_SYS_CTRL_DDC_BUS_REQ,
- SII902X_SYS_CTRL_DDC_BUS_REQ);
- if (ret)
- return ret;
-
- timeout = jiffies +
- msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
- do {
- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
- if (ret)
- return ret;
- } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
- time_before(jiffies, timeout));
+ int num = 0, ret;
- if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
- dev_err(dev, "failed to acquire the i2c bus\n");
- return -ETIMEDOUT;
- }
-
- ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status);
- if (ret)
- return ret;
-
- edid = drm_get_edid(connector, sii902x->i2c->adapter);
+ edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]);
drm_connector_update_edid_property(connector, edid);
if (edid) {
num = drm_add_edid_modes(connector, edid);
@@ -181,42 +195,6 @@ static int sii902x_get_modes(struct drm_connector *connector)
if (ret)
return ret;
- /*
- * Sometimes the I2C bus can stall after failure to use the
- * EDID channel. Retry a few times to see if things clear
- * up, else continue anyway.
- */
- retries = 5;
- do {
- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA,
- &status);
- retries--;
- } while (ret && retries);
- if (ret)
- dev_err(dev, "failed to read status (%d)\n", ret);
-
- ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
- SII902X_SYS_CTRL_DDC_BUS_REQ |
- SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
- if (ret)
- return ret;
-
- timeout = jiffies +
- msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
- do {
- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
- if (ret)
- return ret;
- } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
- SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
- time_before(jiffies, timeout));
-
- if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
- SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
- dev_err(dev, "failed to release the i2c bus\n");
- return -ETIMEDOUT;
- }
-
return num;
}
@@ -366,6 +344,121 @@ static irqreturn_t sii902x_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
+/*
+ * The purpose of sii902x_i2c_bypass_select is to enable the pass through
+ * mode of the HDMI transmitter. Do not use regmap from within this function,
+ * only use sii902x_*_unlocked functions to read/modify/write registers.
+ * We are holding the parent adapter lock here, keep this in mind before
+ * adding more i2c transactions.
+ *
+ * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere
+ * in this driver, we need to make sure that we only touch 0x1A[2:1] from
+ * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that
+ * we leave the remaining bits as we have found them.
+ */
+static int sii902x_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id)
+{
+ struct sii902x *sii902x = i2c_mux_priv(mux);
+ struct device *dev = &sii902x->i2c->dev;
+ unsigned long timeout;
+ u8 status;
+ int ret;
+
+ ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_DDC_BUS_REQ,
+ SII902X_SYS_CTRL_DDC_BUS_REQ);
+ if (ret)
+ return ret;
+
+ timeout = jiffies +
+ msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+ do {
+ ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ &status);
+ if (ret)
+ return ret;
+ } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+ time_before(jiffies, timeout));
+
+ if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+ dev_err(dev, "Failed to acquire the i2c bus\n");
+ return -ETIMEDOUT;
+ }
+
+ return sii902x_write_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ status);
+}
+
+/*
+ * The purpose of sii902x_i2c_bypass_deselect is to disable the pass through
+ * mode of the HDMI transmitter. Do not use regmap from within this function,
+ * only use sii902x_*_unlocked functions to read/modify/write registers.
+ * We are holding the parent adapter lock here, keep this in mind before
+ * adding more i2c transactions.
+ *
+ * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere
+ * in this driver, we need to make sure that we only touch 0x1A[2:1] from
+ * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that
+ * we leave the remaining bits as we have found them.
+ */
+static int sii902x_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
+{
+ struct sii902x *sii902x = i2c_mux_priv(mux);
+ struct device *dev = &sii902x->i2c->dev;
+ unsigned long timeout;
+ unsigned int retries;
+ u8 status;
+ int ret;
+
+ /*
+ * When the HDMI transmitter is in pass through mode, we need an
+ * (undocumented) additional delay between STOP and START conditions
+ * to guarantee the bus won't get stuck.
+ */
+ udelay(30);
+
+ /*
+ * Sometimes the I2C bus can stall after failure to use the
+ * EDID channel. Retry a few times to see if things clear
+ * up, else continue anyway.
+ */
+ retries = 5;
+ do {
+ ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ &status);
+ retries--;
+ } while (ret && retries);
+ if (ret) {
+ dev_err(dev, "failed to read status (%d)\n", ret);
+ return ret;
+ }
+
+ ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
+ if (ret)
+ return ret;
+
+ timeout = jiffies +
+ msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+ do {
+ ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ &status);
+ if (ret)
+ return ret;
+ } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+ time_before(jiffies, timeout));
+
+ if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+ dev_err(dev, "failed to release the i2c bus\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static int sii902x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -375,6 +468,13 @@ static int sii902x_probe(struct i2c_client *client,
u8 chipid[4];
int ret;
+ ret = i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA);
+ if (!ret) {
+ dev_err(dev, "I2C adapter not suitable\n");
+ return -EIO;
+ }
+
sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
if (!sii902x)
return -ENOMEM;
@@ -433,7 +533,15 @@ static int sii902x_probe(struct i2c_client *client,
i2c_set_clientdata(client, sii902x);
- return 0;
+ sii902x->i2cmux = i2c_mux_alloc(client->adapter, dev,
+ 1, 0, I2C_MUX_GATE,
+ sii902x_i2c_bypass_select,
+ sii902x_i2c_bypass_deselect);
+ if (!sii902x->i2cmux)
+ return -ENOMEM;
+
+ sii902x->i2cmux->priv = sii902x;
+ return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
}
static int sii902x_remove(struct i2c_client *client)
@@ -441,6 +549,7 @@ static int sii902x_remove(struct i2c_client *client)
{
struct sii902x *sii902x = i2c_get_clientdata(client);
+ i2c_mux_del_adapters(sii902x->i2cmux);
drm_bridge_remove(&sii902x->bridge);
return 0;
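
The sii902x rework replaces the ad-hoc DDC request/release handshake in get_modes() with a gated I2C mux (hence the new "select I2C_MUX" in Kconfig): the select/deselect callbacks perform the 0x1A[2:1] handshake through raw __i2c_smbus_xfer() because the parent adapter lock is already held when they run, the deselect path keeps the old stall-recovery retries and adds an undocumented 30us delay, and EDID is then simply read through the mux's child adapter. Reduced to its essentials from the probe hunk:

/* Sketch of the gate-mux registration above, error paths trimmed. */
sii902x->i2cmux = i2c_mux_alloc(client->adapter, dev,
				1,		/* one child adapter  */
				0,		/* no extra priv size */
				I2C_MUX_GATE,
				sii902x_i2c_bypass_select,
				sii902x_i2c_bypass_deselect);
if (!sii902x->i2cmux)
	return -ENOMEM;

sii902x->i2cmux->priv = sii902x;
ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);

/* EDID now goes through the gated child adapter: */
edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]);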
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 5971976284bf..64c3cf027518 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1664,6 +1664,7 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
case 0x131a:
case 0x132a:
case 0x201a:
+ case 0x212a:
count = 1;
break;
default:
@@ -1957,7 +1958,6 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
.get_modes = dw_hdmi_connector_get_modes,
- .best_encoder = drm_atomic_helper_best_encoder,
};
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
@@ -2205,7 +2205,9 @@ static int dw_hdmi_detect_phy(struct dw_hdmi *hdmi)
unsigned int i;
u8 phy_type;
- phy_type = hdmi_readb(hdmi, HDMI_CONFIG2_ID);
+ phy_type = hdmi->plat_data->phy_force_vendor ?
+ DW_HDMI_PHY_VENDOR_PHY :
+ hdmi_readb(hdmi, HDMI_CONFIG2_ID);
if (phy_type == DW_HDMI_PHY_VENDOR_PHY) {
/* Vendor PHYs require support from the glue layer. */
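
Two small dw-hdmi changes here: IP version 0x212a joins the list of revisions that need only one iteration of the overflow workaround, and glue layers can now force the vendor-PHY code path through a phy_force_vendor flag in their dw_hdmi_plat_data instead of trusting the HDMI_CONFIG2_ID register. A hypothetical glue layer would opt in along these lines (everything except phy_force_vendor is just the usual plat-data hooks, shown for context):

/* Illustrative only: "foo" is a made-up platform glue. */
static const struct dw_hdmi_plat_data foo_hdmi_plat_data = {
	.mode_valid	  = foo_hdmi_mode_valid,
	.phy_ops	  = &foo_hdmi_phy_ops,	/* vendor PHY callbacks */
	.phy_name	  = "foo-hdmi-phy",
	.phy_force_vendor = true,	/* skip HDMI_CONFIG2_ID probing */
};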
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index fd7999642cf8..2f4b145b73af 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -230,10 +230,21 @@ struct dw_mipi_dsi {
u32 format;
unsigned long mode_flags;
+ struct dw_mipi_dsi *master; /* dual-dsi master ptr */
+ struct dw_mipi_dsi *slave; /* dual-dsi slave ptr */
+
const struct dw_mipi_dsi_plat_data *plat_data;
};
/*
+ * Check if either a link to a master or slave is present
+ */
+static inline bool dw_mipi_is_dual_mode(struct dw_mipi_dsi *dsi)
+{
+ return dsi->slave || dsi->master;
+}
+
+/*
* The controller should generate 2 frames before
* preparing the peripheral.
*/
@@ -270,6 +281,7 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct dw_mipi_dsi *dsi = host_to_dsi(host);
+ const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
struct drm_bridge *bridge;
struct drm_panel *panel;
int ret;
@@ -300,6 +312,12 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
drm_bridge_add(&dsi->bridge);
+ if (pdata->host_ops && pdata->host_ops->attach) {
+ ret = pdata->host_ops->attach(pdata->priv_data, device);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -307,6 +325,14 @@ static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct dw_mipi_dsi *dsi = host_to_dsi(host);
+ const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
+ int ret;
+
+ if (pdata->host_ops && pdata->host_ops->detach) {
+ ret = pdata->host_ops->detach(pdata->priv_data, device);
+ if (ret < 0)
+ return ret;
+ }
drm_of_panel_bridge_remove(host->dev->of_node, 1, 0);
@@ -441,10 +467,17 @@ static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
}
dw_mipi_message_config(dsi, msg);
+ if (dsi->slave)
+ dw_mipi_message_config(dsi->slave, msg);
ret = dw_mipi_dsi_write(dsi, &packet);
if (ret)
return ret;
+ if (dsi->slave) {
+ ret = dw_mipi_dsi_write(dsi->slave, &packet);
+ if (ret)
+ return ret;
+ }
if (msg->rx_buf && msg->rx_len) {
ret = dw_mipi_dsi_read(dsi, msg);
@@ -583,7 +616,11 @@ static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
* DSI_VNPCR.NPSIZE... especially because this driver supports
* non-burst video modes, see dw_mipi_dsi_video_mode_config()...
*/
- dsi_write(dsi, DSI_VID_PKT_SIZE, VID_PKT_SIZE(mode->hdisplay));
+
+ dsi_write(dsi, DSI_VID_PKT_SIZE,
+ dw_mipi_is_dual_mode(dsi) ?
+ VID_PKT_SIZE(mode->hdisplay / 2) :
+ VID_PKT_SIZE(mode->hdisplay));
}
static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
@@ -755,24 +792,43 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
*/
dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
+ if (dsi->slave) {
+ dw_mipi_dsi_disable(dsi->slave);
+ clk_disable_unprepare(dsi->slave->pclk);
+ pm_runtime_put(dsi->slave->dev);
+ }
dw_mipi_dsi_disable(dsi);
+
clk_disable_unprepare(dsi->pclk);
pm_runtime_put(dsi->dev);
}
-static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static unsigned int dw_mipi_dsi_get_lanes(struct dw_mipi_dsi *dsi)
+{
+ /* this instance is the slave, so add the master's lanes */
+ if (dsi->master)
+ return dsi->master->lanes + dsi->lanes;
+
+ /* this instance is the master, so add the slave's lanes */
+ if (dsi->slave)
+ return dsi->lanes + dsi->slave->lanes;
+
+ /* single-dsi, so no other instance to consider */
+ return dsi->lanes;
+}
+
+static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
+ struct drm_display_mode *adjusted_mode)
{
- struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
void *priv_data = dsi->plat_data->priv_data;
int ret;
+ u32 lanes = dw_mipi_dsi_get_lanes(dsi);
clk_prepare_enable(dsi->pclk);
ret = phy_ops->get_lane_mbps(priv_data, adjusted_mode, dsi->mode_flags,
- dsi->lanes, dsi->format, &dsi->lane_mbps);
+ lanes, dsi->format, &dsi->lane_mbps);
if (ret)
DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n");
@@ -804,12 +860,25 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
dw_mipi_dsi_set_mode(dsi, 0);
}
+static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+
+ dw_mipi_dsi_mode_set(dsi, adjusted_mode);
+ if (dsi->slave)
+ dw_mipi_dsi_mode_set(dsi->slave, adjusted_mode);
+}
+
static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
/* Switch to video mode for panel-bridge enable & panel enable */
dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO);
+ if (dsi->slave)
+ dw_mipi_dsi_set_mode(dsi->slave, MIPI_DSI_MODE_VIDEO);
}
static enum drm_mode_status
@@ -941,9 +1010,25 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
static void __dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi)
{
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+
pm_runtime_disable(dsi->dev);
}
+void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave)
+{
+ /* introduce controllers to each other */
+ dsi->slave = slave;
+ dsi->slave->master = dsi;
+
+ /* migrate settings for already attached displays */
+ dsi->slave->lanes = dsi->lanes;
+ dsi->slave->channel = dsi->channel;
+ dsi->slave->format = dsi->format;
+ dsi->slave->mode_flags = dsi->mode_flags;
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi_set_slave);
+
/*
* Probe/remove API, used from platforms based on the DRM bridge API.
*/
@@ -957,8 +1042,6 @@ EXPORT_SYMBOL_GPL(dw_mipi_dsi_probe);
void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi)
{
- mipi_dsi_host_unregister(&dsi->dsi_host);
-
__dw_mipi_dsi_remove(dsi);
}
EXPORT_SYMBOL_GPL(dw_mipi_dsi_remove);
@@ -966,31 +1049,22 @@ EXPORT_SYMBOL_GPL(dw_mipi_dsi_remove);
/*
* Bind/unbind API, used from platforms based on the component framework.
*/
-struct dw_mipi_dsi *
-dw_mipi_dsi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
- const struct dw_mipi_dsi_plat_data *plat_data)
+int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder)
{
- struct dw_mipi_dsi *dsi;
int ret;
- dsi = __dw_mipi_dsi_probe(pdev, plat_data);
- if (IS_ERR(dsi))
- return dsi;
-
ret = drm_bridge_attach(encoder, &dsi->bridge, NULL);
if (ret) {
- dw_mipi_dsi_remove(dsi);
DRM_ERROR("Failed to initialize bridge with drm\n");
- return ERR_PTR(ret);
+ return ret;
}
- return dsi;
+ return ret;
}
EXPORT_SYMBOL_GPL(dw_mipi_dsi_bind);
void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi)
{
- __dw_mipi_dsi_remove(dsi);
}
EXPORT_SYMBOL_GPL(dw_mipi_dsi_unbind);
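
The dw-mipi-dsi changes add dual-DSI support and rework the bind API: a master instance mirrors message configuration, packet writes, mode_set and the video-mode switch to an optional slave, halves DSI_VID_PKT_SIZE since each controller drives half of the active width, and reports the combined lane count to get_lane_mbps(). Host unregistration moves from dw_mipi_dsi_remove() into __dw_mipi_dsi_remove(), and dw_mipi_dsi_unbind() becomes a no-op, since unbinding no longer destroys the instance. Glue drivers pair the two instances with the new dw_mipi_dsi_set_slave(); a sketch of the new component-bind flow, assuming a made-up glue driver that probed both instances earlier:

static int foo_dsi_bind(struct device *dev, struct device *master, void *data)
{
	struct foo_dsi *priv = dev_get_drvdata(dev);
	int ret;

	/* priv->dsi[0] and [1] were created via dw_mipi_dsi_probe() */
	if (priv->dual_dsi)
		dw_mipi_dsi_set_slave(priv->dsi[0], priv->dsi[1]);

	/* bind now only attaches the bridge, it no longer probes */
	ret = dw_mipi_dsi_bind(priv->dsi[0], &priv->encoder);
	if (ret)
		DRM_DEV_ERROR(dev, "failed to bind DSI bridge: %d\n", ret);

	return ret;
}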
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index ee6b98efa9c2..afd491018bfc 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -379,7 +379,7 @@ static void tc358764_detach(struct drm_bridge *bridge)
drm_fb_helper_remove_one_connector(drm->fb_helper, &ctx->connector);
drm_panel_detach(ctx->panel);
ctx->panel = NULL;
- drm_connector_unreference(&ctx->connector);
+ drm_connector_put(&ctx->connector);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index a29f87e98d9d..f2b2e0d169fa 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -136,8 +136,6 @@ struct cirrus_device {
int fb_mtrr;
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
bool mm_inited;
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 68ab1821e15b..4dd499c7d1ba 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -169,7 +169,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
struct drm_mode_fb_cmd2 mode_cmd;
void *sysram;
struct drm_gem_object *gobj = NULL;
- struct cirrus_bo *bo = NULL;
int size, ret;
mode_cmd.width = sizes->surface_width;
@@ -185,8 +184,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
return ret;
}
- bo = gem_to_cirrus_bo(gobj);
-
sysram = vmalloc(size);
if (!sysram)
return -ENOMEM;
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index f21953243790..e075810b4bd4 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -36,63 +36,6 @@ cirrus_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct cirrus_device, ttm.bdev);
}
-static int
-cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void
-cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- global_ref = &cirrus->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &cirrus_ttm_mem_global_init;
- global_ref->release = &cirrus_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- cirrus->ttm.bo_global_ref.mem_glob =
- cirrus->ttm.mem_global_ref.object;
- global_ref = &cirrus->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&cirrus->ttm.mem_global_ref);
- return r;
- }
- return 0;
-}
-
-static void
-cirrus_ttm_global_release(struct cirrus_device *cirrus)
-{
- if (cirrus->ttm.mem_global_ref.release == NULL)
- return;
-
- drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
- drm_global_item_unref(&cirrus->ttm.mem_global_ref);
- cirrus->ttm.mem_global_ref.release = NULL;
-}
-
-
static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct cirrus_bo *bo;
@@ -232,12 +175,7 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
struct drm_device *dev = cirrus->dev;
struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
- ret = cirrus_ttm_global_init(cirrus);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&cirrus->ttm.bdev,
- cirrus->ttm.bo_global_ref.ref.object,
&cirrus_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -273,8 +211,6 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
ttm_bo_device_release(&cirrus->ttm.bdev);
- cirrus_ttm_global_release(cirrus);
-
arch_phys_wc_del(cirrus->fb_mtrr);
cirrus->fb_mtrr = 0;
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3dbfbddae7e6..48ec378fb27e 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -315,9 +315,11 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
-static int drm_atomic_crtc_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
+ const struct drm_crtc_state *new_crtc_state)
{
+ struct drm_crtc *crtc = new_crtc_state->crtc;
+
/* NOTE: we explicitly don't enforce constraints such as primary
* layer covering entire screen, since that is something we want
* to allow (on hw that supports it). For hw that does not, it
@@ -326,7 +328,7 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
* TODO: Add generic modeset state checks once we support those.
*/
- if (state->active && !state->enable) {
+ if (new_crtc_state->active && !new_crtc_state->enable) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
crtc->base.id, crtc->name);
return -EINVAL;
@@ -336,14 +338,14 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
* as this is a kernel-internal detail that userspace should never
* be able to trigger. */
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
- WARN_ON(state->enable && !state->mode_blob)) {
+ WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
- WARN_ON(!state->enable && state->mode_blob)) {
+ WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
crtc->base.id, crtc->name);
return -EINVAL;
@@ -359,7 +361,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
* and legacy page_flip IOCTL which also reject service on a disabled
* pipe.
*/
- if (state->event && !state->active && !crtc->state->active) {
+ if (new_crtc_state->event &&
+ !new_crtc_state->active && !old_crtc_state->active) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
crtc->base.id, crtc->name);
return -EINVAL;
@@ -395,6 +398,11 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
{
struct drm_crtc_state *crtc_state;
struct drm_writeback_job *writeback_job = state->writeback_job;
+ const struct drm_display_info *info = &connector->display_info;
+
+ state->max_bpc = info->bpc ? info->bpc : 8;
+ if (connector->max_bpc_property)
+ state->max_bpc = min(state->max_bpc, state->max_requested_bpc);
if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
return 0;
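
drm_atomic_connector_check() now computes the effective colour depth for each commit: the sink's EDID-reported bpc (defaulting to 8 when unknown), clamped by the userspace "max bpc" property when the driver has attached it; a matching helper hunk below makes a change to max_requested_bpc trigger a full modeset. Drivers opt in at connector-init time, assuming the attach helper introduced by the same series:

/* Sketch: expose "max bpc" in the range 8..12 on a connector. */
ret = drm_connector_attach_max_bpc_property(connector, 8, 12);
if (ret)
	return ret;
/* the core then folds the request in as
 * state->max_bpc = min(sink bpc, state->max_requested_bpc) */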
@@ -489,14 +497,13 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_plane_state);
static bool
-plane_switching_crtc(struct drm_atomic_state *state,
- struct drm_plane *plane,
- struct drm_plane_state *plane_state)
+plane_switching_crtc(const struct drm_plane_state *old_plane_state,
+ const struct drm_plane_state *new_plane_state)
{
- if (!plane->state->crtc || !plane_state->crtc)
+ if (!old_plane_state->crtc || !new_plane_state->crtc)
return false;
- if (plane->state->crtc == plane_state->crtc)
+ if (old_plane_state->crtc == new_plane_state->crtc)
return false;
/* This could be refined, but currently there's no helper or driver code
@@ -509,88 +516,117 @@ plane_switching_crtc(struct drm_atomic_state *state,
/**
* drm_atomic_plane_check - check plane state
- * @plane: plane to check
- * @state: plane state to check
+ * @old_plane_state: old plane state to check
+ * @new_plane_state: new plane state to check
*
* Provides core sanity checks for plane state.
*
* RETURNS:
* Zero on success, error code on failure
*/
-static int drm_atomic_plane_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
+ const struct drm_plane_state *new_plane_state)
{
+ struct drm_plane *plane = new_plane_state->plane;
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ const struct drm_framebuffer *fb = new_plane_state->fb;
unsigned int fb_width, fb_height;
+ struct drm_mode_rect *clips;
+ uint32_t num_clips;
int ret;
/* either *both* CRTC and FB must be set, or neither */
- if (state->crtc && !state->fb) {
+ if (crtc && !fb) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
plane->base.id, plane->name);
return -EINVAL;
- } else if (state->fb && !state->crtc) {
+ } else if (fb && !crtc) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
plane->base.id, plane->name);
return -EINVAL;
}
/* if disabled, we don't care about the rest of the state: */
- if (!state->crtc)
+ if (!crtc)
return 0;
/* Check whether this plane is usable on this CRTC */
- if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
+ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
- state->crtc->base.id, state->crtc->name,
+ crtc->base.id, crtc->name,
plane->base.id, plane->name);
return -EINVAL;
}
/* Check whether this plane supports the fb pixel format. */
- ret = drm_plane_check_pixel_format(plane, state->fb->format->format,
- state->fb->modifier);
+ ret = drm_plane_check_pixel_format(plane, fb->format->format,
+ fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
plane->base.id, plane->name,
- drm_get_format_name(state->fb->format->format,
+ drm_get_format_name(fb->format->format,
&format_name),
- state->fb->modifier);
+ fb->modifier);
return ret;
}
/* Give drivers some help against integer overflows */
- if (state->crtc_w > INT_MAX ||
- state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
- state->crtc_h > INT_MAX ||
- state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
+ if (new_plane_state->crtc_w > INT_MAX ||
+ new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
+ new_plane_state->crtc_h > INT_MAX ||
+ new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
plane->base.id, plane->name,
- state->crtc_w, state->crtc_h,
- state->crtc_x, state->crtc_y);
+ new_plane_state->crtc_w, new_plane_state->crtc_h,
+ new_plane_state->crtc_x, new_plane_state->crtc_y);
return -ERANGE;
}
- fb_width = state->fb->width << 16;
- fb_height = state->fb->height << 16;
+ fb_width = fb->width << 16;
+ fb_height = fb->height << 16;
/* Make sure source coordinates are inside the fb. */
- if (state->src_w > fb_width ||
- state->src_x > fb_width - state->src_w ||
- state->src_h > fb_height ||
- state->src_y > fb_height - state->src_h) {
+ if (new_plane_state->src_w > fb_width ||
+ new_plane_state->src_x > fb_width - new_plane_state->src_w ||
+ new_plane_state->src_h > fb_height ||
+ new_plane_state->src_y > fb_height - new_plane_state->src_h) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
plane->base.id, plane->name,
- state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
- state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
- state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
- state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
- state->fb->width, state->fb->height);
+ new_plane_state->src_w >> 16,
+ ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
+ new_plane_state->src_h >> 16,
+ ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
+ new_plane_state->src_x >> 16,
+ ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
+ new_plane_state->src_y >> 16,
+ ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
+ fb->width, fb->height);
return -ENOSPC;
}
- if (plane_switching_crtc(state->state, plane, state)) {
+ clips = drm_plane_get_damage_clips(new_plane_state);
+ num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+
+ /* Make sure damage clips are valid and inside the fb. */
+ while (num_clips > 0) {
+ if (clips->x1 >= clips->x2 ||
+ clips->y1 >= clips->y2 ||
+ clips->x1 < 0 ||
+ clips->y1 < 0 ||
+ clips->x2 > fb_width ||
+ clips->y2 > fb_height) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
+ plane->base.id, plane->name, clips->x1,
+ clips->y1, clips->x2, clips->y2);
+ return -EINVAL;
+ }
+ clips++;
+ num_clips--;
+ }
+
+ if (plane_switching_crtc(old_plane_state, new_plane_state)) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
plane->base.id, plane->name);
return -EINVAL;
@@ -927,6 +963,8 @@ int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
+ const struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
struct drm_plane *plane;
WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
@@ -934,7 +972,7 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
crtc->base.id, crtc->name, state);
- drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+ drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
@@ -961,17 +999,19 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
struct drm_plane *plane;
- struct drm_plane_state *plane_state;
+ struct drm_plane_state *old_plane_state;
+ struct drm_plane_state *new_plane_state;
struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
int i, ret = 0;
DRM_DEBUG_ATOMIC("checking %p\n", state);
- for_each_new_plane_in_state(state, plane, plane_state, i) {
- ret = drm_atomic_plane_check(plane, plane_state);
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
if (ret) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
plane->base.id, plane->name);
@@ -979,8 +1019,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- ret = drm_atomic_crtc_check(crtc, crtc_state);
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
if (ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
crtc->base.id, crtc->name);
@@ -1008,8 +1048,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
if (!state->allow_modeset) {
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
crtc->base.id, crtc->name);
return -EINVAL;
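
drm_atomic.c also moves the core plane and CRTC checks over to old/new state pairs, so they no longer reach into plane->state and crtc->state behind the atomic state's back, and validates the new damage-clip blobs: each clip must be a non-empty rectangle inside the framebuffer. On the driver side, the damage helpers pulled in below let an atomic_update implementation walk only the clipped region; a minimal sketch, assuming a driver with a hypothetical foo_blit(fb, rect) flush primitive:

#include <drm/drm_damage_helper.h>

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		/* foo_blit() is made up: flush one damaged rectangle */
		foo_blit(state->fb, &clip);
	}
}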
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index d8b526b7932c..54e2ae614dcc 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -32,6 +32,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_writeback.h>
+#include <drm/drm_damage_helper.h>
#include <linux/dma-fence.h>
#include "drm_crtc_helper_internal.h"
@@ -92,6 +93,17 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
}
+/*
+ * For connectors that support multiple encoders, either the
+ * .atomic_best_encoder() or .best_encoder() operation must be implemented.
+ */
+static struct drm_encoder *
+pick_single_encoder_for_connector(struct drm_connector *connector)
+{
+ WARN_ON(connector->encoder_ids[1]);
+ return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+}
+
static int handle_conflicting_encoders(struct drm_atomic_state *state,
bool disable_conflicting_encoders)
{
@@ -119,7 +131,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
- new_encoder = drm_atomic_helper_best_encoder(connector);
+ new_encoder = pick_single_encoder_for_connector(connector);
if (new_encoder) {
if (encoder_mask & drm_encoder_mask(new_encoder)) {
@@ -336,7 +348,7 @@ update_connector_routing(struct drm_atomic_state *state,
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
- new_encoder = drm_atomic_helper_best_encoder(connector);
+ new_encoder = pick_single_encoder_for_connector(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -658,6 +670,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (old_connector_state->link_status !=
new_connector_state->link_status)
new_crtc_state->connectors_changed = true;
+
+ if (old_connector_state->max_requested_bpc !=
+ new_connector_state->max_requested_bpc)
+ new_crtc_state->connectors_changed = true;
}
if (funcs->atomic_check)
@@ -847,6 +863,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
+ drm_atomic_helper_check_plane_damage(state, new_plane_state);
+
if (!funcs || !funcs->atomic_check)
continue;
@@ -1445,6 +1463,9 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
crtc->base.id, crtc->name);
}
+
+ if (old_state->fake_commit)
+ complete_all(&old_state->fake_commit->flip_done);
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
@@ -2202,8 +2223,10 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
spin_unlock(&crtc->commit_lock);
}
- if (old_state->fake_commit)
+ if (old_state->fake_commit) {
complete_all(&old_state->fake_commit->cleanup_done);
+ WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
+ }
}
EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
@@ -3108,27 +3131,104 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
struct drm_modeset_acquire_ctx ctx;
int ret;
- drm_modeset_acquire_init(&ctx, 0);
- while (1) {
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
- if (!ret)
- ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
-
- if (ret != -EDEADLK)
- break;
-
- drm_modeset_backoff(&ctx);
- }
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
+ ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
if (ret)
DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
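
Three helpers in this file (shutdown above, suspend and resume below) drop their open-coded drm_modeset_lock_all_ctx()/-EDEADLK/drm_modeset_backoff() retry loops in favour of the DRM_MODESET_LOCK_ALL_BEGIN()/END() macro pair, which hides the acquire-context dance. The general shape, as a sketch:

struct drm_modeset_acquire_ctx ctx;
int ret;

DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

/* body that needs all modeset locks; leave any error in ret */

DRM_MODESET_LOCK_ALL_END(ctx, ret);
/* END backs off and replays the body while ret is -EDEADLK;
 * afterwards ret holds the first real error, or 0 */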
/**
+ * drm_atomic_helper_duplicate_state - duplicate an atomic state object
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Makes a copy of the current atomic state by looping over all objects and
+ * duplicating their respective states. This is used for example by suspend/
+ * resume support code to save the state prior to suspend such that it can
+ * be restored upon resume.
+ *
+ * Note that this treats atomic state as persistent between save and restore.
+ * Drivers must make sure that this is possible and won't result in confusion
+ * or erroneous behaviour.
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * A pointer to the copy of the atomic state object on success or an
+ * ERR_PTR()-encoded error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
+ */
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector *conn;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int err = 0;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ state->acquire_ctx = ctx;
+
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ err = PTR_ERR(crtc_state);
+ goto free;
+ }
+ }
+
+ drm_for_each_plane(plane, dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ err = PTR_ERR(plane_state);
+ goto free;
+ }
+ }
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(conn, &conn_iter) {
+ struct drm_connector_state *conn_state;
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ if (IS_ERR(conn_state)) {
+ err = PTR_ERR(conn_state);
+ drm_connector_list_iter_end(&conn_iter);
+ goto free;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ /* clear the acquire context so that it isn't accidentally reused */
+ state->acquire_ctx = NULL;
+
+free:
+ if (err < 0) {
+ drm_atomic_state_put(state);
+ state = ERR_PTR(err);
+ }
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
+
+/**
* drm_atomic_helper_suspend - subsystem-level suspend helper
* @dev: DRM device
*
@@ -3159,14 +3259,10 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
struct drm_atomic_state *state;
int err;
- drm_modeset_acquire_init(&ctx, 0);
+ /* This can never be returned, but it makes the compiler happy */
+ state = ERR_PTR(-EINVAL);
-retry:
- err = drm_modeset_lock_all_ctx(dev, &ctx);
- if (err < 0) {
- state = ERR_PTR(err);
- goto unlock;
- }
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
state = drm_atomic_helper_duplicate_state(dev, &ctx);
if (IS_ERR(state))
@@ -3180,13 +3276,10 @@ retry:
}
unlock:
- if (PTR_ERR(state) == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
+ DRM_MODESET_LOCK_ALL_END(ctx, err);
+ if (err)
+ return ERR_PTR(err);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);
@@ -3209,7 +3302,7 @@ EXPORT_SYMBOL(drm_atomic_helper_suspend);
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- int i;
+ int i, ret;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct drm_connector *connector;
@@ -3228,7 +3321,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
for_each_new_connector_in_state(state, connector, new_conn_state, i)
state->connectors[i].old_state = connector->state;
- return drm_atomic_commit(state);
+ ret = drm_atomic_commit(state);
+
+ state->acquire_ctx = NULL;
+
+ return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
@@ -3256,23 +3353,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
drm_mode_config_reset(dev);
- drm_modeset_acquire_init(&ctx, 0);
- while (1) {
- err = drm_modeset_lock_all_ctx(dev, &ctx);
- if (err)
- goto out;
-
- err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
-out:
- if (err != -EDEADLK)
- break;
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
- drm_modeset_backoff(&ctx);
- }
+ err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
+ DRM_MODESET_LOCK_ALL_END(ctx, err);
drm_atomic_state_put(state);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
return err;
}
@@ -3413,504 +3499,6 @@ fail:
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
/**
- * drm_atomic_helper_best_encoder - Helper for
- * &drm_connector_helper_funcs.best_encoder callback
- * @connector: Connector control structure
- *
- * This is a &drm_connector_helper_funcs.best_encoder callback helper for
- * connectors that support exactly 1 encoder, statically determined at driver
- * init time.
- */
-struct drm_encoder *
-drm_atomic_helper_best_encoder(struct drm_connector *connector)
-{
- WARN_ON(connector->encoder_ids[1]);
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
-}
-EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
-
-/**
- * DOC: atomic state reset and initialization
- *
- * Both the drm core and the atomic helpers assume that there is always the full
- * and correct atomic software state for all connectors, CRTCs and planes
- * available. Which is a bit a problem on driver load and also after system
- * suspend. One way to solve this is to have a hardware state read-out
- * infrastructure which reconstructs the full software state (e.g. the i915
- * driver).
- *
- * The simpler solution is to just reset the software state to everything off,
- * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
- * the atomic helpers provide default reset implementations for all hooks.
- *
- * On the upside the precise state tracking of atomic simplifies system suspend
- * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
- * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
- * For other drivers the building blocks are split out, see the documentation
- * for these functions.
- */
-
-/**
- * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
- * @crtc: drm CRTC
- *
- * Resets the atomic state for @crtc by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- */
-void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
-{
- if (crtc->state)
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
-
- kfree(crtc->state);
- crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
-
- if (crtc->state)
- crtc->state->crtc = crtc;
-}
-EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
-
-/**
- * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
- * @crtc: CRTC object
- * @state: atomic CRTC state
- *
- * Copies atomic state from a CRTC's current state and resets inferred values.
- * This is useful for drivers that subclass the CRTC state.
- */
-void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- memcpy(state, crtc->state, sizeof(*state));
-
- if (state->mode_blob)
- drm_property_blob_get(state->mode_blob);
- if (state->degamma_lut)
- drm_property_blob_get(state->degamma_lut);
- if (state->ctm)
- drm_property_blob_get(state->ctm);
- if (state->gamma_lut)
- drm_property_blob_get(state->gamma_lut);
- state->mode_changed = false;
- state->active_changed = false;
- state->planes_changed = false;
- state->connectors_changed = false;
- state->color_mgmt_changed = false;
- state->zpos_changed = false;
- state->commit = NULL;
- state->event = NULL;
- state->pageflip_flags = 0;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
-
-/**
- * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
- * @crtc: drm CRTC
- *
- * Default CRTC state duplicate hook for drivers which don't have their own
- * subclassed CRTC state structure.
- */
-struct drm_crtc_state *
-drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
-{
- struct drm_crtc_state *state;
-
- if (WARN_ON(!crtc->state))
- return NULL;
-
- state = kmalloc(sizeof(*state), GFP_KERNEL);
- if (state)
- __drm_atomic_helper_crtc_duplicate_state(crtc, state);
-
- return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
-
-/**
- * __drm_atomic_helper_crtc_destroy_state - release CRTC state
- * @state: CRTC state object to release
- *
- * Releases all resources stored in the CRTC state without actually freeing
- * the memory of the CRTC state. This is useful for drivers that subclass the
- * CRTC state.
- */
-void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
-{
- if (state->commit) {
- /*
- * In the event that a non-blocking commit returns
- * -ERESTARTSYS before the commit_tail work is queued, we will
- * have an extra reference to the commit object. Release it, if
- * the event has not been consumed by the worker.
- *
- * state->event may be freed, so we can't directly look at
- * state->event->base.completion.
- */
- if (state->event && state->commit->abort_completion)
- drm_crtc_commit_put(state->commit);
-
- kfree(state->commit->event);
- state->commit->event = NULL;
-
- drm_crtc_commit_put(state->commit);
- }
-
- drm_property_blob_put(state->mode_blob);
- drm_property_blob_put(state->degamma_lut);
- drm_property_blob_put(state->ctm);
- drm_property_blob_put(state->gamma_lut);
-}
-EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
-
-/**
- * drm_atomic_helper_crtc_destroy_state - default state destroy hook
- * @crtc: drm CRTC
- * @state: CRTC state object to release
- *
- * Default CRTC state destroy hook for drivers which don't have their own
- * subclassed CRTC state structure.
- */
-void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- __drm_atomic_helper_crtc_destroy_state(state);
- kfree(state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
-
-/**
- * __drm_atomic_helper_plane_reset - resets planes state to default values
- * @plane: plane object, must not be NULL
- * @state: atomic plane state, must not be NULL
- *
- * Initializes plane state to default. This is useful for drivers that subclass
- * the plane state.
- */
-void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- state->plane = plane;
- state->rotation = DRM_MODE_ROTATE_0;
-
- state->alpha = DRM_BLEND_ALPHA_OPAQUE;
- state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
-
- plane->state = state;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
-
-/**
- * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
- * @plane: drm plane
- *
- * Resets the atomic state for @plane by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- */
-void drm_atomic_helper_plane_reset(struct drm_plane *plane)
-{
- if (plane->state)
- __drm_atomic_helper_plane_destroy_state(plane->state);
-
- kfree(plane->state);
- plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
- if (plane->state)
- __drm_atomic_helper_plane_reset(plane, plane->state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
-
-/**
- * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
- * @plane: plane object
- * @state: atomic plane state
- *
- * Copies atomic state from a plane's current state. This is useful for
- * drivers that subclass the plane state.
- */
-void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- memcpy(state, plane->state, sizeof(*state));
-
- if (state->fb)
- drm_framebuffer_get(state->fb);
-
- state->fence = NULL;
- state->commit = NULL;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
-
-/**
- * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
- * @plane: drm plane
- *
- * Default plane state duplicate hook for drivers which don't have their own
- * subclassed plane state structure.
- */
-struct drm_plane_state *
-drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
-{
- struct drm_plane_state *state;
-
- if (WARN_ON(!plane->state))
- return NULL;
-
- state = kmalloc(sizeof(*state), GFP_KERNEL);
- if (state)
- __drm_atomic_helper_plane_duplicate_state(plane, state);
-
- return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
-
-/**
- * __drm_atomic_helper_plane_destroy_state - release plane state
- * @state: plane state object to release
- *
- * Releases all resources stored in the plane state without actually freeing
- * the memory of the plane state. This is useful for drivers that subclass the
- * plane state.
- */
-void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
-{
- if (state->fb)
- drm_framebuffer_put(state->fb);
-
- if (state->fence)
- dma_fence_put(state->fence);
-
- if (state->commit)
- drm_crtc_commit_put(state->commit);
-}
-EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
-
-/**
- * drm_atomic_helper_plane_destroy_state - default state destroy hook
- * @plane: drm plane
- * @state: plane state object to release
- *
- * Default plane state destroy hook for drivers which don't have their own
- * subclassed plane state structure.
- */
-void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- __drm_atomic_helper_plane_destroy_state(state);
- kfree(state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
-
-/**
- * __drm_atomic_helper_connector_reset - reset state on connector
- * @connector: drm connector
- * @conn_state: connector state to assign
- *
- * Initializes the newly allocated @conn_state and assigns it to
- * the &drm_conector->state pointer of @connector, usually required when
- * initializing the drivers or when called from the &drm_connector_funcs.reset
- * hook.
- *
- * This is useful for drivers that subclass the connector state.
- */
-void
-__drm_atomic_helper_connector_reset(struct drm_connector *connector,
- struct drm_connector_state *conn_state)
-{
- if (conn_state)
- conn_state->connector = connector;
-
- connector->state = conn_state;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
-
-/**
- * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors
- * @connector: drm connector
- *
- * Resets the atomic state for @connector by freeing the state pointer (which
- * might be NULL, e.g. at driver load time) and allocating a new empty state
- * object.
- */
-void drm_atomic_helper_connector_reset(struct drm_connector *connector)
-{
- struct drm_connector_state *conn_state =
- kzalloc(sizeof(*conn_state), GFP_KERNEL);
-
- if (connector->state)
- __drm_atomic_helper_connector_destroy_state(connector->state);
-
- kfree(connector->state);
- __drm_atomic_helper_connector_reset(connector, conn_state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
-
-/**
- * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
- * @connector: connector object
- * @state: atomic connector state
- *
- * Copies atomic state from a connector's current state. This is useful for
- * drivers that subclass the connector state.
- */
-void
-__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
- struct drm_connector_state *state)
-{
- memcpy(state, connector->state, sizeof(*state));
- if (state->crtc)
- drm_connector_get(connector);
- state->commit = NULL;
-
- /* Don't copy over a writeback job, they are used only once */
- state->writeback_job = NULL;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
-
-/**
- * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
- * @connector: drm connector
- *
- * Default connector state duplicate hook for drivers which don't have their own
- * subclassed connector state structure.
- */
-struct drm_connector_state *
-drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
-{
- struct drm_connector_state *state;
-
- if (WARN_ON(!connector->state))
- return NULL;
-
- state = kmalloc(sizeof(*state), GFP_KERNEL);
- if (state)
- __drm_atomic_helper_connector_duplicate_state(connector, state);
-
- return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
-
-/**
- * drm_atomic_helper_duplicate_state - duplicate an atomic state object
- * @dev: DRM device
- * @ctx: lock acquisition context
- *
- * Makes a copy of the current atomic state by looping over all objects and
- * duplicating their respective states. This is used for example by suspend/
- * resume support code to save the state prior to suspend such that it can
- * be restored upon resume.
- *
- * Note that this treats atomic state as persistent between save and restore.
- * Drivers must make sure that this is possible and won't result in confusion
- * or erroneous behaviour.
- *
- * Note that if callers haven't already acquired all modeset locks this might
- * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
- *
- * Returns:
- * A pointer to the copy of the atomic state object on success or an
- * ERR_PTR()-encoded error code on failure.
- *
- * See also:
- * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
- */
-struct drm_atomic_state *
-drm_atomic_helper_duplicate_state(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_atomic_state *state;
- struct drm_connector *conn;
- struct drm_connector_list_iter conn_iter;
- struct drm_plane *plane;
- struct drm_crtc *crtc;
- int err = 0;
-
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return ERR_PTR(-ENOMEM);
-
- state->acquire_ctx = ctx;
-
- drm_for_each_crtc(crtc, dev) {
- struct drm_crtc_state *crtc_state;
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- err = PTR_ERR(crtc_state);
- goto free;
- }
- }
-
- drm_for_each_plane(plane, dev) {
- struct drm_plane_state *plane_state;
-
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state)) {
- err = PTR_ERR(plane_state);
- goto free;
- }
- }
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(conn, &conn_iter) {
- struct drm_connector_state *conn_state;
-
- conn_state = drm_atomic_get_connector_state(state, conn);
- if (IS_ERR(conn_state)) {
- err = PTR_ERR(conn_state);
- drm_connector_list_iter_end(&conn_iter);
- goto free;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
-
- /* clear the acquire context so that it isn't accidentally reused */
- state->acquire_ctx = NULL;
-
-free:
- if (err < 0) {
- drm_atomic_state_put(state);
- state = ERR_PTR(err);
- }
-
- return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
-
-/**
- * __drm_atomic_helper_connector_destroy_state - release connector state
- * @state: connector state object to release
- *
- * Releases all resources stored in the connector state without actually
- * freeing the memory of the connector state. This is useful for drivers that
- * subclass the connector state.
- */
-void
-__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
-{
- if (state->crtc)
- drm_connector_put(state->connector);
-
- if (state->commit)
- drm_crtc_commit_put(state->commit);
-}
-EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
-
-/**
- * drm_atomic_helper_connector_destroy_state - default state destroy hook
- * @connector: drm connector
- * @state: connector state object to release
- *
- * Default connector state destroy hook for drivers which don't have their own
- * subclassed connector state structure.
- */
-void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
- struct drm_connector_state *state)
-{
- __drm_atomic_helper_connector_destroy_state(state);
- kfree(state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
-
-/**
* drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
* @crtc: CRTC object
* @red: red correction table
@@ -3979,18 +3567,3 @@ fail:
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
-
-/**
- * __drm_atomic_helper_private_duplicate_state - copy atomic private state
- * @obj: CRTC object
- * @state: new private object state
- *
- * Copies atomic state from a private objects's current state and resets inferred values.
- * This is useful for drivers that subclass the private state.
- */
-void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
- struct drm_private_state *state)
-{
- memcpy(state, obj->state, sizeof(*state));
-}
-EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
new file mode 100644
index 000000000000..60bd7d708e35
--- /dev/null
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_device.h>
+
+#include <linux/slab.h>
+#include <linux/dma-fence.h>
+
+/**
+ * DOC: atomic state reset and initialization
+ *
+ * Both the drm core and the atomic helpers assume that there is always the full
+ * and correct atomic software state for all connectors, CRTCs and planes
+ * available, which is a bit of a problem on driver load and also after system
+ * suspend. One way to solve this is to have a hardware state read-out
+ * infrastructure which reconstructs the full software state (e.g. the i915
+ * driver).
+ *
+ * The simpler solution is to just reset the software state to everything off,
+ * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
+ * the atomic helpers provide default reset implementations for all hooks.
+ *
+ * On the upside the precise state tracking of atomic simplifies system suspend
+ * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
+ * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
+ * For other drivers the building blocks are split out, see the documentation
+ * for these functions.
+ */
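For a driver without subclassed state objects, wiring up these defaults is one line per hook. A minimal sketch, with the foo_ names being hypothetical placeholders rather than part of this patch:

	static const struct drm_crtc_funcs foo_crtc_funcs = {
		/* ... .destroy, .set_config, .page_flip, etc. ... */
		.reset			= drm_atomic_helper_crtc_reset,
		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
	};

With these hooks in place, a single drm_mode_config_reset() call at load time gives every object a valid, all-off software state.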
+
+/**
+ * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
+ * @crtc: drm CRTC
+ *
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
+{
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ kfree(crtc->state);
+ crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
+
+ if (crtc->state)
+ crtc->state->crtc = crtc;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
+
+/**
+ * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
+ * @crtc: CRTC object
+ * @state: atomic CRTC state
+ *
+ * Copies atomic state from a CRTC's current state and resets inferred values.
+ * This is useful for drivers that subclass the CRTC state.
+ */
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ memcpy(state, crtc->state, sizeof(*state));
+
+ if (state->mode_blob)
+ drm_property_blob_get(state->mode_blob);
+ if (state->degamma_lut)
+ drm_property_blob_get(state->degamma_lut);
+ if (state->ctm)
+ drm_property_blob_get(state->ctm);
+ if (state->gamma_lut)
+ drm_property_blob_get(state->gamma_lut);
+ state->mode_changed = false;
+ state->active_changed = false;
+ state->planes_changed = false;
+ state->connectors_changed = false;
+ state->color_mgmt_changed = false;
+ state->zpos_changed = false;
+ state->commit = NULL;
+ state->event = NULL;
+ state->pageflip_flags = 0;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
+
+/**
+ * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
+ * @crtc: drm CRTC
+ *
+ * Default CRTC state duplicate hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_crtc_duplicate_state(crtc, state);
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
+
+/**
+ * __drm_atomic_helper_crtc_destroy_state - release CRTC state
+ * @state: CRTC state object to release
+ *
+ * Releases all resources stored in the CRTC state without actually freeing
+ * the memory of the CRTC state. This is useful for drivers that subclass the
+ * CRTC state.
+ */
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
+{
+ if (state->commit) {
+ /*
+ * In the event that a non-blocking commit returns
+ * -ERESTARTSYS before the commit_tail work is queued, we will
+ * have an extra reference to the commit object. Release it if
+ * the event has not been consumed by the worker.
+ *
+ * state->event may be freed, so we can't directly look at
+ * state->event->base.completion.
+ */
+ if (state->event && state->commit->abort_completion)
+ drm_crtc_commit_put(state->commit);
+
+ kfree(state->commit->event);
+ state->commit->event = NULL;
+
+ drm_crtc_commit_put(state->commit);
+ }
+
+ drm_property_blob_put(state->mode_blob);
+ drm_property_blob_put(state->degamma_lut);
+ drm_property_blob_put(state->ctm);
+ drm_property_blob_put(state->gamma_lut);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
+
+/**
+ * drm_atomic_helper_crtc_destroy_state - default state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ *
+ * Default CRTC state destroy hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
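Drivers that carry extra per-CRTC state usually embed &drm_crtc_state and let the __drm_atomic_helper_*() variants handle the common fields. A sketch of that pattern, again with hypothetical foo_ names and a made-up private field:

	struct foo_crtc_state {
		struct drm_crtc_state base;
		u32 dither_mode;	/* hypothetical driver-private field */
	};

	#define to_foo_crtc_state(s) \
		container_of(s, struct foo_crtc_state, base)

	static struct drm_crtc_state *
	foo_crtc_duplicate_state(struct drm_crtc *crtc)
	{
		struct foo_crtc_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;

		/* copies the common fields and grabs the blob references */
		__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
		state->dither_mode = to_foo_crtc_state(crtc->state)->dither_mode;

		return &state->base;
	}

	static void foo_crtc_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state)
	{
		/* drops the blob and commit references, then frees the subclass */
		__drm_atomic_helper_crtc_destroy_state(state);
		kfree(to_foo_crtc_state(state));
	}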
+
+/**
+ * __drm_atomic_helper_plane_reset - resets planes state to default values
+ * @plane: plane object, must not be NULL
+ * @state: atomic plane state, must not be NULL
+ *
+ * Initializes plane state to default. This is useful for drivers that subclass
+ * the plane state.
+ */
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ state->plane = plane;
+ state->rotation = DRM_MODE_ROTATE_0;
+
+ state->alpha = DRM_BLEND_ALPHA_OPAQUE;
+ state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+
+ plane->state = state;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
+
+/**
+ * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
+ * @plane: drm plane
+ *
+ * Resets the atomic state for @plane by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_plane_reset(struct drm_plane *plane)
+{
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(plane->state);
+ plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
+ if (plane->state)
+ __drm_atomic_helper_plane_reset(plane, plane->state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
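The same subclassing pattern works for the reset hook: allocate the subclassed state, let __drm_atomic_helper_plane_reset() fill in the defaults and assign it to the plane, then initialize any driver-private fields. A sketch with a hypothetical foo_plane_state:

	struct foo_plane_state {
		struct drm_plane_state base;
		bool use_compression;	/* hypothetical driver-private field */
	};

	static void foo_plane_reset(struct drm_plane *plane)
	{
		struct foo_plane_state *state;

		if (plane->state)
			plane->funcs->atomic_destroy_state(plane, plane->state);

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return;

		/* sets the defaults and assigns &state->base to plane->state */
		__drm_atomic_helper_plane_reset(plane, &state->base);
		state->use_compression = false;
	}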
+
+/**
+ * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
+ * @plane: plane object
+ * @state: atomic plane state
+ *
+ * Copies atomic state from a plane's current state. This is useful for
+ * drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ memcpy(state, plane->state, sizeof(*state));
+
+ if (state->fb)
+ drm_framebuffer_get(state->fb);
+
+ state->fence = NULL;
+ state->commit = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
+
+/**
+ * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
+ * @plane: drm plane
+ *
+ * Default plane state duplicate hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct drm_plane_state *state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_plane_duplicate_state(plane, state);
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
+
+/**
+ * __drm_atomic_helper_plane_destroy_state - release plane state
+ * @state: plane state object to release
+ *
+ * Releases all resources stored in the plane state without actually freeing
+ * the memory of the plane state. This is useful for drivers that subclass the
+ * plane state.
+ */
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
+{
+ if (state->fb)
+ drm_framebuffer_put(state->fb);
+
+ if (state->fence)
+ dma_fence_put(state->fence);
+
+ if (state->commit)
+ drm_crtc_commit_put(state->commit);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
+
+/**
+ * drm_atomic_helper_plane_destroy_state - default state destroy hook
+ * @plane: drm plane
+ * @state: plane state object to release
+ *
+ * Default plane state destroy hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
+
+/**
+ * __drm_atomic_helper_connector_reset - reset state on connector
+ * @connector: drm connector
+ * @conn_state: connector state to assign
+ *
+ * Initializes the newly allocated @conn_state and assigns it to
+ * the &drm_connector->state pointer of @connector, usually required when
+ * initializing the drivers or when called from the &drm_connector_funcs.reset
+ * hook.
+ *
+ * This is useful for drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_reset(struct drm_connector *connector,
+ struct drm_connector_state *conn_state)
+{
+ if (conn_state)
+ conn_state->connector = connector;
+
+ connector->state = conn_state;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
+
+/**
+ * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors
+ * @connector: drm connector
+ *
+ * Resets the atomic state for @connector by freeing the state pointer (which
+ * might be NULL, e.g. at driver load time) and allocating a new empty state
+ * object.
+ */
+void drm_atomic_helper_connector_reset(struct drm_connector *connector)
+{
+ struct drm_connector_state *conn_state =
+ kzalloc(sizeof(*conn_state), GFP_KERNEL);
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(connector->state);
+ __drm_atomic_helper_connector_reset(connector, conn_state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
+
+/**
+ * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
+ * @connector: connector object
+ * @state: atomic connector state
+ *
+ * Copies atomic state from a connector's current state. This is useful for
+ * drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ memcpy(state, connector->state, sizeof(*state));
+ if (state->crtc)
+ drm_connector_get(connector);
+ state->commit = NULL;
+
+ /* Don't copy over a writeback job, they are used only once */
+ state->writeback_job = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
+
+/**
+ * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
+ * @connector: drm connector
+ *
+ * Default connector state duplicate hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct drm_connector_state *state;
+
+ if (WARN_ON(!connector->state))
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_connector_duplicate_state(connector, state);
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
+
+/**
+ * __drm_atomic_helper_connector_destroy_state - release connector state
+ * @state: connector state object to release
+ *
+ * Releases all resources stored in the connector state without actually
+ * freeing the memory of the connector state. This is useful for drivers that
+ * subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
+{
+ if (state->crtc)
+ drm_connector_put(state->connector);
+
+ if (state->commit)
+ drm_crtc_commit_put(state->commit);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
+
+/**
+ * drm_atomic_helper_connector_destroy_state - default state destroy hook
+ * @connector: drm connector
+ * @state: connector state object to release
+ *
+ * Default connector state destroy hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ __drm_atomic_helper_connector_destroy_state(state);
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
+
+/**
+ * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state
+ * @obj: private object
+ * @state: new private object state
+ *
+ * Copies atomic state from a private object's current state. This is useful
+ * for drivers that subclass the private state.
+ */
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ memcpy(state, obj->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index d5b7f315098c..c40889888a16 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -433,6 +433,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
drm_property_blob_put(mode);
return ret;
+ } else if (property == config->prop_vrr_enabled) {
+ state->vrr_enabled = val;
} else if (property == config->degamma_lut_property) {
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->degamma_lut,
@@ -491,6 +493,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = state->active;
else if (property == config->prop_mode_id)
*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
+ else if (property == config->prop_vrr_enabled)
+ *val = state->vrr_enabled;
else if (property == config->degamma_lut_property)
*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
else if (property == config->ctm_property)
@@ -513,6 +517,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
+ bool replaced = false;
+ int ret;
if (property == config->prop_fb_id) {
struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
@@ -566,6 +572,14 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
state->color_encoding = val;
} else if (property == plane->color_range_property) {
state->color_range = val;
+ } else if (property == config->prop_fb_damage_clips) {
+ ret = drm_atomic_replace_property_blob_from_id(dev,
+ &state->fb_damage_clips,
+ val,
+ -1,
+ sizeof(struct drm_rect),
+ &replaced);
+ return ret;
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
@@ -621,6 +635,9 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->color_encoding;
} else if (property == plane->color_range_property) {
*val = state->color_range;
+ } else if (property == config->prop_fb_damage_clips) {
+ *val = (state->fb_damage_clips) ?
+ state->fb_damage_clips->base.id : 0;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
@@ -740,6 +757,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return set_out_fence_for_connector(state->state, connector,
fence_ptr);
+ } else if (property == connector->max_bpc_property) {
+ state->max_requested_bpc = val;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
@@ -804,6 +823,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = 0;
} else if (property == config->writeback_out_fence_ptr_property) {
*val = 0;
+ } else if (property == connector->max_bpc_property) {
+ *val = state->max_requested_bpc;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 7412acaf3cde..d7d10cabb9bb 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -36,6 +36,8 @@
#include <drm/drmP.h>
#include "drm_legacy.h"
+#include <linux/nospec.h>
+
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
struct drm_local_map *map)
{
@@ -1417,6 +1419,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
idx, dma->buf_count - 1);
return -EINVAL;
}
+ idx = array_index_nospec(idx, dma->buf_count);
buf = dma->buflist[idx];
if (buf->file_priv != file_priv) {
DRM_ERROR("Process %d freeing buffer not owned\n",
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fc03d26fcacc..9b2bd28dde0a 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -81,8 +81,7 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
{
int ret;
- if (!drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev->driver->dumb_create || !dev->driver->gem_prime_vmap)
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create)
return -EOPNOTSUPP;
if (funcs && !try_module_get(funcs->owner))
@@ -229,8 +228,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
struct drm_device *dev = buffer->client->dev;
- if (buffer->vaddr && dev->driver->gem_prime_vunmap)
- dev->driver->gem_prime_vunmap(buffer->gem, buffer->vaddr);
+ drm_gem_vunmap(buffer->gem, buffer->vaddr);
if (buffer->gem)
drm_gem_object_put_unlocked(buffer->gem);
@@ -283,9 +281,9 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
* fd_install step out of the driver backend hooks, to make that
* final step optional for internal users.
*/
- vaddr = dev->driver->gem_prime_vmap(obj);
- if (!vaddr) {
- ret = -ENOMEM;
+ vaddr = drm_gem_vmap(obj);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
goto err_delete;
}
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 581cc3788223..07dcf47daafe 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -255,11 +255,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
if (crtc_lut->gamma_size != crtc->gamma_size)
return -EINVAL;
- drm_modeset_acquire_init(&ctx, 0);
-retry:
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
- if (ret)
- goto out;
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
size = crtc_lut->gamma_size * (sizeof(uint16_t));
r_base = crtc->gamma_store;
@@ -284,13 +280,7 @@ retry:
crtc->gamma_size, &ctx);
out:
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
return ret;
}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 4943cef178be..da8ae80c2750 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -260,9 +260,7 @@ int drm_connector_init(struct drm_device *dev,
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
- drm_object_attach_property(&connector->base,
- config->edid_property,
- 0);
+ drm_connector_attach_edid_property(connector);
drm_object_attach_property(&connector->base,
config->dpms_property, 0);
@@ -295,6 +293,24 @@ out_put:
EXPORT_SYMBOL(drm_connector_init);
/**
+ * drm_connector_attach_edid_property - attach edid property.
+ * @connector: the connector
+ *
+ * Some connector types like DRM_MODE_CONNECTOR_VIRTUAL do not get an
+ * edid property attached by default. This function can be used to
+ * explicitly enable the edid property in these cases.
+ */
+void drm_connector_attach_edid_property(struct drm_connector *connector)
+{
+ struct drm_mode_config *config = &connector->dev->mode_config;
+
+ drm_object_attach_property(&connector->base,
+ config->edid_property,
+ 0);
+}
+EXPORT_SYMBOL(drm_connector_attach_edid_property);
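As an example, a virtual connector that is backed by real EDID data could opt back in at init time; a short sketch, assuming a hypothetical foo_connector_funcs:

	drm_connector_init(dev, connector, &foo_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	drm_connector_attach_edid_property(connector);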
+
+/**
* drm_connector_attach_encoder - attach a connector to an encoder
* @connector: connector to attach
* @encoder: encoder to attach @connector to
@@ -916,6 +932,13 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* is no longer protected and userspace should take appropriate action
* (whatever that might be).
*
+ * max bpc:
+ * This range property is used by userspace to limit the bit depth. When
+ * used, the driver limits the bpc in accordance with the valid range
+ * supported by the hardware and sink. Drivers should use
+ * drm_connector_attach_max_bpc_property() to create and attach the
+ * property to the connector during initialization.
+ *
* Connectors also have one standardized atomic property:
*
* CRTC_ID:
@@ -1256,6 +1279,105 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
/**
+ * DOC: Variable refresh properties
+ *
+ * Variable refresh rate capable displays can dynamically adjust their
+ * refresh rate by extending the duration of their vertical front porch
+ * until page flip or timeout occurs. This can reduce or remove stuttering
+ * and latency in scenarios where the page flip does not align with the
+ * vblank interval.
+ *
+ * An example scenario would be an application flipping at a constant rate
+ * of 48Hz on a 60Hz display. The page flip will frequently miss the vblank
+ * interval and the same contents will be displayed twice. This can be
+ * observed as stuttering for content with motion.
+ *
+ * If variable refresh rate was active on a display that supported a
+ * variable refresh range from 35Hz to 60Hz no stuttering would be observable
+ * for the example scenario. The minimum supported variable refresh rate of
+ * 35Hz is below the page flip frequency and the vertical front porch can
+ * be extended until the page flip occurs. The vblank interval will be
+ * directly aligned to the page flip rate.
+ *
+ * Not all userspace content is suitable for use with variable refresh rate.
+ * Large and frequent changes in vertical front porch duration may worsen
+ * perceived stuttering for input sensitive applications.
+ *
+ * Panel brightness will also vary with vertical front porch duration. Some
+ * panels may have noticeable differences in brightness between the minimum
+ * vertical front porch duration and the maximum vertical front porch duration.
+ * Large and frequent changes in vertical front porch duration may produce
+ * observable flickering for such panels.
+ *
+ * Userspace control for variable refresh rate is supported via properties
+ * on the &drm_connector and &drm_crtc objects.
+ *
+ * "vrr_capable":
+ * Optional &drm_connector boolean property that drivers should attach
+ * with drm_connector_attach_vrr_capable_property() on connectors that
+ * could support variable refresh rates. Drivers should update the
+ * property value by calling drm_connector_set_vrr_capable_property().
+ *
+ * Absence of the property should indicate absence of support.
+ *
+ * "vrr_enabled":
+ * Default &drm_crtc boolean property that notifies the driver that the
+ * content on the CRTC is suitable for variable refresh rate presentation.
+ * The driver will take this property as a hint to enable variable
+ * refresh rate support if the receiver supports it, i.e. if the
+ * "vrr_capable" property is true on the &drm_connector object. The
+ * vertical front porch duration will be extended until page-flip or
+ * timeout when enabled.
+ *
+ * The minimum vertical front porch duration is defined as the vertical
+ * front porch duration for the current mode.
+ *
+ * The maximum vertical front porch duration is greater than or equal to
+ * the minimum vertical front porch duration. The duration is derived
+ * from the minimum supported variable refresh rate for the connector.
+ *
+ * The driver may place further restrictions within these minimum
+ * and maximum bounds.
+ *
+ * The semantics for the vertical blank timestamp differ when
+ * variable refresh rate is active. The vertical blank timestamp
+ * is defined to be an estimate using the current mode's fixed
+ * refresh rate timings. The semantics for the page-flip event
+ * timestamp remain the same.
+ */
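Putting the two properties together, a driver would typically attach "vrr_capable" at connector init, update it once the sink has been probed, and honour "vrr_enabled" from the CRTC state in its commit path. A sketch with hypothetical sink_has_vrr and foo_hw_enable_vrr() placeholders:

	/* at connector init time */
	drm_connector_attach_vrr_capable_property(connector);

	/* after probing the sink, e.g. in the ->detect hook */
	drm_connector_set_vrr_capable_property(connector, sink_has_vrr);

	/* in the commit path, act on the userspace hint */
	if (crtc->state->vrr_enabled)
		foo_hw_enable_vrr(crtc);	/* hypothetical hardware hook */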
+
+/**
+ * drm_connector_attach_vrr_capable_property - creates the
+ * vrr_capable property
+ * @connector: connector to create the vrr_capable property on.
+ *
+ * This is used by atomic drivers to add support for querying
+ * variable refresh rate capability for a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_vrr_capable_property(
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop;
+
+ if (!connector->vrr_capable_property) {
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE,
+ "vrr_capable");
+ if (!prop)
+ return -ENOMEM;
+
+ connector->vrr_capable_property = prop;
+ drm_object_attach_property(&connector->base, prop, 0);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_vrr_capable_property);
+
+/**
* drm_connector_attach_scaling_mode_property - attach atomic scaling mode property
* @connector: connector to attach scaling mode property on.
* @scaling_mode_mask: or'ed mask of BIT(%DRM_MODE_SCALE_\*).
@@ -1584,6 +1706,58 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
EXPORT_SYMBOL(drm_connector_set_link_status_property);
/**
+ * drm_connector_attach_max_bpc_property - attach "max bpc" property
+ * @connector: connector to attach max bpc property on.
+ * @min: The minimum bit depth supported by the connector.
+ * @max: The maximum bit depth supported by the connector.
+ *
+ * This is used to add support for limiting the bit depth on a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+ int min, int max)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop;
+
+ prop = connector->max_bpc_property;
+ if (!prop) {
+ prop = drm_property_create_range(dev, 0, "max bpc", min, max);
+ if (!prop)
+ return -ENOMEM;
+
+ connector->max_bpc_property = prop;
+ }
+
+ drm_object_attach_property(&connector->base, prop, max);
+ connector->state->max_requested_bpc = max;
+ connector->state->max_bpc = max;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
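For instance, an HDMI connector supporting 8 to 12 bpc would attach the property once during init and later clamp against &drm_connector_state.max_requested_bpc in its atomic_check. A minimal sketch:

	ret = drm_connector_attach_max_bpc_property(connector, 8, 12);
	if (ret)
		return ret;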
+
+/**
+ * drm_connector_set_vrr_capable_property - sets the variable refresh rate
+ * capable property for a connector
+ * @connector: drm connector
+ * @capable: True if the connector is variable refresh rate capable
+ *
+ * Should be used by atomic drivers to update the indicated support for
+ * variable refresh rate over a connector.
+ */
+void drm_connector_set_vrr_capable_property(
+ struct drm_connector *connector, bool capable)
+{
+ drm_object_property_set_value(&connector->base,
+ connector->vrr_capable_property,
+ capable);
+}
+EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
+
+/**
* drm_connector_init_panel_orientation_property -
 * initialize the connector's panel_orientation property
* @connector: connector for which to init the panel-orientation property.
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 268a182ae189..1593dd6cdfb7 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -340,6 +340,8 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
drm_object_attach_property(&crtc->base,
config->prop_out_fence_ptr, 0);
+ drm_object_attach_property(&crtc->base,
+ config->prop_vrr_enabled, 0);
}
return 0;
@@ -570,9 +572,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_mode_crtc *crtc_req = data;
struct drm_crtc *crtc;
struct drm_plane *plane;
- struct drm_connector **connector_set, *connector;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode;
+ struct drm_connector **connector_set = NULL, *connector;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_display_mode *mode = NULL;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
struct drm_modeset_acquire_ctx ctx;
@@ -599,15 +601,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
plane = crtc->primary;
mutex_lock(&crtc->dev->mode_config.mutex);
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-retry:
- connector_set = NULL;
- fb = NULL;
- mode = NULL;
-
- ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
- if (ret)
- goto out;
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
+ DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
if (crtc_req->mode_valid) {
/* If we have a mode we need a framebuffer. */
@@ -766,13 +761,13 @@ out:
}
kfree(connector_set);
drm_mode_destroy(dev, mode);
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+
+ /* In case we need to retry... */
+ connector_set = NULL;
+ fb = NULL;
+ mode = NULL;
+
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
mutex_unlock(&crtc->dev->mode_config.mutex);
return ret;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index ce75e9506e85..a3c81850e755 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -984,118 +984,3 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
-
-/**
- * drm_helper_crtc_mode_set - mode_set implementation for atomic plane helpers
- * @crtc: DRM CRTC
- * @mode: DRM display mode which userspace requested
- * @adjusted_mode: DRM display mode adjusted by ->mode_fixup callbacks
- * @x: x offset of the CRTC scanout area on the underlying framebuffer
- * @y: y offset of the CRTC scanout area on the underlying framebuffer
- * @old_fb: previous framebuffer
- *
- * This function implements a callback useable as the ->mode_set callback
- * required by the CRTC helpers. Besides the atomic plane helper functions for
- * the primary plane the driver must also provide the ->mode_set_nofb callback
- * to set up the CRTC.
- *
- * This is a transitional helper useful for converting drivers to the atomic
- * interfaces.
- */
-int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_crtc_state *crtc_state;
- const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- int ret;
-
- if (crtc->funcs->atomic_duplicate_state)
- crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
- else {
- if (!crtc->state)
- drm_atomic_helper_crtc_reset(crtc);
-
- crtc_state = drm_atomic_helper_crtc_duplicate_state(crtc);
- }
-
- if (!crtc_state)
- return -ENOMEM;
-
- crtc_state->planes_changed = true;
- crtc_state->mode_changed = true;
- ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
- if (ret)
- goto out;
- drm_mode_copy(&crtc_state->adjusted_mode, adjusted_mode);
-
- if (crtc_funcs->atomic_check) {
- ret = crtc_funcs->atomic_check(crtc, crtc_state);
- if (ret)
- goto out;
- }
-
- swap(crtc->state, crtc_state);
-
- crtc_funcs->mode_set_nofb(crtc);
-
- ret = drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
-
-out:
- if (crtc_state) {
- if (crtc->funcs->atomic_destroy_state)
- crtc->funcs->atomic_destroy_state(crtc, crtc_state);
- else
- drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(drm_helper_crtc_mode_set);
-
-/**
- * drm_helper_crtc_mode_set_base - mode_set_base implementation for atomic plane helpers
- * @crtc: DRM CRTC
- * @x: x offset of the CRTC scanout area on the underlying framebuffer
- * @y: y offset of the CRTC scanout area on the underlying framebuffer
- * @old_fb: previous framebuffer
- *
- * This function implements a callback useable as the ->mode_set_base used
- * required by the CRTC helpers. The driver must provide the atomic plane helper
- * functions for the primary plane.
- *
- * This is a transitional helper useful for converting drivers to the atomic
- * interfaces.
- */
-int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_plane_state *plane_state;
- struct drm_plane *plane = crtc->primary;
-
- if (plane->funcs->atomic_duplicate_state)
- plane_state = plane->funcs->atomic_duplicate_state(plane);
- else {
- if (!plane->state)
- drm_atomic_helper_plane_reset(plane);
-
- plane_state = drm_atomic_helper_plane_duplicate_state(plane);
- }
- if (!plane_state)
- return -ENOMEM;
- plane_state->plane = plane;
-
- plane_state->crtc = crtc;
- drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);
- plane_state->crtc_x = 0;
- plane_state->crtc_y = 0;
- plane_state->crtc_h = crtc->mode.vdisplay;
- plane_state->crtc_w = crtc->mode.hdisplay;
- plane_state->src_x = x << 16;
- plane_state->src_y = y << 16;
- plane_state->src_h = crtc->mode.vdisplay << 16;
- plane_state->src_w = crtc->mode.hdisplay << 16;
-
- return drm_plane_helper_commit(plane, plane_state, old_fb);
-}
-EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
new file mode 100644
index 000000000000..05c8e7267165
--- /dev/null
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Deepak Rawat <drawat@vmware.com>
+ * Rob Clark <robdclark@gmail.com>
+ *
+ **************************************************************************/
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_damage_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * FB_DAMAGE_CLIPS is an optional plane property which provides a means to
+ * specify a list of damage rectangles on a plane, in framebuffer coordinates
+ * of the framebuffer attached to the plane. In this context damage is the
+ * area of the plane's framebuffer that has changed since the last plane
+ * update (also called page-flip), irrespective of whether the currently
+ * attached framebuffer is the same as the one attached during the last
+ * plane update.
+ *
+ * FB_DAMAGE_CLIPS is a hint to the kernel that can help some drivers optimize
+ * internally, especially virtual devices where each framebuffer change needs
+ * to be transmitted over network, USB, etc.
+ *
+ * Since FB_DAMAGE_CLIPS is a hint, it is an optional property. User-space can
+ * ignore the damage clips property, in which case the driver will do a full
+ * plane update. If damage clips are provided, it is guaranteed that the area
+ * inside the damage clips will be updated on the plane. For efficiency the
+ * driver can do a full update or update more than specified in the damage
+ * clips. Since the driver is free to read more, user-space must always render
+ * the entire visible framebuffer; otherwise there can be corruption. Also,
+ * damage clips that do not encompass the actual damage to the framebuffer
+ * (since the last plane update) can result in incorrect rendering.
+ *
+ * FB_DAMAGE_CLIPS is a blob property; the layout of the blob data is simply
+ * an array of &drm_mode_rect. Unlike plane &drm_plane_state.src coordinates,
+ * damage clips are not in 16.16 fixed point. Like the plane src in the
+ * framebuffer, damage clips cannot be negative. In a damage clip, x1/y1 are
+ * inclusive and x2/y2 are exclusive. While the kernel does not error out on
+ * overlapping damage clips, they are strongly discouraged.
+ *
+ * Drivers that are interested in the damage interface for a plane should
+ * enable the FB_DAMAGE_CLIPS property by calling
+ * drm_plane_enable_fb_damage_clips(). Drivers implementing damage can use the
+ * drm_atomic_helper_damage_iter_init() and drm_atomic_helper_damage_iter_next()
+ * helper iterator functions to get damage rectangles clipped to
+ * &drm_plane_state.src.
+ */
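A driver opting into this interface typically enables the property at plane init time and discards stale damage in its atomic_check; a sketch, with the foo_ name being hypothetical:

	/* once, when the plane is initialized */
	drm_plane_enable_fb_damage_clips(plane);

	static int foo_plane_atomic_check(struct drm_plane *plane,
					  struct drm_plane_state *state)
	{
		/* damage is meaningless across a full modeset, so drop it */
		drm_atomic_helper_check_plane_damage(state->state, state);

		return 0;
	}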
+
+static void convert_clip_rect_to_rect(const struct drm_clip_rect *src,
+ struct drm_mode_rect *dest,
+ uint32_t num_clips, uint32_t src_inc)
+{
+ while (num_clips > 0) {
+ dest->x1 = src->x1;
+ dest->y1 = src->y1;
+ dest->x2 = src->x2;
+ dest->y2 = src->y2;
+ src += src_inc;
+ dest++;
+ num_clips--;
+ }
+}
+
+/**
+ * drm_plane_enable_fb_damage_clips - Enables plane fb damage clips property.
+ * @plane: Plane on which to enable damage clips property.
+ *
+ * This function lets a driver enable the damage clips property on a plane.
+ */
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ drm_object_attach_property(&plane->base, config->prop_fb_damage_clips,
+ 0);
+}
+EXPORT_SYMBOL(drm_plane_enable_fb_damage_clips);
+
+/**
+ * drm_atomic_helper_check_plane_damage - Verify plane damage on atomic_check.
+ * @state: The driver state object.
+ * @plane_state: Plane state for which to verify damage.
+ *
+ * This helper function makes sure that damage from the plane state is
+ * discarded for a full modeset. If there are more reasons a driver would want
+ * to do a full plane update rather than processing individual damage regions,
+ * then those cases should be taken care of here.
+ *
+ * Note that &drm_plane_state.fb_damage_clips == NULL in the plane state means
+ * that a full plane update should happen. It also ensures the helper iterator
+ * will return &drm_plane_state.src as damage.
+ */
+void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (plane_state->crtc) {
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ plane_state->crtc);
+
+ if (WARN_ON(!crtc_state))
+ return;
+
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ drm_property_blob_put(plane_state->fb_damage_clips);
+ plane_state->fb_damage_clips = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_plane_damage);
+
+/**
+ * drm_atomic_helper_dirtyfb - Helper for dirtyfb.
+ * @fb: DRM framebuffer.
+ * @file_priv: DRM file for the ioctl call.
+ * @flags: Dirty fb annotate flags.
+ * @color: Color for annotate fill.
+ * @clips: Dirty region.
+ * @num_clips: Number of clips in @clips.
+ *
+ * A helper to implement &drm_framebuffer_funcs.dirty using the damage
+ * interface during plane update. If num_clips is 0 then this helper will do a
+ * full plane update. This is the same behaviour expected by the DIRTYFB IOCTL.
+ *
+ * Note that this helper is a blocking implementation. This is what current
+ * drivers and userspace expect in their DIRTYFB IOCTL implementation, as a way
+ * to rate-limit userspace and make sure its rendering doesn't get ahead of
+ * uploading new data too much.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_property_blob *damage = NULL;
+ struct drm_mode_rect *rects = NULL;
+ struct drm_atomic_state *state;
+ struct drm_plane *plane;
+ int ret = 0;
+
+ /*
+ * When called from an ioctl, we are interruptible, but not when called
+ * internally (i.e. from the defio worker).
+ */
+ drm_modeset_acquire_init(&ctx,
+ file_priv ? DRM_MODESET_ACQUIRE_INTERRUPTIBLE : 0);
+
+ state = drm_atomic_state_alloc(fb->dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ state->acquire_ctx = &ctx;
+
+ if (clips) {
+ uint32_t inc = 1;
+
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ inc = 2;
+ num_clips /= 2;
+ }
+
+ rects = kcalloc(num_clips, sizeof(*rects), GFP_KERNEL);
+ if (!rects) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ convert_clip_rect_to_rect(clips, rects, num_clips, inc);
+ damage = drm_property_create_blob(fb->dev,
+ num_clips * sizeof(*rects),
+ rects);
+ if (IS_ERR(damage)) {
+ ret = PTR_ERR(damage);
+ damage = NULL;
+ goto out;
+ }
+ }
+
+retry:
+ drm_for_each_plane(plane, fb->dev) {
+ struct drm_plane_state *plane_state;
+
+ if (plane->state->fb != fb)
+ continue;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto out;
+ }
+
+ drm_property_replace_blob(&plane_state->fb_damage_clips,
+ damage);
+ }
+
+ ret = drm_atomic_commit(state);
+
+out:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
+
+ drm_property_blob_put(damage);
+ kfree(rects);
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_atomic_helper_dirtyfb);
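For a driver whose planes already process FB_DAMAGE_CLIPS, the legacy dirty callback can simply point at this helper; a sketch assuming the GEM framebuffer helpers:

	static const struct drm_framebuffer_funcs foo_fb_funcs = {
		.destroy	= drm_gem_fb_destroy,
		.create_handle	= drm_gem_fb_create_handle,
		.dirty		= drm_atomic_helper_dirtyfb,
	};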
+
+/**
+ * drm_atomic_helper_damage_iter_init - Initialize the damage iterator.
+ * @iter: The iterator to initialize.
+ * @old_state: Old plane state for validation.
+ * @state: Plane state from which to iterate the damage clips.
+ *
+ * Initialize an iterator which clips the plane damage
+ * &drm_plane_state.fb_damage_clips to the plane &drm_plane_state.src. This
+ * iterator returns the full plane src when damage is not present, either
+ * because user-space didn't send any or because the driver discarded it
+ * (e.g. to do a full plane update). Currently this iterator also returns the
+ * full plane src when the plane src changed, but that may change in the
+ * future to return damage instead.
+ *
+ * When the plane is not visible or a plane update should not happen, the
+ * first call to iter_next will return false. Note that this helper uses the
+ * clipped &drm_plane_state.src, so a driver calling it should have called
+ * drm_atomic_helper_check_plane_state() earlier.
+ */
+void
+drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+ const struct drm_plane_state *old_state,
+ const struct drm_plane_state *state)
+{
+ memset(iter, 0, sizeof(*iter));
+
+ if (!state || !state->crtc || !state->fb || !state->visible)
+ return;
+
+ iter->clips = drm_helper_get_plane_damage_clips(state);
+ iter->num_clips = drm_plane_get_damage_clips_count(state);
+
+ /* Round down for x1/y1 and round up for x2/y2 to catch all pixels */
+ iter->plane_src.x1 = state->src.x1 >> 16;
+ iter->plane_src.y1 = state->src.y1 >> 16;
+ iter->plane_src.x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
+ iter->plane_src.y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
+
+ if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
+ iter->clips = NULL;
+ iter->num_clips = 0;
+ iter->full_update = true;
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_iter_init);
+
+/**
+ * drm_atomic_helper_damage_iter_next - Advance the damage iterator.
+ * @iter: The iterator to advance.
+ * @rect: Return a rectangle in fb coordinate clipped to plane src.
+ *
+ * Since the plane src is in 16.16 fixed point and damage clips are whole
+ * numbers, this iterator rounds off clips that intersect with the plane src:
+ * round down for x1/y1 and round up for x2/y2 of the intersected coordinates.
+ * The same rounding applies to the full plane src, in case it is returned as
+ * damage. This iterator will skip damage clips outside of the plane src.
+ *
+ * Return: True if the output is valid, false if the end was reached.
+ *
+ * If the first call to iterator next returns false, there is no need to
+ * update the plane.
+ */
+bool
+drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
+ struct drm_rect *rect)
+{
+ bool ret = false;
+
+ if (iter->full_update) {
+ *rect = iter->plane_src;
+ iter->full_update = false;
+ return true;
+ }
+
+ while (iter->curr_clip < iter->num_clips) {
+ *rect = iter->clips[iter->curr_clip];
+ iter->curr_clip++;
+
+ if (drm_rect_intersect(rect, &iter->plane_src)) {
+ ret = true;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_iter_next);
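Taken together, the two iterator functions give the canonical damage loop in a plane's atomic_update hook; a sketch with a hypothetical upload helper:

	static void foo_plane_atomic_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
	{
		struct drm_atomic_helper_damage_iter iter;
		struct drm_rect clip;

		drm_atomic_helper_damage_iter_init(&iter, old_state, plane->state);
		while (drm_atomic_helper_damage_iter_next(&iter, &clip))
			foo_upload_rect(plane, &clip);	/* hypothetical: flush clip to hw */
	}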
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 373bd4c2b698..f8468eae0503 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -32,6 +32,8 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_edid.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_gem.h>
#include <drm/drmP.h>
#include "drm_internal.h"
@@ -43,6 +45,93 @@
* Initialization, etc.
**************************************************/
+static int drm_name_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *dev = minor->dev;
+ struct drm_master *master;
+
+ mutex_lock(&dev->master_mutex);
+ master = dev->master;
+ seq_printf(m, "%s", dev->driver->name);
+ if (dev->dev)
+ seq_printf(m, " dev=%s", dev_name(dev->dev));
+ if (master && master->unique)
+ seq_printf(m, " master=%s", master->unique);
+ if (dev->unique)
+ seq_printf(m, " unique=%s", dev->unique);
+ seq_printf(m, "\n");
+ mutex_unlock(&dev->master_mutex);
+
+ return 0;
+}
+
+static int drm_clients_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_file *priv;
+ kuid_t uid;
+
+ seq_printf(m,
+ "%20s %5s %3s master a %5s %10s\n",
+ "command",
+ "pid",
+ "dev",
+ "uid",
+ "magic");
+
+ /* dev->filelist is sorted youngest first, but we want to present
+ * oldest first (i.e. kernel, servers, clients), so walk backwards.
+ */
+ mutex_lock(&dev->filelist_mutex);
+ list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
+ struct task_struct *task;
+
+ rcu_read_lock(); /* locks pid_task()->comm */
+ task = pid_task(priv->pid, PIDTYPE_PID);
+ uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
+ seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
+ task ? task->comm : "<unknown>",
+ pid_vnr(priv->pid),
+ priv->minor->index,
+ drm_is_current_master(priv) ? 'y' : 'n',
+ priv->authenticated ? 'y' : 'n',
+ from_kuid_munged(seq_user_ns(m), uid),
+ priv->magic);
+ rcu_read_unlock();
+ }
+ mutex_unlock(&dev->filelist_mutex);
+ return 0;
+}
+
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+ struct drm_gem_object *obj = ptr;
+ struct seq_file *m = data;
+
+ seq_printf(m, "%6d %8zd %7d %8d\n",
+ obj->name, obj->size,
+ obj->handle_count,
+ kref_read(&obj->refcount));
+ return 0;
+}
+
+static int drm_gem_name_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+
+ seq_printf(m, " name size handles refcount\n");
+
+ mutex_lock(&dev->object_name_lock);
+ idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+ mutex_unlock(&dev->object_name_lock);
+
+ return 0;
+}
+
static const struct drm_info_list drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"clients", drm_clients_info, 0},
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
index 8a718f85079a..b15cee85b702 100644
--- a/drivers/gpu/drm/drm_dp_cec.c
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -424,8 +424,6 @@ void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
aux->cec.parent = parent;
INIT_DELAYED_WORK(&aux->cec.unregister_work,
drm_dp_cec_unregister_work);
-
- drm_dp_cec_set_edid(aux, NULL);
}
EXPORT_SYMBOL(drm_dp_cec_register_connector);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 37c01b6076ec..2d6c491a0542 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1352,3 +1352,95 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
return 0;
}
EXPORT_SYMBOL(drm_dp_read_desc);
+
+/**
+ * DRM DP Helpers for DSC
+ */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ bool is_edp)
+{
+ u8 slice_cap1 = dsc_dpcd[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT];
+
+ if (is_edp) {
+ /* For eDP, register DSC_SLICE_CAPABILITIES_1 gives slice count */
+ if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
+ return 4;
+ if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
+ return 2;
+ if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
+ return 1;
+ } else {
+ /* For DP, use values from DSC_SLICE_CAP_1 and DSC_SLICE_CAP_2 */
+ u8 slice_cap2 = dsc_dpcd[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT];
+
+ if (slice_cap2 & DP_DSC_24_PER_DP_DSC_SINK)
+ return 24;
+ if (slice_cap2 & DP_DSC_20_PER_DP_DSC_SINK)
+ return 20;
+ if (slice_cap2 & DP_DSC_16_PER_DP_DSC_SINK)
+ return 16;
+ if (slice_cap1 & DP_DSC_12_PER_DP_DSC_SINK)
+ return 12;
+ if (slice_cap1 & DP_DSC_10_PER_DP_DSC_SINK)
+ return 10;
+ if (slice_cap1 & DP_DSC_8_PER_DP_DSC_SINK)
+ return 8;
+ if (slice_cap1 & DP_DSC_6_PER_DP_DSC_SINK)
+ return 6;
+ if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
+ return 4;
+ if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
+ return 2;
+ if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_count);
+
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ u8 line_buf_depth = dsc_dpcd[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT];
+
+ switch (line_buf_depth & DP_DSC_LINE_BUF_BIT_DEPTH_MASK) {
+ case DP_DSC_LINE_BUF_BIT_DEPTH_9:
+ return 9;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_10:
+ return 10;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_11:
+ return 11;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_12:
+ return 12;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_13:
+ return 13;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_14:
+ return 14;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_15:
+ return 15;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_16:
+ return 16;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_8:
+ return 8;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
+
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3])
+{
+ int num_bpc = 0;
+ u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
+
+ if (color_depth & DP_DSC_12_BPC)
+ dsc_bpc[num_bpc++] = 12;
+ if (color_depth & DP_DSC_10_BPC)
+ dsc_bpc[num_bpc++] = 10;
+ if (color_depth & DP_DSC_8_BPC)
+ dsc_bpc[num_bpc++] = 8;
+
+ return num_bpc;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
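For illustration, a minimal sketch (not part of this patch) of how a source driver might combine the three helpers above after reading the sink's DSC receiver capability block; the function name and debug messages are hypothetical:

static void example_probe_dsc_caps(struct drm_dp_aux *aux, bool is_edp)
{
	u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
	u8 dsc_bpc[3];
	int num_bpc;

	/* Cache the whole DSC capability block starting at DP_DSC_SUPPORT */
	if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
			     sizeof(dsc_dpcd)) < 0)
		return;

	DRM_DEBUG_KMS("max slice count: %u\n",
		      drm_dp_dsc_sink_max_slice_count(dsc_dpcd, is_edp));
	DRM_DEBUG_KMS("line buffer depth: %u bits\n",
		      drm_dp_dsc_sink_line_buf_depth(dsc_dpcd));

	/* dsc_bpc is filled deepest first (12, 10, 8) */
	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);
	if (num_bpc)
		DRM_DEBUG_KMS("deepest input bpc: %u\n", dsc_bpc[0]);
}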
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 5ff1d79b86c4..529414556962 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1275,6 +1275,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
+ if (!mstb)
+ goto out;
+
for (i = 0; i < lct - 1; i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = (rad[i / 2] >> shift) & 0xf;
@@ -2569,9 +2572,16 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
EXPORT_SYMBOL(drm_dp_mst_get_edid);
/**
- * drm_dp_find_vcpi_slots() - find slots for this PBN value
+ * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
* @mgr: manager to use
* @pbn: payload bandwidth to convert into slots.
+ *
+ * Calculate the number of VCPI slots that will be required for the given PBN
+ * value. This function is deprecated, and should not be used in atomic
+ * drivers.
+ *
+ * RETURNS:
+ * The total slots required for this port, or error.
*/
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
int pbn)
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 36e8e9cbec52..12e5e2be7890 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -476,8 +476,6 @@ static void drm_fs_inode_free(struct inode *inode)
* The initial ref-count of the object is 1. Use drm_dev_get() and
* drm_dev_put() to take and drop further ref-counts.
*
- * Note that for purely virtual devices @parent can be NULL.
- *
* Drivers that do not want to allocate their own device struct
* embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
* that do embed &struct drm_device it must be placed first in the overall
@@ -502,6 +500,8 @@ int drm_dev_init(struct drm_device *dev,
return -ENODEV;
}
+ BUG_ON(!parent);
+
kref_init(&dev->ref);
dev->dev = parent;
dev->driver = driver;
@@ -556,9 +556,7 @@ int drm_dev_init(struct drm_device *dev,
}
}
- /* Use the parent device name as DRM device unique identifier, but fall
- * back to the driver name for virtual devices like vgem. */
- ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
+ ret = drm_dev_set_unique(dev, dev_name(parent));
if (ret)
goto err_setunique;
@@ -706,19 +704,6 @@ void drm_dev_put(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_put);
-/**
- * drm_dev_unref - Drop reference of a DRM device
- * @dev: device to drop reference of or NULL
- *
- * This is a compatibility alias for drm_dev_put() and should not be used by new
- * code.
- */
-void drm_dev_unref(struct drm_device *dev)
-{
- drm_dev_put(dev);
-}
-EXPORT_SYMBOL(drm_dev_unref);
-
static int create_compat_control_link(struct drm_device *dev)
{
struct drm_minor *minor;
@@ -975,14 +960,12 @@ static void drm_core_exit(void)
drm_sysfs_destroy();
idr_destroy(&drm_minors_idr);
drm_connector_ida_destroy();
- drm_global_release();
}
static int __init drm_core_init(void)
{
int ret;
- drm_global_init();
drm_connector_ida_init();
idr_init(&drm_minors_idr);
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
new file mode 100644
index 000000000000..bc2b23adb072
--- /dev/null
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corp
+ *
+ * Author:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/byteorder/generic.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_dsc.h>
+
+/**
+ * DOC: dsc helpers
+ *
+ * These functions contain some common logic and helpers to deal with VESA
+ * Display Stream Compression standard required for DSC on Display Port/eDP or
+ * MIPI display interfaces.
+ */
+
+/**
+ * drm_dsc_dp_pps_header_init() - Initializes the PPS Header
+ * for DisplayPort as per the DP 1.4 spec.
+ * @pps_sdp: Secondary data packet for DSC Picture Parameter Set
+ */
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp)
+{
+ memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header));
+
+ pps_sdp->pps_header.HB1 = DP_SDP_PPS;
+ pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
+}
+EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
+
+/**
+ * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe
+ * using the DSC configuration parameters in the order expected
+ * by the DSC Display Sink device. For the DSC, the sink device
+ * expects the PPS payload in the big endian format for the fields
+ * that span more than 1 byte.
+ *
+ * @pps_sdp:
+ * Secondary data packet for DSC Picture Parameter Set
+ * @dsc_cfg:
+ * DSC Configuration data filled by driver
+ */
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg)
+{
+ int i;
+
+ /* Protect against someone accidentally changing struct size */
+ BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) !=
+ DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
+
+ memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload));
+
+ /* PPS 0 */
+ pps_sdp->pps_payload.dsc_version =
+ dsc_cfg->dsc_version_minor |
+ dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
+
+ /* PPS 1, 2 is 0 */
+
+ /* PPS 3 */
+ pps_sdp->pps_payload.pps_3 =
+ dsc_cfg->line_buf_depth |
+ dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
+
+ /* PPS 4 */
+ pps_sdp->pps_payload.pps_4 =
+ ((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT) |
+ dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
+ dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT |
+ dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
+ dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
+
+ /* PPS 5 */
+ pps_sdp->pps_payload.bits_per_pixel_low =
+ (dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
+
+ /*
+ * The DSC panel expects the PPS packet to have big endian format
+ * for fields spanning two bytes, so use cpu_to_be16() for the
+ * conversion: on a little endian CPU it swaps the bytes, on a big
+ * endian CPU it leaves the value unchanged.
+ */
+
+ /* PPS 6, 7 */
+ pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height);
+
+ /* PPS 8, 9 */
+ pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width);
+
+ /* PPS 10, 11 */
+ pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height);
+
+ /* PPS 12, 13 */
+ pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width);
+
+ /* PPS 14, 15 */
+ pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
+
+ /* PPS 16 */
+ pps_sdp->pps_payload.initial_xmit_delay_high =
+ ((dsc_cfg->initial_xmit_delay &
+ DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT);
+
+ /* PPS 17 */
+ pps_sdp->pps_payload.initial_xmit_delay_low =
+ (dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
+
+ /* PPS 18, 19 */
+ pps_sdp->pps_payload.initial_dec_delay =
+ cpu_to_be16(dsc_cfg->initial_dec_delay);
+
+ /* PPS 20 is 0 */
+
+ /* PPS 21 */
+ pps_sdp->pps_payload.initial_scale_value =
+ dsc_cfg->initial_scale_value;
+
+ /* PPS 22, 23 */
+ pps_sdp->pps_payload.scale_increment_interval =
+ cpu_to_be16(dsc_cfg->scale_increment_interval);
+
+ /* PPS 24 */
+ pps_sdp->pps_payload.scale_decrement_interval_high =
+ ((dsc_cfg->scale_decrement_interval &
+ DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT);
+
+ /* PPS 25 */
+ pps_sdp->pps_payload.scale_decrement_interval_low =
+ (dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
+
+ /* PPS 26[7:0], PPS 27[7:5] RESERVED */
+
+ /* PPS 27 */
+ pps_sdp->pps_payload.first_line_bpg_offset =
+ dsc_cfg->first_line_bpg_offset;
+
+ /* PPS 28, 29 */
+ pps_sdp->pps_payload.nfl_bpg_offset =
+ cpu_to_be16(dsc_cfg->nfl_bpg_offset);
+
+ /* PPS 30, 31 */
+ pps_sdp->pps_payload.slice_bpg_offset =
+ cpu_to_be16(dsc_cfg->slice_bpg_offset);
+
+ /* PPS 32, 33 */
+ pps_sdp->pps_payload.initial_offset =
+ cpu_to_be16(dsc_cfg->initial_offset);
+
+ /* PPS 34, 35 */
+ pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset);
+
+ /* PPS 36 */
+ pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp;
+
+ /* PPS 37 */
+ pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp;
+
+ /* PPS 38, 39 */
+ pps_sdp->pps_payload.rc_model_size =
+ cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
+
+ /* PPS 40 */
+ pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
+
+ /* PPS 41 */
+ pps_sdp->pps_payload.rc_quant_incr_limit0 =
+ dsc_cfg->rc_quant_incr_limit0;
+
+ /* PPS 42 */
+ pps_sdp->pps_payload.rc_quant_incr_limit1 =
+ dsc_cfg->rc_quant_incr_limit1;
+
+ /* PPS 43 */
+ pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
+ DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
+
+ /* PPS 44 - 57 */
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
+ pps_sdp->pps_payload.rc_buf_thresh[i] =
+ dsc_cfg->rc_buf_thresh[i];
+
+ /* PPS 58 - 87 */
+ /*
+ * For DSC sink programming the RC Range parameter fields
+ * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
+ */
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ pps_sdp->pps_payload.rc_range_parameters[i] =
+ ((dsc_cfg->rc_range_params[i].range_min_qp <<
+ DSC_PPS_RC_RANGE_MINQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_max_qp <<
+ DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_bpg_offset));
+ pps_sdp->pps_payload.rc_range_parameters[i] =
+ cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]);
+ }
+
+ /* PPS 88 */
+ pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 |
+ dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
+
+ /* PPS 89 */
+ pps_sdp->pps_payload.second_line_bpg_offset =
+ dsc_cfg->second_line_bpg_offset;
+
+ /* PPS 90, 91 */
+ pps_sdp->pps_payload.nsl_bpg_offset =
+ cpu_to_be16(dsc_cfg->nsl_bpg_offset);
+
+ /* PPS 92, 93 */
+ pps_sdp->pps_payload.second_line_offset_adj =
+ cpu_to_be16(dsc_cfg->second_line_offset_adj);
+
+ /* PPS 94 - 127 are 0 */
+}
+EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack);
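As a usage note, the intended call order is header first, then payload; how the packed SDP reaches the hardware is driver-specific. A hedged sketch, with write_dsc_pps_sdp() as a hypothetical placeholder:

static void example_send_pps(const struct drm_dsc_config *dsc_cfg)
{
	struct drm_dsc_pps_infoframe pps_sdp;

	drm_dsc_dp_pps_header_init(&pps_sdp);
	drm_dsc_pps_infoframe_pack(&pps_sdp, dsc_cfg);

	/* write_dsc_pps_sdp(&pps_sdp); -- hypothetical, hardware-specific */
}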
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index fb0dfc62b1b6..5b516615881a 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -72,7 +72,9 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
/**
- * drm_fb_cma_get_gem_addr() - Get physical address for framebuffer
+ * drm_fb_cma_get_gem_addr() - Get physical address for framebuffer; for pixel
+ * formats where values are grouped in blocks this returns the beginning of
+ * the block containing the sample
* @fb: The framebuffer
* @state: Which state of drm plane
* @plane: Which plane
@@ -87,6 +89,13 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_gem_cma_object *obj;
dma_addr_t paddr;
u8 h_div = 1, v_div = 1;
+ u32 block_w = drm_format_info_block_width(fb->format, plane);
+ u32 block_h = drm_format_info_block_height(fb->format, plane);
+ u32 block_size = fb->format->char_per_block[plane];
+ u32 sample_x;
+ u32 sample_y;
+ u32 block_start_y;
+ u32 num_hblocks;
obj = drm_fb_cma_get_gem_obj(fb, plane);
if (!obj)
@@ -99,8 +108,13 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
v_div = fb->format->vsub;
}
- paddr += (fb->format->cpp[plane] * (state->src_x >> 16)) / h_div;
- paddr += (fb->pitches[plane] * (state->src_y >> 16)) / v_div;
+ sample_x = (state->src_x >> 16) / h_div;
+ sample_y = (state->src_y >> 16) / v_div;
+ block_start_y = (sample_y / block_h) * block_h;
+ num_hblocks = sample_x / block_w;
+
+ paddr += fb->pitches[plane] * block_start_y;
+ paddr += block_size * num_hblocks;
return paddr;
}
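A worked example of the block arithmetic above, using illustrative values for plane 0 of a 2x2-block, 8-bytes-per-block format such as the Y0L0/X0L0 entries added to drm_fourcc.c later in this series:

/*
 * src_x = 5 << 16, src_y = 3 << 16, plane 0 (h_div = v_div = 1),
 * block_w = block_h = 2, block_size = 8:
 *
 *   sample_x      = 5
 *   sample_y      = 3
 *   block_start_y = (3 / 2) * 2 = 2	first row of the containing block
 *   num_hblocks   = 5 / 2       = 2	whole blocks left of the sample
 *
 * paddr += fb->pitches[0] * 2 + 8 * 2
 *
 * i.e. the returned address is the start of the 2x2 block containing
 * pixel (5, 3), not the address of the pixel itself.
 */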
@@ -124,10 +138,7 @@ int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
/* dev->fb_helper will indirectly point to fbdev_cma after this call */
fbdev_cma = drm_fbdev_cma_init(dev, preferred_bpp, max_conn_count);
- if (IS_ERR(fbdev_cma))
- return PTR_ERR(fbdev_cma);
-
- return 0;
+ return PTR_ERR_OR_ZERO(fbdev_cma);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
@@ -226,21 +237,3 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
-
-/**
- * drm_fbdev_cma_set_suspend_unlocked - wrapper around
- * drm_fb_helper_set_suspend_unlocked
- * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
- * @state: desired state, zero to resume, non-zero to suspend
- *
- * Calls drm_fb_helper_set_suspend, which is a wrapper around
- * fb_set_suspend implemented by fbdev core.
- */
-void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
- bool state)
-{
- if (fbdev_cma)
- drm_fb_helper_set_suspend_unlocked(&fbdev_cma->fb_helper,
- state);
-}
-EXPORT_SYMBOL(drm_fbdev_cma_set_suspend_unlocked);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a502f3e519fd..5e9ca6f96379 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -219,6 +219,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
mutex_lock(&fb_helper->lock);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
ret = __drm_fb_helper_add_one_connector(fb_helper, connector);
if (ret)
goto fail;
@@ -1632,6 +1635,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
+ if ((drm_format_info_block_width(fb->format, 0) > 1) ||
+ (drm_format_info_block_height(fb->format, 0) > 1))
+ return -EINVAL;
+
/*
* Changes struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
@@ -1949,6 +1956,8 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
{
struct drm_framebuffer *fb = fb_helper->fb;
+ WARN_ON((drm_format_info_block_width(fb->format, 0) > 1) ||
+ (drm_format_info_block_height(fb->format, 0) > 1));
info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 90a1c846fc25..d90ee03a84c6 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -97,14 +97,14 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
/**
* drm_driver_legacy_fb_format - compute drm fourcc code from legacy description
+ * @dev: DRM device
* @bpp: bits per pixels
* @depth: bit depth per pixel
- * @native: use host native byte order
*
* Computes a drm fourcc pixel format code for the given @bpp/@depth values.
* Unlike drm_mode_legacy_fb_format() this looks at the drivers mode_config,
- * and depending on the quirk_addfb_prefer_host_byte_order flag it returns
- * little endian byte order or host byte order framebuffer formats.
+ * and depending on the &drm_mode_config.quirk_addfb_prefer_host_byte_order flag
+ * it returns little endian byte order or host byte order framebuffer formats.
*/
uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
uint32_t bpp, uint32_t depth)
@@ -224,7 +224,20 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1,
+ .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+ .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_X0L0, .depth = 0, .num_planes = 1,
+ .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_Y0L2, .depth = 0, .num_planes = 1,
+ .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+ .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_X0L2, .depth = 0, .num_planes = 1,
+ .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
};
unsigned int i;
@@ -400,3 +413,65 @@ int drm_format_plane_height(int height, uint32_t format, int plane)
return height / info->vsub;
}
EXPORT_SYMBOL(drm_format_plane_height);
+
+/**
+ * drm_format_info_block_width - width in pixels of a block
+ * @info: pixel format info
+ * @plane: plane index
+ *
+ * Returns:
+ * The width in pixels of a block, depending on the plane index.
+ */
+unsigned int drm_format_info_block_width(const struct drm_format_info *info,
+ int plane)
+{
+ if (!info || plane < 0 || plane >= info->num_planes)
+ return 0;
+
+ if (!info->block_w[plane])
+ return 1;
+ return info->block_w[plane];
+}
+EXPORT_SYMBOL(drm_format_info_block_width);
+
+/**
+ * drm_format_info_block_height - height in pixels of a block
+ * @info: pixel format info
+ * @plane: plane index
+ *
+ * Returns:
+ * The height in pixels of a block, depending on the plane index.
+ */
+unsigned int drm_format_info_block_height(const struct drm_format_info *info,
+ int plane)
+{
+ if (!info || plane < 0 || plane >= info->num_planes)
+ return 0;
+
+ if (!info->block_h[plane])
+ return 1;
+ return info->block_h[plane];
+}
+EXPORT_SYMBOL(drm_format_info_block_height);
+
+/**
+ * drm_format_info_min_pitch - computes the minimum required pitch in bytes
+ * @info: pixel format info
+ * @plane: plane index
+ * @buffer_width: buffer width in pixels
+ *
+ * Returns:
+ * The minimum required pitch in bytes for a buffer by taking into consideration
+ * the pixel format information and the buffer width.
+ */
+uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
+ int plane, unsigned int buffer_width)
+{
+ if (!info || plane < 0 || plane >= info->num_planes)
+ return 0;
+
+ return DIV_ROUND_UP_ULL((u64)buffer_width * info->char_per_block[plane],
+ drm_format_info_block_width(info, plane) *
+ drm_format_info_block_height(info, plane));
+}
+EXPORT_SYMBOL(drm_format_info_min_pitch);
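A hedged example of the pitch helper; the format and width are illustrative. For DRM_FORMAT_X0L0 (2x2 block, 8 chars per block) at 1366 pixels wide, the result is DIV_ROUND_UP(1366 * 8, 2 * 2) = 2732 bytes:

static u64 example_min_pitch(void)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L0);

	/* 1366 pixels -> 683 blocks of 8 bytes spanning 2 rows each */
	return drm_format_info_min_pitch(info, 0, 1366);
}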
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 3bf729d0aae5..fcaea8f50513 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -195,20 +195,26 @@ static int framebuffer_check(struct drm_device *dev,
for (i = 0; i < info->num_planes; i++) {
unsigned int width = fb_plane_width(r->width, info, i);
unsigned int height = fb_plane_height(r->height, info, i);
- unsigned int cpp = info->cpp[i];
+ unsigned int block_size = info->char_per_block[i];
+ u64 min_pitch = drm_format_info_min_pitch(info, i, width);
+
+ if (!block_size && (r->modifier[i] == DRM_FORMAT_MOD_LINEAR)) {
+ DRM_DEBUG_KMS("Format requires non-linear modifier for plane %d\n", i);
+ return -EINVAL;
+ }
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
return -EINVAL;
}
- if ((uint64_t) width * cpp > UINT_MAX)
+ if (min_pitch > UINT_MAX)
return -ERANGE;
if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
return -ERANGE;
- if (r->pitches[i] < width * cpp) {
+ if (block_size && r->pitches[i] < min_pitch) {
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
@@ -317,6 +323,7 @@ drm_internal_framebuffer_create(struct drm_device *dev,
return fb;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_internal_framebuffer_create);
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 512078ebd97b..8b55ece97967 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -257,7 +257,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
- if (dev->driver->gem_close_object)
+ if (obj->funcs && obj->funcs->close)
+ obj->funcs->close(obj, file_priv);
+ else if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
if (drm_core_check_feature(dev, DRIVER_PRIME))
@@ -410,7 +412,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
if (ret)
goto err_remove;
- if (dev->driver->gem_open_object) {
+ if (obj->funcs && obj->funcs->open) {
+ ret = obj->funcs->open(obj, file_priv);
+ if (ret)
+ goto err_revoke;
+ } else if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
if (ret)
goto err_revoke;
@@ -835,7 +841,9 @@ drm_gem_object_free(struct kref *kref)
container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev;
- if (dev->driver->gem_free_object_unlocked) {
+ if (obj->funcs) {
+ obj->funcs->free(obj);
+ } else if (dev->driver->gem_free_object_unlocked) {
dev->driver->gem_free_object_unlocked(obj);
} else if (dev->driver->gem_free_object) {
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -864,13 +872,13 @@ drm_gem_object_put_unlocked(struct drm_gem_object *obj)
dev = obj->dev;
- if (dev->driver->gem_free_object_unlocked) {
- kref_put(&obj->refcount, drm_gem_object_free);
- } else {
+ if (dev->driver->gem_free_object) {
might_lock(&dev->struct_mutex);
if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
&dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
+ } else {
+ kref_put(&obj->refcount, drm_gem_object_free);
}
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);
@@ -960,11 +968,14 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
- if (!dev->driver->gem_vm_ops)
+ if (obj->funcs && obj->funcs->vm_ops)
+ vma->vm_ops = obj->funcs->vm_ops;
+ else if (dev->driver->gem_vm_ops)
+ vma->vm_ops = dev->driver->gem_vm_ops;
+ else
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_ops = dev->driver->gem_vm_ops;
vma->vm_private_data = obj;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
@@ -1066,6 +1077,86 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
drm_printf_indent(p, indent, "imported=%s\n",
obj->import_attach ? "yes" : "no");
- if (obj->dev->driver->gem_print_info)
+ if (obj->funcs && obj->funcs->print_info)
+ obj->funcs->print_info(p, indent, obj);
+ else if (obj->dev->driver->gem_print_info)
obj->dev->driver->gem_print_info(p, indent, obj);
}
+
+/**
+ * drm_gem_pin - Pin backing buffer in memory
+ * @obj: GEM object
+ *
+ * Make sure the backing buffer is pinned in memory.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_pin(struct drm_gem_object *obj)
+{
+ if (obj->funcs && obj->funcs->pin)
+ return obj->funcs->pin(obj);
+ else if (obj->dev->driver->gem_prime_pin)
+ return obj->dev->driver->gem_prime_pin(obj);
+ else
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_pin);
+
+/**
+ * drm_gem_unpin - Unpin backing buffer from memory
+ * @obj: GEM object
+ *
+ * Relax the requirement that the backing buffer is pinned in memory.
+ */
+void drm_gem_unpin(struct drm_gem_object *obj)
+{
+ if (obj->funcs && obj->funcs->unpin)
+ obj->funcs->unpin(obj);
+ else if (obj->dev->driver->gem_prime_unpin)
+ obj->dev->driver->gem_prime_unpin(obj);
+}
+EXPORT_SYMBOL(drm_gem_unpin);
+
+/**
+ * drm_gem_vmap - Map buffer into kernel virtual address space
+ * @obj: GEM object
+ *
+ * Returns:
+ * A virtual pointer to the buffer mapping or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+void *drm_gem_vmap(struct drm_gem_object *obj)
+{
+ void *vaddr;
+
+ if (obj->funcs && obj->funcs->vmap)
+ vaddr = obj->funcs->vmap(obj);
+ else if (obj->dev->driver->gem_prime_vmap)
+ vaddr = obj->dev->driver->gem_prime_vmap(obj);
+ else
+ vaddr = ERR_PTR(-EOPNOTSUPP);
+
+ if (!vaddr)
+ vaddr = ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_vmap);
+
+/**
+ * drm_gem_vunmap - Remove buffer mapping from kernel virtual address space
+ * @obj: GEM object
+ * @vaddr: Virtual address (can be NULL)
+ */
+void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ if (!vaddr)
+ return;
+
+ if (obj->funcs && obj->funcs->vunmap)
+ obj->funcs->vunmap(obj, vaddr);
+ else if (obj->dev->driver->gem_prime_vunmap)
+ obj->dev->driver->gem_prime_vunmap(obj, vaddr);
+}
+EXPORT_SYMBOL(drm_gem_vunmap);
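For reference, a hedged sketch of the per-object function table that the new obj->funcs dispatch above prefers over the corresponding drm_driver callbacks; every example_* symbol is hypothetical driver code:

static const struct drm_gem_object_funcs example_gem_funcs = {
	.free = example_gem_free,	/* instead of gem_free_object_unlocked */
	.open = example_gem_open,	/* instead of gem_open_object */
	.close = example_gem_close,	/* instead of gem_close_object */
	.pin = example_gem_pin,		/* instead of gem_prime_pin */
	.unpin = example_gem_unpin,	/* instead of gem_prime_unpin */
	.vmap = example_gem_vmap,	/* instead of gem_prime_vmap */
	.vunmap = example_gem_vunmap,	/* instead of gem_prime_vunmap */
	.vm_ops = &example_gem_vm_ops,	/* instead of gem_vm_ops */
};

/* Assigned once at object creation time:
 *	obj->funcs = &example_gem_funcs;
 */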
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1d2ced882b66..cc26625b4b33 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -176,6 +176,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
*
* This function frees the backing memory of the CMA GEM object, cleans up the
* GEM object state and frees the memory used to store the object itself.
+ * If the buffer is imported and the virtual address is set, it is released.
* Drivers using the CMA helpers should set this as their
* &drm_driver.gem_free_object_unlocked callback.
*/
@@ -189,6 +190,8 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
cma_obj->vaddr, cma_obj->paddr);
} else if (gem_obj->import_attach) {
+ if (cma_obj->vaddr)
+ dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
}
@@ -575,3 +578,86 @@ void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
/* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
+
+static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = {
+ .free = drm_gem_cma_free_object,
+ .print_info = drm_gem_cma_print_info,
+ .get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .vmap = drm_gem_cma_prime_vmap,
+ .vm_ops = &drm_gem_cma_vm_ops,
+};
+
+/**
+ * drm_cma_gem_create_object_default_funcs - Create a CMA GEM object with a
+ * default function table
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * This sets the GEM object functions to the default CMA helper functions.
+ * This function can be used as the &drm_driver.gem_create_object callback.
+ *
+ * Returns:
+ * A pointer to a newly allocated GEM object or an error pointer on failure.
+ */
+struct drm_gem_object *
+drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_cma_object *cma_obj;
+
+ cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+ if (!cma_obj)
+ return NULL;
+
+ cma_obj->base.funcs = &drm_cma_gem_default_funcs;
+
+ return &cma_obj->base;
+}
+EXPORT_SYMBOL(drm_cma_gem_create_object_default_funcs);
+
+/**
+ * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
+ * scatter/gather table and get the virtual address of the buffer
+ * @dev: DRM device
+ * @attach: DMA-BUF attachment
+ * @sgt: Scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table using
+ * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
+ * virtual address. This ensures that a CMA GEM object always has its virtual
+ * address set. This address is released when the object is freed.
+ *
+ * This function can be used as the &drm_driver.gem_prime_import_sg_table
+ * callback. The DRM_GEM_CMA_VMAP_DRIVER_OPS() macro provides a shortcut to set
+ * the necessary DRM driver operations.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *obj;
+ void *vaddr;
+
+ vaddr = dma_buf_vmap(attach->dmabuf);
+ if (!vaddr) {
+ DRM_ERROR("Failed to vmap PRIME buffer\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj)) {
+ dma_buf_vunmap(attach->dmabuf, vaddr);
+ return obj;
+ }
+
+ cma_obj = to_drm_gem_cma_obj(obj);
+ cma_obj->vaddr = vaddr;
+
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
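A hedged sketch of a driver wiring these up; DRM_GEM_CMA_VMAP_DRIVER_OPS is the macro referenced in the kerneldoc above, and the driver name and feature flags are illustrative:

static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
			   DRIVER_ATOMIC,
	/* Sets .gem_create_object and .gem_prime_import_sg_table to the
	 * two helpers above, plus the usual PRIME plumbing.
	 */
	DRM_GEM_CMA_VMAP_DRIVER_OPS,
	.name = "example",
};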
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index ded7a379ac35..acb466d25afc 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -171,7 +171,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
}
min_size = (height - 1) * mode_cmd->pitches[i]
- + width * info->cpp[i]
+ + drm_format_info_min_pitch(info, i, width)
+ mode_cmd->offsets[i];
if (objs[i]->size < min_size) {
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
deleted file mode 100644
index 5799e2782dd1..000000000000
--- a/drivers/gpu/drm/drm_global.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <drm/drm_global.h>
-
-struct drm_global_item {
- struct mutex mutex;
- void *object;
- int refcount;
-};
-
-static struct drm_global_item glob[DRM_GLOBAL_NUM];
-
-void drm_global_init(void)
-{
- int i;
-
- for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
- struct drm_global_item *item = &glob[i];
- mutex_init(&item->mutex);
- item->object = NULL;
- item->refcount = 0;
- }
-}
-
-void drm_global_release(void)
-{
- int i;
- for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
- struct drm_global_item *item = &glob[i];
- BUG_ON(item->object != NULL);
- BUG_ON(item->refcount != 0);
- }
-}
-
-/**
- * drm_global_item_ref - Initialize and acquire reference to memory
- * object
- * @ref: Object for initialization
- *
- * This initializes a memory object, allocating memory and calling the
- * .init() hook. Further calls will increase the reference count for
- * that item.
- *
- * Returns:
- * Zero on success, non-zero otherwise.
- */
-int drm_global_item_ref(struct drm_global_reference *ref)
-{
- int ret = 0;
- struct drm_global_item *item = &glob[ref->global_type];
-
- mutex_lock(&item->mutex);
- if (item->refcount == 0) {
- ref->object = kzalloc(ref->size, GFP_KERNEL);
- if (unlikely(ref->object == NULL)) {
- ret = -ENOMEM;
- goto error_unlock;
- }
- ret = ref->init(ref);
- if (unlikely(ret != 0))
- goto error_free;
-
- item->object = ref->object;
- } else {
- ref->object = item->object;
- }
-
- ++item->refcount;
- mutex_unlock(&item->mutex);
- return 0;
-
-error_free:
- kfree(ref->object);
- ref->object = NULL;
-error_unlock:
- mutex_unlock(&item->mutex);
- return ret;
-}
-EXPORT_SYMBOL(drm_global_item_ref);
-
-/**
- * drm_global_item_unref - Drop reference to memory
- * object
- * @ref: Object being removed
- *
- * Drop a reference to the memory object and eventually call the
- * release() hook. The allocated object should be dropped in the
- * release() hook or before calling this function
- *
- */
-
-void drm_global_item_unref(struct drm_global_reference *ref)
-{
- struct drm_global_item *item = &glob[ref->global_type];
-
- mutex_lock(&item->mutex);
- BUG_ON(item->refcount == 0);
- BUG_ON(ref->object != item->object);
- if (--item->refcount == 0) {
- ref->release(ref);
- item->object = NULL;
- }
- mutex_unlock(&item->mutex);
-}
-EXPORT_SYMBOL(drm_global_item_unref);
-
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
deleted file mode 100644
index 6b68e9088436..000000000000
--- a/drivers/gpu/drm/drm_info.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * \file drm_info.c
- * DRM info file implementations
- *
- * \author Ben Gamari <bgamari@gmail.com>
- */
-
-/*
- * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2008 Ben Gamari <bgamari@gmail.com>
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-/**
- * Called when "/proc/dri/.../name" is read.
- *
- * Prints the device name together with the bus id if available.
- */
-int drm_name_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_minor *minor = node->minor;
- struct drm_device *dev = minor->dev;
- struct drm_master *master;
-
- mutex_lock(&dev->master_mutex);
- master = dev->master;
- seq_printf(m, "%s", dev->driver->name);
- if (dev->dev)
- seq_printf(m, " dev=%s", dev_name(dev->dev));
- if (master && master->unique)
- seq_printf(m, " master=%s", master->unique);
- if (dev->unique)
- seq_printf(m, " unique=%s", dev->unique);
- seq_printf(m, "\n");
- mutex_unlock(&dev->master_mutex);
-
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../clients" is read.
- *
- */
-int drm_clients_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_file *priv;
- kuid_t uid;
-
- seq_printf(m,
- "%20s %5s %3s master a %5s %10s\n",
- "command",
- "pid",
- "dev",
- "uid",
- "magic");
-
- /* dev->filelist is sorted youngest first, but we want to present
- * oldest first (i.e. kernel, servers, clients), so walk backwards.
- */
- mutex_lock(&dev->filelist_mutex);
- list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
- struct task_struct *task;
-
- rcu_read_lock(); /* locks pid_task()->comm */
- task = pid_task(priv->pid, PIDTYPE_PID);
- uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
- seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
- task ? task->comm : "<unknown>",
- pid_vnr(priv->pid),
- priv->minor->index,
- drm_is_current_master(priv) ? 'y' : 'n',
- priv->authenticated ? 'y' : 'n',
- from_kuid_munged(seq_user_ns(m), uid),
- priv->magic);
- rcu_read_unlock();
- }
- mutex_unlock(&dev->filelist_mutex);
- return 0;
-}
-
-static int drm_gem_one_name_info(int id, void *ptr, void *data)
-{
- struct drm_gem_object *obj = ptr;
- struct seq_file *m = data;
-
- seq_printf(m, "%6d %8zd %7d %8d\n",
- obj->name, obj->size,
- obj->handle_count,
- kref_read(&obj->refcount));
- return 0;
-}
-
-int drm_gem_name_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
-
- seq_printf(m, " name size handles refcount\n");
-
- mutex_lock(&dev->object_name_lock);
- idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
- mutex_unlock(&dev->object_name_lock);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 0c4eb4a9ab31..c7a7d7ce5d1c 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -56,11 +56,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
void drm_minor_release(struct drm_minor *minor);
-/* drm_info.c */
-int drm_name_info(struct seq_file *m, void *data);
-int drm_clients_info(struct seq_file *m, void* data);
-int drm_gem_name_info(struct seq_file *m, void *data);
-
/* drm_vblank.c */
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
void drm_vblank_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 24a177ea5417..3650d3c46718 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -39,7 +39,6 @@ struct drm_master *drm_lease_owner(struct drm_master *master)
master = master->lessor;
return master;
}
-EXPORT_SYMBOL(drm_lease_owner);
/**
* _drm_find_lessee - find lessee by id (idr_mutex held)
@@ -117,7 +116,6 @@ bool _drm_lease_held(struct drm_file *file_priv, int id)
return _drm_lease_held_master(file_priv->master, id);
}
-EXPORT_SYMBOL(_drm_lease_held);
/**
* drm_lease_held - check drm_mode_object lease status (idr_mutex not held)
@@ -144,7 +142,6 @@ bool drm_lease_held(struct drm_file *file_priv, int id)
mutex_unlock(&master->dev->mode_config.idr_mutex);
return ret;
}
-EXPORT_SYMBOL(drm_lease_held);
/**
* drm_lease_filter_crtcs - restricted crtc set to leased values (idr_mutex not held)
@@ -184,7 +181,6 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
mutex_unlock(&master->dev->mode_config.idr_mutex);
return crtcs_out;
}
-EXPORT_SYMBOL(drm_lease_filter_crtcs);
/*
* drm_lease_create - create a new drm_master with leased objects (idr_mutex not held)
@@ -195,7 +191,7 @@ EXPORT_SYMBOL(drm_lease_filter_crtcs);
* make sure all of the desired objects can be leased, atomically
* leasing them to the new drmmaster.
*
- * ERR_PTR(-EACCESS) some other master holds the title to any object
+ * ERR_PTR(-EACCES) some other master holds the title to any object
* ERR_PTR(-ENOENT) some object is not a valid DRM object for this device
* ERR_PTR(-EBUSY) some other lessee holds title to this object
* ERR_PTR(-EEXIST) same object specified more than once in the provided list
@@ -357,9 +353,9 @@ void drm_lease_revoke(struct drm_master *top)
}
static int validate_lease(struct drm_device *dev,
- struct drm_file *lessor_priv,
int object_count,
- struct drm_mode_object **objects)
+ struct drm_mode_object **objects,
+ bool universal_planes)
{
int o;
int has_crtc = -1;
@@ -376,14 +372,14 @@ static int validate_lease(struct drm_device *dev,
if (objects[o]->type == DRM_MODE_OBJECT_CONNECTOR && has_connector == -1)
has_connector = o;
- if (lessor_priv->universal_planes) {
+ if (universal_planes) {
if (objects[o]->type == DRM_MODE_OBJECT_PLANE && has_plane == -1)
has_plane = o;
}
}
if (has_crtc == -1 || has_connector == -1)
return -EINVAL;
- if (lessor_priv->universal_planes && has_plane == -1)
+ if (universal_planes && has_plane == -1)
return -EINVAL;
return 0;
}
@@ -397,6 +393,8 @@ static int fill_object_idr(struct drm_device *dev,
struct drm_mode_object **objects;
u32 o;
int ret;
+ bool universal_planes = READ_ONCE(lessor_priv->universal_planes);
+
objects = kcalloc(object_count, sizeof(struct drm_mode_object *),
GFP_KERNEL);
if (!objects)
@@ -419,14 +417,17 @@ static int fill_object_idr(struct drm_device *dev,
}
if (!drm_mode_object_lease_required(objects[o]->type)) {
+ DRM_DEBUG_KMS("invalid object for lease\n");
ret = -EINVAL;
goto out_free_objects;
}
}
- ret = validate_lease(dev, lessor_priv, object_count, objects);
- if (ret)
+ ret = validate_lease(dev, object_count, objects, universal_planes);
+ if (ret) {
+ DRM_DEBUG_LEASE("lease validation failed\n");
goto out_free_objects;
+ }
/* add their IDs to the lease request - taking into account
universal planes */
@@ -449,7 +450,7 @@ static int fill_object_idr(struct drm_device *dev,
object_id, ret);
goto out_free_objects;
}
- if (obj->type == DRM_MODE_OBJECT_CRTC && !lessor_priv->universal_planes) {
+ if (obj->type == DRM_MODE_OBJECT_CRTC && !universal_planes) {
struct drm_crtc *crtc = obj_to_crtc(obj);
ret = idr_alloc(leases, &drm_lease_idr_object, crtc->primary->base.id, crtc->primary->base.id + 1, GFP_KERNEL);
if (ret < 0) {
@@ -509,15 +510,21 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
/* Do not allow sub-leases */
- if (lessor->lessor)
+ if (lessor->lessor) {
+ DRM_DEBUG_LEASE("recursive leasing not allowed\n");
return -EINVAL;
+ }
/* need some objects */
- if (cl->object_count == 0)
+ if (cl->object_count == 0) {
+ DRM_DEBUG_LEASE("no objects in lease\n");
return -EINVAL;
+ }
- if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK)))
+ if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK))) {
+ DRM_DEBUG_LEASE("invalid flags\n");
return -EINVAL;
+ }
object_count = cl->object_count;
@@ -532,6 +539,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
object_count, object_ids);
kfree(object_ids);
if (ret) {
+ DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
idr_destroy(&leases);
return ret;
}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index d69e4fc1ee77..40c4349cb939 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -51,7 +51,7 @@
#endif
static void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device * dev)
+ struct drm_device *dev)
{
unsigned long i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
@@ -94,26 +94,26 @@ static void *agp_remap(unsigned long offset, unsigned long size,
}
/** Wrapper around agp_free_memory() */
-void drm_free_agp(struct agp_memory * handle, int pages)
+void drm_free_agp(struct agp_memory *handle, int pages)
{
agp_free_memory(handle);
}
/** Wrapper around agp_bind_memory() */
-int drm_bind_agp(struct agp_memory * handle, unsigned int start)
+int drm_bind_agp(struct agp_memory *handle, unsigned int start)
{
return agp_bind_memory(handle, start);
}
/** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(struct agp_memory * handle)
+int drm_unbind_agp(struct agp_memory *handle)
{
return agp_unbind_memory(handle);
}
#else /* CONFIG_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device * dev)
+ struct drm_device *dev)
{
return NULL;
}
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index ee80788f2c40..703bfce975bb 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -297,6 +297,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.prop_crtc_id = prop;
+ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "FB_DAMAGE_CLIPS",
+ 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_fb_damage_clips = prop;
+
prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
"ACTIVE");
if (!prop)
@@ -310,6 +316,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.prop_mode_id = prop;
+ prop = drm_property_create_bool(dev, 0,
+ "VRR_ENABLED");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_vrr_enabled = prop;
+
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB,
"DEGAMMA_LUT", 0);
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index be8b754eaf60..cd9bc0ce9be0 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -38,7 +38,8 @@ int __drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
int ret;
mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
+ ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL,
+ 1, 0, GFP_KERNEL);
if (ret >= 0) {
/*
* Set up the object linking under the protection of the idr
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 02db9ac82d7a..24a750436559 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -716,8 +716,8 @@ int of_get_drm_display_mode(struct device_node *np,
if (bus_flags)
drm_bus_flags_from_videomode(&vm, bus_flags);
- pr_debug("%pOF: got %dx%d display mode from %s\n",
- np, vm.hactive, vm.vactive, np->name);
+ pr_debug("%pOF: got %dx%d display mode\n",
+ np, vm.hactive, vm.vactive);
drm_mode_debug_printmodeline(dmode);
return 0;
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index f1c24ab0ef09..9150fa385bba 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -146,6 +146,21 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev)
* Initialize a CRTC object with a default helper-provided primary plane and no
* cursor plane.
*
+ * Note that we make some assumptions about hardware limitations that may not be
+ * true for all hardware:
+ *
+ * 1. Primary plane cannot be repositioned.
+ * 2. Primary plane cannot be scaled.
+ * 3. Primary plane must cover the entire CRTC.
+ * 4. Subpixel positioning is not supported.
+ * 5. The primary plane must always be on if the CRTC is enabled.
+ *
+ * This is purely a backwards compatibility helper for old drivers. Drivers
+ * should instead implement their own primary plane. Atomic drivers must do so.
+ * Drivers with the above hardware restriction can look into using &struct
+ * drm_simple_display_pipe, which encapsulates the above limitations into a nice
+ * interface.
+ *
* Returns:
* Zero on success, error code on failure.
*/
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 8a5100685875..51f534db9107 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -56,6 +56,10 @@
* drm_modeset_drop_locks(ctx);
* drm_modeset_acquire_fini(ctx);
*
+ * For convenience this control flow is implemented in
+ * DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case
+ * where all modeset locks need to be taken through drm_modeset_lock_all_ctx().
+ *
* If all that is needed is a single modeset lock, then the &struct
* drm_modeset_acquire_ctx is not needed and the locking can be simplified
* by passing a NULL instead of ctx in the drm_modeset_lock() call or
@@ -383,6 +387,8 @@ EXPORT_SYMBOL(drm_modeset_unlock);
* Locks acquired with this function should be released by calling the
* drm_modeset_drop_locks() function on @ctx.
*
+ * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
+ *
* Returns: 0 on success or a negative error-code on failure.
*/
int drm_modeset_lock_all_ctx(struct drm_device *dev,
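A hedged sketch of the DRM_MODESET_LOCK_ALL_BEGIN()/END() control flow documented above; the macro pair hides the -EDEADLK backoff-and-retry loop that callers of drm_modeset_lock_all_ctx() otherwise spell out by hand (compare the setplane_internal() conversion in drm_plane.c below):

static int example_locked_update(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
				   DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);

	/* ... modify modeset state while holding every lock ... */

	DRM_MODESET_LOCK_ALL_END(ctx, ret);

	return ret;
}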
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index ee4a5e1221f1..52e445bb1aa5 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -59,6 +59,14 @@ static const struct drm_dmi_panel_orientation_data gpd_win = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data gpd_win2 = {
+ .width = 720,
+ .height = 1280,
+ .bios_dates = (const char * const []){
+ "12/07/2017", "05/24/2018", "06/29/2018", NULL },
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
.width = 800,
.height = 1280,
@@ -106,6 +114,14 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
},
.driver_data = (void *)&gpd_win,
+ }, { /* GPD Win 2 (too generic strings, also match on bios date) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ },
+ .driver_data = (void *)&gpd_win2,
}, { /* I.T.Works TW891 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 48f615d38931..a9d9df6c85ad 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -61,15 +61,14 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
return NULL;
dmah->size = size;
- dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
+ dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
+ GFP_KERNEL | __GFP_COMP);
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
- memset(dmah->vaddr, 0, size);
-
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Reserve */
for (addr = (unsigned long)dmah->vaddr, sz = size;
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 1fa98bd12003..5f650d8fc66b 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -636,6 +636,29 @@ static int __setplane_check(struct drm_plane *plane,
return 0;
}
+/**
+ * drm_any_plane_has_format - Check whether any plane supports this format and modifier combination
+ * @dev: DRM device
+ * @format: pixel format (DRM_FORMAT_*)
+ * @modifier: data layout modifier
+ *
+ * Returns:
+ * Whether at least one plane supports the specified format and modifier combination.
+ */
+bool drm_any_plane_has_format(struct drm_device *dev,
+ u32 format, u64 modifier)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, dev) {
+ if (drm_plane_check_pixel_format(plane, format, modifier) == 0)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_any_plane_has_format);
+
/*
* __setplane_internal - setplane handler for internal callers
*
@@ -744,11 +767,8 @@ static int setplane_internal(struct drm_plane *plane,
struct drm_modeset_acquire_ctx ctx;
int ret;
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-retry:
- ret = drm_modeset_lock_all_ctx(plane->dev, &ctx);
- if (ret)
- goto fail;
+ DRM_MODESET_LOCK_ALL_BEGIN(plane->dev, ctx,
+ DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
if (drm_drv_uses_atomic_modeset(plane->dev))
ret = __setplane_atomic(plane, crtc, fb,
@@ -759,14 +779,7 @@ retry:
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, &ctx);
-fail:
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
return ret;
}
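A hedged example of the new helper in a framebuffer validation path; example_validate_fb() is hypothetical:

static int example_validate_fb(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *cmd)
{
	if (!drm_any_plane_has_format(dev, cmd->pixel_format,
				      cmd->modifier[0])) {
		DRM_DEBUG_KMS("no plane supports the format/modifier combo\n");
		return -EINVAL;
	}

	return 0;
}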
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index a393756b664e..0fff72dcd06d 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -42,11 +42,8 @@
* primary plane support on top of the normal CRTC configuration interface.
* Since the legacy &drm_mode_config_funcs.set_config interface ties the primary
* plane together with the CRTC state this does not allow userspace to disable
- * the primary plane itself. To avoid too much duplicated code use
- * drm_plane_helper_check_update() which can be used to enforce the same
- * restrictions as primary planes had thus. The default primary plane only
- * expose XRBG8888 and ARGB8888 as valid pixel formats for the attached
- * framebuffer.
+ * the primary plane itself. The default primary plane only exposes XRGB8888 and
+ * ARGB8888 as valid pixel formats for the attached framebuffer.
*
* Drivers are highly recommended to implement proper support for primary
* planes, and newly merged drivers must not rely upon these transitional
@@ -100,43 +97,17 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
return count;
}
-/**
- * drm_plane_helper_check_update() - Check plane update for validity
- * @plane: plane object to update
- * @crtc: owning CRTC of owning plane
- * @fb: framebuffer to flip onto plane
- * @src: source coordinates in 16.16 fixed point
- * @dst: integer destination coordinates
- * @rotation: plane rotation
- * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
- * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
- * @can_position: is it legal to position the plane such that it
- * doesn't cover the entire crtc? This will generally
- * only be false for primary planes.
- * @can_update_disabled: can the plane be updated while the crtc
- * is disabled?
- * @visible: output parameter indicating whether plane is still visible after
- * clipping
- *
- * Checks that a desired plane update is valid. Drivers that provide
- * their own plane handling rather than helper-provided implementations may
- * still wish to call this function to avoid duplication of error checking
- * code.
- *
- * RETURNS:
- * Zero if update appears valid, error code on failure
- */
-int drm_plane_helper_check_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_rect *src,
- struct drm_rect *dst,
- unsigned int rotation,
- int min_scale,
- int max_scale,
- bool can_position,
- bool can_update_disabled,
- bool *visible)
+static int drm_plane_helper_check_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_rect *src,
+ struct drm_rect *dst,
+ unsigned int rotation,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled,
+ bool *visible)
{
struct drm_plane_state plane_state = {
.plane = plane,
@@ -173,52 +144,14 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
return 0;
}
-EXPORT_SYMBOL(drm_plane_helper_check_update);
-/**
- * drm_primary_helper_update() - Helper for primary plane update
- * @plane: plane object to update
- * @crtc: owning CRTC of owning plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of primary plane on crtc
- * @crtc_y: y offset of primary plane on crtc
- * @crtc_w: width of primary plane rectangle on crtc
- * @crtc_h: height of primary plane rectangle on crtc
- * @src_x: x offset of @fb for panning
- * @src_y: y offset of @fb for panning
- * @src_w: width of source rectangle in @fb
- * @src_h: height of source rectangle in @fb
- * @ctx: lock acquire context, not used here
- *
- * Provides a default plane update handler for primary planes. This is handler
- * is called in response to a userspace SetPlane operation on the plane with a
- * non-NULL framebuffer. We call the driver's modeset handler to update the
- * framebuffer.
- *
- * SetPlane() on a primary plane of a disabled CRTC is not supported, and will
- * return an error.
- *
- * Note that we make some assumptions about hardware limitations that may not be
- * true for all hardware --
- *
- * 1. Primary plane cannot be repositioned.
- * 2. Primary plane cannot be scaled.
- * 3. Primary plane must cover the entire CRTC.
- * 4. Subpixel positioning is not supported.
- *
- * Drivers for hardware that don't have these restrictions can provide their
- * own implementation rather than using this helper.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
+static int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_mode_set set = {
.crtc = crtc,
@@ -285,35 +218,12 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
kfree(connector_list);
return ret;
}
-EXPORT_SYMBOL(drm_primary_helper_update);
-/**
- * drm_primary_helper_disable() - Helper for primary plane disable
- * @plane: plane to disable
- * @ctx: lock acquire context, not used here
- *
- * Provides a default plane disable handler for primary planes. This is handler
- * is called in response to a userspace SetPlane operation on the plane with a
- * NULL framebuffer parameter. It unconditionally fails the disable call with
- * -EINVAL the only way to disable the primary plane without driver support is
- * to disable the entire CRTC. Which does not match the plane
- * &drm_plane_funcs.disable_plane hook.
- *
- * Note that some hardware may be able to disable the primary plane without
- * disabling the whole CRTC. Drivers for such hardware should provide their
- * own disable handler that disables just the primary plane (and they'll likely
- * need to provide their own update handler as well to properly re-enable a
- * disabled primary plane).
- *
- * RETURNS:
- * Unconditionally returns -EINVAL.
- */
-int drm_primary_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
+static int drm_primary_helper_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx)
{
return -EINVAL;
}
-EXPORT_SYMBOL(drm_primary_helper_disable);
/**
* drm_primary_helper_destroy() - Helper for primary plane destruction
@@ -336,200 +246,3 @@ const struct drm_plane_funcs drm_primary_helper_funcs = {
.destroy = drm_primary_helper_destroy,
};
EXPORT_SYMBOL(drm_primary_helper_funcs);
-
-int drm_plane_helper_commit(struct drm_plane *plane,
- struct drm_plane_state *plane_state,
- struct drm_framebuffer *old_fb)
-{
- const struct drm_plane_helper_funcs *plane_funcs;
- struct drm_crtc *crtc[2];
- const struct drm_crtc_helper_funcs *crtc_funcs[2];
- int i, ret = 0;
-
- plane_funcs = plane->helper_private;
-
- /* Since this is a transitional helper we can't assume that plane->state
- * is always valid. Hence we need to use plane->crtc instead of
- * plane->state->crtc as the old crtc. */
- crtc[0] = plane->crtc;
- crtc[1] = crtc[0] != plane_state->crtc ? plane_state->crtc : NULL;
-
- for (i = 0; i < 2; i++)
- crtc_funcs[i] = crtc[i] ? crtc[i]->helper_private : NULL;
-
- if (plane_funcs->atomic_check) {
- ret = plane_funcs->atomic_check(plane, plane_state);
- if (ret)
- goto out;
- }
-
- if (plane_funcs->prepare_fb && plane_state->fb != old_fb) {
- ret = plane_funcs->prepare_fb(plane,
- plane_state);
- if (ret)
- goto out;
- }
-
- /* Point of no return, commit sw state. */
- swap(plane->state, plane_state);
-
- for (i = 0; i < 2; i++) {
- if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
- crtc_funcs[i]->atomic_begin(crtc[i], crtc[i]->state);
- }
-
- /*
- * Drivers may optionally implement the ->atomic_disable callback, so
- * special-case that here.
- */
- if (drm_atomic_plane_disabling(plane_state, plane->state) &&
- plane_funcs->atomic_disable)
- plane_funcs->atomic_disable(plane, plane_state);
- else
- plane_funcs->atomic_update(plane, plane_state);
-
- for (i = 0; i < 2; i++) {
- if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
- crtc_funcs[i]->atomic_flush(crtc[i], crtc[i]->state);
- }
-
- /*
- * If we only moved the plane and didn't change fb's, there's no need to
- * wait for vblank.
- */
- if (plane->state->fb == old_fb)
- goto out;
-
- for (i = 0; i < 2; i++) {
- if (!crtc[i])
- continue;
-
- if (crtc[i]->cursor == plane)
- continue;
-
- /* There's no other way to figure out whether the crtc is running. */
- ret = drm_crtc_vblank_get(crtc[i]);
- if (ret == 0) {
- drm_crtc_wait_one_vblank(crtc[i]);
- drm_crtc_vblank_put(crtc[i]);
- }
-
- ret = 0;
- }
-
- if (plane_funcs->cleanup_fb)
- plane_funcs->cleanup_fb(plane, plane_state);
-out:
- if (plane->funcs->atomic_destroy_state)
- plane->funcs->atomic_destroy_state(plane, plane_state);
- else
- drm_atomic_helper_plane_destroy_state(plane, plane_state);
-
- return ret;
-}
-
-/**
- * drm_plane_helper_update() - Transitional helper for plane update
- * @plane: plane object to update
- * @crtc: owning CRTC of owning plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of primary plane on crtc
- * @crtc_y: y offset of primary plane on crtc
- * @crtc_w: width of primary plane rectangle on crtc
- * @crtc_h: height of primary plane rectangle on crtc
- * @src_x: x offset of @fb for panning
- * @src_y: y offset of @fb for panning
- * @src_w: width of source rectangle in @fb
- * @src_h: height of source rectangle in @fb
- * @ctx: lock acquire context, not used here
- *
- * Provides a default plane update handler using the atomic plane update
- * functions. It is fully left to the driver to check plane constraints and
- * handle corner-cases like a fully occluded or otherwise invisible plane.
- *
- * This is useful for piecewise transitioning of a driver to the atomic helpers.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_plane_state *plane_state;
-
- if (plane->funcs->atomic_duplicate_state)
- plane_state = plane->funcs->atomic_duplicate_state(plane);
- else {
- if (!plane->state)
- drm_atomic_helper_plane_reset(plane);
-
- plane_state = drm_atomic_helper_plane_duplicate_state(plane);
- }
- if (!plane_state)
- return -ENOMEM;
- plane_state->plane = plane;
-
- plane_state->crtc = crtc;
- drm_atomic_set_fb_for_plane(plane_state, fb);
- plane_state->crtc_x = crtc_x;
- plane_state->crtc_y = crtc_y;
- plane_state->crtc_h = crtc_h;
- plane_state->crtc_w = crtc_w;
- plane_state->src_x = src_x;
- plane_state->src_y = src_y;
- plane_state->src_h = src_h;
- plane_state->src_w = src_w;
-
- return drm_plane_helper_commit(plane, plane_state, plane->fb);
-}
-EXPORT_SYMBOL(drm_plane_helper_update);
-
-/**
- * drm_plane_helper_disable() - Transitional helper for plane disable
- * @plane: plane to disable
- * @ctx: lock acquire context, not used here
- *
- * Provides a default plane disable handler using the atomic plane update
- * functions. It is fully left to the driver to check plane constraints and
- * handle corner-cases like a fully occluded or otherwise invisible plane.
- *
- * This is useful for piecewise transitioning of a driver to the atomic helpers.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int drm_plane_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_plane_state *plane_state;
- struct drm_framebuffer *old_fb;
-
- /* crtc helpers love to call disable functions for already disabled hw
- * functions. So cope with that. */
- if (!plane->crtc)
- return 0;
-
- if (plane->funcs->atomic_duplicate_state)
- plane_state = plane->funcs->atomic_duplicate_state(plane);
- else {
- if (!plane->state)
- drm_atomic_helper_plane_reset(plane);
-
- plane_state = drm_atomic_helper_plane_duplicate_state(plane);
- }
- if (!plane_state)
- return -ENOMEM;
- plane_state->plane = plane;
-
- plane_state->crtc = NULL;
- old_fb = plane_state->fb;
- drm_atomic_set_fb_for_plane(plane_state, NULL);
-
- return drm_plane_helper_commit(plane, plane_state, old_fb);
-}
-EXPORT_SYMBOL(drm_plane_helper_disable);
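With the transitional drm_plane_helper_update()/drm_plane_helper_disable() entry points removed above, atomic drivers are expected to wire the standard atomic helpers into their plane funcs directly; a hedged sketch of that wiring (example_plane_funcs is illustrative):

static const struct drm_plane_funcs example_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};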
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 3f0205fc0a1a..231e3f6d5f41 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -199,7 +199,6 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
{
struct drm_prime_attachment *prime_attach;
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
if (!prime_attach)
@@ -208,10 +207,7 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
prime_attach->dir = DMA_NONE;
attach->priv = prime_attach;
- if (!dev->driver->gem_prime_pin)
- return 0;
-
- return dev->driver->gem_prime_pin(obj);
+ return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);
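drm_gem_pin() is assumed here to centralize the dispatch that the removed lines did by hand: prefer the per-object hook, fall back to the driver-wide one, and treat a missing hook as success. A sketch of that assumed behaviour (not part of this hunk):

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}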
@@ -228,7 +224,6 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
{
struct drm_prime_attachment *prime_attach = attach->priv;
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
if (prime_attach) {
struct sg_table *sgt = prime_attach->sgt;
@@ -247,8 +242,7 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
attach->priv = NULL;
}
- if (dev->driver->gem_prime_unpin)
- dev->driver->gem_prime_unpin(obj);
+ drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);
@@ -310,7 +304,10 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
if (WARN_ON(prime_attach->dir != DMA_NONE))
return ERR_PTR(-EBUSY);
- sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+ if (obj->funcs)
+ sgt = obj->funcs->get_sg_table(obj);
+ else
+ sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
if (!IS_ERR(sgt)) {
if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
@@ -406,12 +403,13 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
+ void *vaddr;
- if (dev->driver->gem_prime_vmap)
- return dev->driver->gem_prime_vmap(obj);
- else
- return NULL;
+ vaddr = drm_gem_vmap(obj);
+ if (IS_ERR(vaddr))
+ vaddr = NULL;
+
+ return vaddr;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
@@ -426,42 +424,12 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
- if (dev->driver->gem_prime_vunmap)
- dev->driver->gem_prime_vunmap(obj, vaddr);
+ drm_gem_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
/**
- * drm_gem_dmabuf_kmap - map implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map callback.
- */
-void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap);
-
-/**
- * drm_gem_dmabuf_kunmap - unmap implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
- */
-void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
- void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap);
-
-/**
* drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
* @dma_buf: buffer to be mapped
* @vma: virtual address range
@@ -489,8 +457,6 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .map = drm_gem_dmabuf_kmap,
- .unmap = drm_gem_dmabuf_kunmap,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
@@ -559,7 +525,12 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
return dmabuf;
}
- dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (obj->funcs && obj->funcs->export)
+ dmabuf = obj->funcs->export(obj, flags);
+ else if (dev->driver->gem_prime_export)
+ dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ else
+ dmabuf = drm_gem_prime_export(dev, obj, flags);
if (IS_ERR(dmabuf)) {
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
@@ -679,6 +650,52 @@ out_unlock:
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
/**
+ * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
+ * @obj: GEM object
+ * @vma: Virtual address range
+ *
+ * This function sets up a userspace mapping for PRIME exported buffers using
+ * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
+ * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
+ * called to set up the mapping.
+ *
+ * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
+ */
+int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_file *priv;
+ struct file *fil;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ fil = kzalloc(sizeof(*fil), GFP_KERNEL);
+ if (!priv || !fil) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Used by drm_gem_mmap() to lookup the GEM object */
+ priv->minor = obj->dev->primary;
+ fil->private_data = priv;
+
+ ret = drm_vma_node_allow(&obj->vma_node, priv);
+ if (ret)
+ goto out;
+
+ vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
+
+ ret = obj->dev->driver->fops->mmap(fil, vma);
+
+ drm_vma_node_revoke(&obj->vma_node, priv);
+out:
+ kfree(priv);
+ kfree(fil);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_mmap);
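Drivers opt in by pointing the corresponding &drm_driver callback at this helper; a hedged sketch (example_driver and the surrounding PRIME callbacks shown are illustrative):

static struct drm_driver example_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_PRIME,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_mmap		= drm_gem_prime_mmap,
};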
+
+/**
* drm_gem_prime_import_dev - core implementation of the import callback
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
@@ -792,7 +809,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
/* never seen this one, need to import */
mutex_lock(&dev->object_name_lock);
- obj = dev->driver->gem_prime_import(dev, dma_buf);
+ if (dev->driver->gem_prime_import)
+ obj = dev->driver->gem_prime_import(dev, dma_buf);
+ else
+ obj = drm_gem_prime_import(dev, dma_buf);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto out_unlock;
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 51fa978f0d23..917812448d1b 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -190,6 +190,13 @@ static void drm_simple_kms_plane_cleanup_fb(struct drm_plane *plane,
pipe->funcs->cleanup_fb(pipe, state);
}
+static bool drm_simple_kms_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
.prepare_fb = drm_simple_kms_plane_prepare_fb,
.cleanup_fb = drm_simple_kms_plane_cleanup_fb,
@@ -204,6 +211,7 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .format_mod_supported = drm_simple_kms_format_mod_supported,
};
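Because the simple-KMS plane now reports only DRM_FORMAT_MOD_LINEAR as supported, drivers that pass a modifier list to drm_simple_display_pipe_init() should keep it consistent with that; a hedged sketch (the example_* names and priv->pipe are illustrative):

static const uint32_t example_formats[] = { DRM_FORMAT_XRGB8888 };
static const uint64_t example_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,	/* sentinel terminating the list */
};

ret = drm_simple_display_pipe_init(drm, &priv->pipe, &example_pipe_funcs,
				   example_formats,
				   ARRAY_SIZE(example_formats),
				   example_modifiers, NULL);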
/**
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 5c2091dbd230..db30a0e89db8 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -56,22 +56,6 @@
#include "drm_internal.h"
#include <drm/drm_syncobj.h>
-struct drm_syncobj_stub_fence {
- struct dma_fence base;
- spinlock_t lock;
-};
-
-static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence)
-{
- return "syncobjstub";
-}
-
-static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
- .get_driver_name = drm_syncobj_stub_fence_get_name,
- .get_timeline_name = drm_syncobj_stub_fence_get_name,
-};
-
-
/**
* drm_syncobj_find - lookup and reference a sync object.
* @file_private: drm file private pointer
@@ -113,8 +97,6 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
{
int ret;
- WARN_ON(*fence);
-
*fence = drm_syncobj_fence_get(syncobj);
if (*fence)
return 1;
@@ -158,13 +140,11 @@ void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
- * @point: timeline point
* @fence: fence to install in sync file.
*
- * This replaces the fence on a sync object, or a timeline point fence.
+ * This replaces the fence on a sync object.
*/
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
- u64 point,
struct dma_fence *fence)
{
struct dma_fence *old_fence;
@@ -192,23 +172,18 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
-static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
+/**
+ * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
+ * @syncobj: sync object to assign the fence on
+ *
+ * Assign an already signaled stub fence to the sync object.
+ */
+static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
- struct drm_syncobj_stub_fence *fence;
- fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (fence == NULL)
- return -ENOMEM;
-
- spin_lock_init(&fence->lock);
- dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
- &fence->lock, 0, 0);
- dma_fence_signal(&fence->base);
-
- drm_syncobj_replace_fence(syncobj, 0, &fence->base);
-
- dma_fence_put(&fence->base);
+ struct dma_fence *fence = dma_fence_get_stub();
- return 0;
+ drm_syncobj_replace_fence(syncobj, fence);
+ dma_fence_put(fence);
}
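The assumption behind dropping the local stub-fence type is that dma_fence_get_stub() hands back a reference to one global, already-signaled fence, so the only obligation left on the caller is to balance the reference. A small sketch of that assumed contract:

struct dma_fence *f = dma_fence_get_stub();

WARN_ON(!dma_fence_is_signaled(f));	/* the stub is born signaled */
dma_fence_put(f);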
/**
@@ -216,6 +191,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
* @file_private: drm file private pointer
* @handle: sync object handle to lookup.
* @point: timeline point
+ * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
* @fence: out parameter for the fence
*
* This is just a convenience function that combines drm_syncobj_find() and
@@ -226,7 +202,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
* dma_fence_put().
*/
int drm_syncobj_find_fence(struct drm_file *file_private,
- u32 handle, u64 point,
+ u32 handle, u64 point, u64 flags,
struct dma_fence **fence)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
@@ -255,7 +231,7 @@ void drm_syncobj_free(struct kref *kref)
struct drm_syncobj *syncobj = container_of(kref,
struct drm_syncobj,
refcount);
- drm_syncobj_replace_fence(syncobj, 0, NULL);
+ drm_syncobj_replace_fence(syncobj, NULL);
kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
@@ -275,7 +251,6 @@ EXPORT_SYMBOL(drm_syncobj_free);
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
struct dma_fence *fence)
{
- int ret;
struct drm_syncobj *syncobj;
syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
@@ -286,16 +261,11 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
INIT_LIST_HEAD(&syncobj->cb_list);
spin_lock_init(&syncobj->lock);
- if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
- ret = drm_syncobj_assign_null_handle(syncobj);
- if (ret < 0) {
- drm_syncobj_put(syncobj);
- return ret;
- }
- }
+ if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
+ drm_syncobj_assign_null_handle(syncobj);
if (fence)
- drm_syncobj_replace_fence(syncobj, 0, fence);
+ drm_syncobj_replace_fence(syncobj, fence);
*out_syncobj = syncobj;
return 0;
@@ -480,7 +450,7 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return -ENOENT;
}
- drm_syncobj_replace_fence(syncobj, 0, fence);
+ drm_syncobj_replace_fence(syncobj, fence);
dma_fence_put(fence);
drm_syncobj_put(syncobj);
return 0;
@@ -497,7 +467,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_find_fence(file_private, handle, 0, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
if (ret)
goto err_put_fd;
@@ -719,9 +689,6 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
for (i = 0; i < count; ++i) {
- if (entries[i].fence)
- continue;
-
drm_syncobj_fence_get_or_add_callback(syncobjs[i],
&entries[i].fence,
&entries[i].syncobj_cb,
@@ -954,7 +921,7 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
return ret;
for (i = 0; i < args->count_handles; i++)
- drm_syncobj_replace_fence(syncobjs[i], 0, NULL);
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
drm_syncobj_array_free(syncobjs, args->count_handles);
@@ -986,11 +953,8 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
- for (i = 0; i < args->count_handles; i++) {
- ret = drm_syncobj_assign_null_handle(syncobjs[i]);
- if (ret < 0)
- break;
- }
+ for (i = 0; i < args->count_handles; i++)
+ drm_syncobj_assign_null_handle(syncobjs[i]);
drm_syncobj_array_free(syncobjs, args->count_handles);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 83c1f46670bf..52802e6049e0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -550,7 +550,7 @@ out_register:
out_bind:
kfree(priv);
out_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -567,7 +567,7 @@ static void etnaviv_unbind(struct device *dev)
drm->dev_private = NULL;
kfree(priv);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops etnaviv_master_ops = {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 9146e30e24a6..3fbb4855396c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -118,6 +118,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
+ unsigned long flags;
/* Only catch the first event, or when manually re-armed */
if (!etnaviv_dump_core)
@@ -134,13 +135,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
- spin_lock(&gpu->sched.job_list_lock);
+ spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
file_size += submit->cmdbuf.size;
n_obj++;
}
- spin_unlock(&gpu->sched.job_list_lock);
+ spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
/* Add in the active buffer objects */
list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
@@ -182,14 +183,14 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer));
- spin_lock(&gpu->sched.job_list_lock);
+ spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
submit->cmdbuf.vaddr, submit->cmdbuf.size,
etnaviv_cmdbuf_get_va(&submit->cmdbuf));
}
- spin_unlock(&gpu->sched.job_list_lock);
+ spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
/* Reserve space for the bomap */
if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 983e67f19e45..30875f8f2933 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -179,7 +179,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
struct reservation_object *robj = bo->obj->resv;
if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
- ret = reservation_object_reserve_shared(robj);
+ ret = reservation_object_reserve_shared(robj, 1);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index e7c3ed6c9a2e..49a6763693f1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
* If the GPU managed to complete this job's fence, the timeout is
* spurious. Bail out.
*/
- if (fence_completed(gpu, submit->out_fence->seqno))
+ if (dma_fence_is_signaled(submit->out_fence))
return;
/*
@@ -105,8 +105,6 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
change = dma_addr - gpu->hangcheck_dma_addr;
if (change < 0 || change > 16) {
gpu->hangcheck_dma_addr = dma_addr;
- schedule_delayed_work(&sched_job->sched->work_tdr,
- sched_job->sched->timeout);
return;
}
@@ -127,6 +125,8 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ drm_sched_job_cleanup(sched_job);
+
etnaviv_submit_put(submit);
}
@@ -159,6 +159,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
submit->out_fence, 0,
INT_MAX, GFP_KERNEL);
if (submit->out_fence_id < 0) {
+ drm_sched_job_cleanup(&submit->sched_job);
ret = -ENOMEM;
goto out_unlock;
}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 208bc27be3cc..3691a140c950 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,11 +10,6 @@ config DRM_EXYNOS
if DRM_EXYNOS
-config DRM_EXYNOS_IOMMU
- bool
- depends on EXYNOS_IOMMU
- default y
-
comment "CRTCs"
config DRM_EXYNOS_FIMD
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 2ad146bbf4f5..2fd2f3ee4fcf 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -4,10 +4,9 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
- exynos_drm_gem.o exynos_drm_plane.o
+ exynos_drm_gem.o exynos_drm_plane.o exynos_drm_dma.o
exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON) += exynos7_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 94529aa82339..5b4e0e8b23bc 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -25,7 +25,6 @@
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
#include "regs-decon5433.h"
#define DSD_CFG_MUX 0x1004
@@ -84,6 +83,14 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
[CURSON_WIN] = DRM_PLANE_TYPE_CURSOR,
};
+static const unsigned int capabilities[WINDOWS_NR] = {
+ 0,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+};
+
static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask,
u32 val)
{
@@ -164,13 +171,6 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
return frm;
}
-static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
-{
- struct decon_context *ctx = crtc->ctx;
-
- return decon_get_frame_count(ctx, false);
-}
-
static void decon_setup_trigger(struct decon_context *ctx)
{
if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
@@ -259,11 +259,76 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
}
+static void decon_win_set_bldeq(struct decon_context *ctx, unsigned int win,
+ unsigned int alpha, unsigned int pixel_alpha)
+{
+ u32 mask = BLENDERQ_A_FUNC_F(0xf) | BLENDERQ_B_FUNC_F(0xf);
+ u32 val = 0;
+
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ case DRM_MODE_BLEND_COVERAGE:
+ val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA_A);
+ val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+ break;
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
+ if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA0);
+ val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+ } else {
+ val |= BLENDERQ_A_FUNC_F(BLENDERQ_ONE);
+ val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+ }
+ break;
+ }
+ decon_set_bits(ctx, DECON_BLENDERQx(win), mask, val);
+}
+
+static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
+ unsigned int alpha, unsigned int pixel_alpha)
+{
+ u32 win_alpha = alpha >> 8;
+ u32 val = 0;
+
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
+ val |= WINCONx_ALPHA_SEL_F;
+ val |= WINCONx_BLD_PIX_F;
+ val |= WINCONx_ALPHA_MUL_F;
+ break;
+ }
+ decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_BLEND_MODE_MASK, val);
+
+ if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ val = VIDOSD_Wx_ALPHA_R_F(win_alpha) |
+ VIDOSD_Wx_ALPHA_G_F(win_alpha) |
+ VIDOSD_Wx_ALPHA_B_F(win_alpha);
+ decon_set_bits(ctx, DECON_VIDOSDxC(win),
+ VIDOSDxC_ALPHA0_RGB_MASK, val);
+ decon_set_bits(ctx, DECON_BLENDCON, BLEND_NEW, BLEND_NEW);
+ }
+}
+
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
struct drm_framebuffer *fb)
{
+ struct exynos_drm_plane plane = ctx->planes[win];
+ struct exynos_drm_plane_state *state =
+ to_exynos_plane_state(plane.base.state);
+ unsigned int alpha = state->base.alpha;
+ unsigned int pixel_alpha;
unsigned long val;
+ if (fb->format->has_alpha)
+ pixel_alpha = state->base.pixel_blend_mode;
+ else
+ pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
+
val = readl(ctx->addr + DECON_WINCONx(win));
val &= WINCONx_ENWIN_F;
@@ -286,7 +351,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
case DRM_FORMAT_ARGB8888:
default:
val |= WINCONx_BPPMODE_32BPP_A8888;
- val |= WINCONx_WSWP_F | WINCONx_BLD_PIX_F | WINCONx_ALPHA_SEL_F;
+ val |= WINCONx_WSWP_F;
val |= WINCONx_BURSTLEN_16WORD;
break;
}
@@ -305,8 +370,12 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
+ decon_set_bits(ctx, DECON_WINCONx(win), ~WINCONx_BLEND_MODE_MASK, val);
- writel(val, ctx->addr + DECON_WINCONx(win));
+ if (win > 0) {
+ decon_win_set_bldmod(ctx, win, alpha, pixel_alpha);
+ decon_win_set_bldeq(ctx, win, alpha, pixel_alpha);
+ }
}
static void decon_shadow_protect(struct decon_context *ctx, bool protect)
@@ -536,7 +605,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.disable = decon_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
- .get_vblank_counter = decon_get_vblank_counter,
.atomic_begin = decon_atomic_begin,
.update_plane = decon_update_plane,
.disable_plane = decon_disable_plane,
@@ -554,13 +622,13 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
int ret;
ctx->drm_dev = drm_dev;
- drm_dev->max_vblank_count = 0xffffffff;
for (win = ctx->first_win; win < WINDOWS_NR; win++) {
ctx->configs[win].pixel_formats = decon_formats;
ctx->configs[win].num_pixel_formats = ARRAY_SIZE(decon_formats);
ctx->configs[win].zpos = win - ctx->first_win;
ctx->configs[win].type = decon_win_types[win];
+ ctx->configs[win].capabilities = capabilities[win];
ret = exynos_plane_init(drm_dev, &ctx->planes[win], win,
&ctx->configs[win]);
@@ -578,7 +646,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
decon_clear_channels(ctx->crtc);
- return drm_iommu_attach_device(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev);
}
static void decon_unbind(struct device *dev, struct device *master, void *data)
@@ -588,7 +656,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
decon_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
}
static const struct component_ops decon_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 88cbd000eb09..381aa3d60e37 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -30,7 +30,6 @@
#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_iommu.h"
#include "regs-decon7.h"
/*
@@ -133,13 +132,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
decon_clear_channels(ctx->crtc);
- return drm_iommu_attach_device(drm_dev, ctx->dev);
+ return exynos_drm_register_dma(drm_dev, ctx->dev);
}
static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
}
static u32 decon_calc_clkdiv(struct decon_context *ctx,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index eea90251808f..2696289ecc78 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -162,16 +162,6 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc)
exynos_crtc->ops->disable_vblank(exynos_crtc);
}
-static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc)
-{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
- if (exynos_crtc->ops->get_vblank_counter)
- return exynos_crtc->ops->get_vblank_counter(exynos_crtc);
-
- return 0;
-}
-
static const struct drm_crtc_funcs exynos_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -181,7 +171,6 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
- .get_vblank_counter = exynos_drm_crtc_get_vblank_counter,
};
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
new file mode 100644
index 000000000000..3432c5ee9f0c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2012 Samsung Electronics Co., Ltd.
+// Author: Inki Dae <inki.dae@samsung.com>
+// Author: Andrzej Hajda <a.hajda@samsung.com>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#include "exynos_drm_drv.h"
+
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#else
+#define arm_iommu_create_mapping(...) ({ NULL; })
+#define arm_iommu_attach_device(...) ({ -ENODEV; })
+#define arm_iommu_release_mapping(...) ({ })
+#define arm_iommu_detach_device(...) ({ })
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
+#if !defined(CONFIG_IOMMU_DMA)
+#define iommu_dma_init_domain(...) ({ -EINVAL; })
+#endif
+
+#define EXYNOS_DEV_ADDR_START 0x20000000
+#define EXYNOS_DEV_ADDR_SIZE 0x40000000
+
+static inline int configure_dma_max_seg_size(struct device *dev)
+{
+ if (!dev->dma_parms)
+ dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ return 0;
+}
+
+static inline void clear_dma_max_seg_size(struct device *dev)
+{
+ kfree(dev->dma_parms);
+ dev->dma_parms = NULL;
+}
+
+/*
+ * drm_iommu_attach_device - attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach themselves to the
+ * iommu mapping.
+ */
+static int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ int ret;
+
+ if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
+ DRM_ERROR("Device %s lacks support for IOMMU\n",
+ dev_name(subdrv_dev));
+ return -EINVAL;
+ }
+
+ ret = configure_dma_max_seg_size(subdrv_dev);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
+ if (to_dma_iommu_mapping(subdrv_dev))
+ arm_iommu_detach_device(subdrv_dev);
+
+ ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
+ } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
+ ret = iommu_attach_device(priv->mapping, subdrv_dev);
+ }
+
+ if (ret)
+ clear_dma_max_seg_size(subdrv_dev);
+
+ return ret;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach themselves from the
+ * iommu mapping.
+ */
+static void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ arm_iommu_detach_device(subdrv_dev);
+ else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ iommu_detach_device(priv->mapping, subdrv_dev);
+
+ clear_dma_max_seg_size(subdrv_dev);
+}
+
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
+{
+ struct exynos_drm_private *priv = drm->dev_private;
+
+ if (!priv->dma_dev) {
+ priv->dma_dev = dev;
+ DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
+ dev_name(dev));
+ }
+
+ if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+ return 0;
+
+ if (!priv->mapping) {
+ void *mapping;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
+ else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ mapping = iommu_get_domain_for_dev(priv->dma_dev);
+
+ if (IS_ERR(mapping))
+ return PTR_ERR(mapping);
+ priv->mapping = mapping;
+ }
+
+ return drm_iommu_attach_device(drm, dev);
+}
+
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
+{
+ if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+ drm_iommu_detach_device(drm, dev);
+}
+
+void exynos_drm_cleanup_dma(struct drm_device *drm)
+{
+ struct exynos_drm_private *priv = drm->dev_private;
+
+ if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+ return;
+
+ arm_iommu_release_mapping(priv->mapping);
+ priv->mapping = NULL;
+ priv->dma_dev = NULL;
+}
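The intended call sites for this new API are the component bind/unbind hooks of each Exynos sub-driver, as the conversions later in this patch show; a minimal sketch (example_bind/example_unbind are illustrative):

static int example_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm_dev = data;

	/* The first device registered becomes the common DMA device. */
	return exynos_drm_register_dma(drm_dev, dev);
}

static void example_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm_dev = data;

	exynos_drm_unregister_dma(drm_dev, dev);
}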
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 6f76baf4550a..2c75e789b2a7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -30,7 +30,6 @@
#include "exynos_drm_ipp.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
-#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -175,8 +174,7 @@ struct exynos_drm_driver_info {
#define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */
#define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */
-#define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */
-#define DRM_FIMC_DEVICE BIT(3) /* devices shared with V4L2 subsystem */
+#define DRM_FIMC_DEVICE BIT(2) /* devices shared with V4L2 subsystem */
#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
@@ -187,16 +185,16 @@ struct exynos_drm_driver_info {
static struct exynos_drm_driver_info exynos_drm_drivers[] = {
{
DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
DRM_COMPONENT_DRIVER
@@ -267,27 +265,6 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
return match ?: ERR_PTR(-ENODEV);
}
-static struct device *exynos_drm_get_dma_device(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
- struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
- struct device *dev;
-
- if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
- continue;
-
- while ((dev = bus_find_device(&platform_bus_type, NULL,
- &info->driver->driver,
- (void *)platform_bus_type.match))) {
- put_device(dev);
- return dev;
- }
- }
- return NULL;
-}
-
static int exynos_drm_bind(struct device *dev)
{
struct exynos_drm_private *private;
@@ -312,23 +289,6 @@ static int exynos_drm_bind(struct device *dev)
dev_set_drvdata(dev, drm);
drm->dev_private = (void *)private;
- /* the first real CRTC device is used for all dma mapping operations */
- private->dma_dev = exynos_drm_get_dma_device();
- if (!private->dma_dev) {
- DRM_ERROR("no device found for DMA mapping operations.\n");
- ret = -ENODEV;
- goto err_free_private;
- }
- DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
- dev_name(private->dma_dev));
-
- /* create common IOMMU mapping for all devices attached to Exynos DRM */
- ret = drm_create_iommu_mapping(drm);
- if (ret < 0) {
- DRM_ERROR("failed to create iommu mapping.\n");
- goto err_free_private;
- }
-
drm_mode_config_init(drm);
exynos_drm_mode_config_init(drm);
@@ -385,8 +345,7 @@ err_unbind_all:
component_unbind_all(drm->dev, drm);
err_mode_config_cleanup:
drm_mode_config_cleanup(drm);
- drm_release_iommu_mapping(drm);
-err_free_private:
+ exynos_drm_cleanup_dma(drm);
kfree(private);
err_free_drm:
drm_dev_put(drm);
@@ -405,7 +364,7 @@ static void exynos_drm_unbind(struct device *dev)
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
- drm_release_iommu_mapping(drm);
+ exynos_drm_cleanup_dma(drm);
kfree(drm->dev_private);
drm->dev_private = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index ec9604f1272b..71eb240bc1f4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -135,7 +135,6 @@ struct exynos_drm_crtc_ops {
void (*disable)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
- u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode);
bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
@@ -215,6 +214,17 @@ static inline struct device *to_dma_dev(struct drm_device *dev)
return priv->dma_dev;
}
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ return priv->mapping ? true : false;
+}
+
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
+void exynos_drm_cleanup_dma(struct drm_device *drm);
+
#ifdef CONFIG_DRM_EXYNOS_DPI
struct drm_encoder *exynos_dpi_probe(struct device *dev);
int exynos_dpi_remove(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 07af7758066d..d81e62ae286a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -14,6 +14,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <drm/drm_atomic_helper.h>
@@ -1474,12 +1475,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct drm_connector *connector = &dsi->connector;
+ struct drm_device *drm = encoder->dev;
int ret;
connector->polled = DRM_CONNECTOR_POLL_HPD;
- ret = drm_connector_init(encoder->dev, connector,
- &exynos_dsi_connector_funcs,
+ ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
if (ret) {
DRM_ERROR("Failed to initialize connector with drm\n");
@@ -1489,7 +1490,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
connector->status = connector_status_disconnected;
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);
+ if (!drm->registered)
+ return 0;
+ connector->funcs->reset(connector);
+ drm_fb_helper_add_one_connector(drm->fb_helper, connector);
+ drm_connector_register(connector);
return 0;
}
@@ -1527,7 +1533,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
}
dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (dsi->panel) {
+ if (IS_ERR(dsi->panel)) {
+ dsi->panel = NULL;
+ } else {
drm_panel_attach(dsi->panel, &dsi->connector);
dsi->connector.status = connector_status_connected;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 9f52382e19ee..31eb538a44ae 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -24,7 +24,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_crtc.h"
static int check_fb_gem_memory_type(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 918dd2c82209..ce9604ca8041 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -23,7 +23,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
-#include "exynos_drm_iommu.h"
#define MAX_CONNECTOR 4
#define PREFERRED_BPP 32
@@ -192,7 +191,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
struct drm_fb_helper *helper;
int ret;
- if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+ if (!dev->mode_config.num_crtc)
return 0;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index e8d0670bb5f8..90dfea0aec4d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -25,7 +25,6 @@
#include <drm/exynos_drm.h>
#include "regs-fimc.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
@@ -1129,7 +1128,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1149,7 +1148,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev);
}
static const struct component_ops fimc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index b7f56935a46b..e3d6a8584715 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -32,7 +32,6 @@
#include "exynos_drm_fb.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
/*
* FIMD stands for Fully Interactive Mobile Display and
@@ -1011,7 +1010,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
if (is_drm_iommu_supported(drm_dev))
fimd_clear_channels(ctx->crtc);
- return drm_iommu_attach_device(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev);
}
static void fimd_unbind(struct device *dev, struct device *master,
@@ -1021,7 +1020,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
fimd_disable(ctx->crtc);
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f2481a2014bb..24c536d6d9cf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -25,7 +25,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
@@ -1405,7 +1404,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = drm_iommu_attach_device(drm_dev, dev);
+ ret = exynos_drm_register_dma(drm_dev, dev);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
g2d_fini_cmdlist(g2d);
@@ -1430,7 +1429,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
priv->g2d_dev = NULL;
cancel_work_sync(&g2d->runqueue_work);
- drm_iommu_detach_device(g2d->drm_dev, dev);
+ exynos_drm_unregister_dma(g2d->drm_dev, dev);
}
static const struct component_ops g2d_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 34ace85feb68..df66c383a877 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -19,7 +19,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index ce15d46bfce8..f048d97fe9e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -24,7 +24,6 @@
#include <drm/exynos_drm.h>
#include "regs-gsc.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
@@ -1170,7 +1169,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1190,7 +1189,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev);
}
static const struct component_ops gsc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
deleted file mode 100644
index 0f373702414e..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/* exynos_drm_iommu.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
-
-static inline int configure_dma_max_seg_size(struct device *dev)
-{
- if (!dev->dma_parms)
- dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
-
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
- return 0;
-}
-
-static inline void clear_dma_max_seg_size(struct device *dev)
-{
- kfree(dev->dma_parms);
- dev->dma_parms = NULL;
-}
-
-/*
- * drm_create_iommu_mapping - create a mapping structure
- *
- * @drm_dev: DRM device
- */
-int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
- EXYNOS_DEV_ADDR_SIZE);
-}
-
-/*
- * drm_release_iommu_mapping - release iommu mapping structure
- *
- * @drm_dev: DRM device
- */
-void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- __exynos_iommu_release_mapping(priv);
-}
-
-/*
- * drm_iommu_attach_device- attach device to iommu mapping
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be attach
- *
- * This function should be called by sub drivers to attach it to iommu
- * mapping.
- */
-int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
- int ret;
-
- if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
- DRM_ERROR("Device %s lacks support for IOMMU\n",
- dev_name(subdrv_dev));
- return -EINVAL;
- }
-
- ret = configure_dma_max_seg_size(subdrv_dev);
- if (ret)
- return ret;
-
- ret = __exynos_iommu_attach(priv, subdrv_dev);
- if (ret)
- clear_dma_max_seg_size(subdrv_dev);
-
- return 0;
-}
-
-/*
- * drm_iommu_detach_device -detach device address space mapping from device
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be detached
- *
- * This function should be called by sub drivers to detach it from iommu
- * mapping
- */
-void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- __exynos_iommu_detach(priv, subdrv_dev);
- clear_dma_max_seg_size(subdrv_dev);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
deleted file mode 100644
index 797d9ee5f15a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* exynos_drm_iommu.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Authoer: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_IOMMU_H_
-#define _EXYNOS_DRM_IOMMU_H_
-
-#define EXYNOS_DEV_ADDR_START 0x20000000
-#define EXYNOS_DEV_ADDR_SIZE 0x40000000
-
-#ifdef CONFIG_DRM_EXYNOS_IOMMU
-
-#if defined(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-
-static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
- unsigned long start, unsigned long size)
-{
- priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start,
- size);
- return IS_ERR(priv->mapping);
-}
-
-static inline void
-__exynos_iommu_release_mapping(struct exynos_drm_private *priv)
-{
- arm_iommu_release_mapping(priv->mapping);
-}
-
-static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- if (dev->archdata.mapping)
- arm_iommu_detach_device(dev);
-
- return arm_iommu_attach_device(dev, priv->mapping);
-}
-
-static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- arm_iommu_detach_device(dev);
-}
-
-#elif defined(CONFIG_IOMMU_DMA)
-#include <linux/dma-iommu.h>
-
-static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
- unsigned long start, unsigned long size)
-{
- priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
- return 0;
-}
-
-static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
-{
- priv->mapping = NULL;
-}
-
-static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- struct iommu_domain *domain = priv->mapping;
-
- if (dev != priv->dma_dev)
- return iommu_attach_device(domain, dev);
- return 0;
-}
-
-static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- struct iommu_domain *domain = priv->mapping;
-
- if (dev != priv->dma_dev)
- iommu_detach_device(domain, dev);
-}
-#else
-#error Unsupported architecture and IOMMU/DMA-mapping glue code
-#endif
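
Both glue variants resolve to the same idea: sub-devices share one IOMMU address space owned by the DMA device. A hedged sketch of the generic-IOMMU branch, using only the core iommu API (example_share_dma_domain is a hypothetical name):

#include <linux/iommu.h>

/* Sketch: look up the DMA domain of the device that owns the
 * mapping and attach another device to it, mirroring
 * __exynos_iommu_attach() in the CONFIG_IOMMU_DMA branch above.
 */
static int example_share_dma_domain(struct device *dma_dev,
				    struct device *sub)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dma_dev);

	if (!domain)
		return -ENODEV;

	if (sub == dma_dev)
		return 0;	/* the owner is attached already */

	return iommu_attach_device(domain, sub);
}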
-
-int drm_create_iommu_mapping(struct drm_device *drm_dev);
-
-void drm_release_iommu_mapping(struct drm_device *drm_dev);
-
-int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev);
-
-void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev);
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- return priv->mapping ? true : false;
-}
-
-#else
-
-static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
- return 0;
-}
-
-static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
-}
-
-static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- return 0;
-}
-
-static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
-}
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
- return false;
-}
-
-#endif
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index a820a68429b9..8d67b2a54be3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -23,7 +23,6 @@
#include <drm/exynos_drm.h>
#include "regs-rotator.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
@@ -244,7 +243,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &rot->ipp;
rot->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
@@ -263,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &rot->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(rot->drm_dev, rot->dev);
+ exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
}
static const struct component_ops rotator_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index cd66774e817d..71270efa64f3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -23,7 +23,6 @@
#include "regs-scaler.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
#define scaler_read(offset) readl(scaler->regs + (offset))
@@ -452,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &scaler->ipp;
scaler->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -473,7 +472,7 @@ static void scaler_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &scaler->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(scaler->drm_dev, scaler->dev);
+ exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
}
static const struct component_ops scaler_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e3a4ecbc503b..0573eab0e190 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -40,7 +40,6 @@
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
#define MIXER_WIN_NR 3
#define VP_DEFAULT_WIN 2
@@ -381,19 +380,16 @@ static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_SCAN_MASK);
}
-static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, struct drm_display_mode *mode)
{
+ enum hdmi_quantization_range range = drm_default_rgb_quant_range(mode);
u32 val;
- switch (height) {
- case 480:
- case 576:
- val = MXR_CFG_RGB601_0_255;
- break;
- case 720:
- case 1080:
- default:
- val = MXR_CFG_RGB709_16_235;
+ if (mode->vdisplay < 720) {
+ val = MXR_CFG_RGB601;
+ } else {
+ val = MXR_CFG_RGB709;
+
/* Configure the BT.709 CSC matrix for full range RGB. */
mixer_reg_write(ctx, MXR_CM_COEFF_Y,
MXR_CSC_CT( 0.184, 0.614, 0.063) |
@@ -402,9 +398,13 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
MXR_CSC_CT(-0.102, -0.338, 0.440));
mixer_reg_write(ctx, MXR_CM_COEFF_CR,
MXR_CSC_CT( 0.440, -0.399, -0.040));
- break;
}
+ if (range == HDMI_QUANTIZATION_RANGE_FULL)
+ val |= MXR_CFG_QUANT_RANGE_FULL;
+ else
+ val |= MXR_CFG_QUANT_RANGE_LIMITED;
+
mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
}
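
The rewrite above splits two decisions the old height switch conflated: colorimetry (BT.601 below 720 active lines, BT.709 otherwise) and quantization range (taken from the CEA-861 default for the mode). A standalone sketch of the selection, assuming the MXR_CFG_* bits from the regs-mixer.h hunk further down:

#include <linux/hdmi.h>
#include <drm/drm_edid.h>	/* drm_default_rgb_quant_range() */
#include <drm/drm_modes.h>

/* Sketch: derive the MXR_CFG colorimetry/range bits for a mode. */
static u32 example_mixer_rgb_cfg(const struct drm_display_mode *mode)
{
	u32 val = mode->vdisplay < 720 ? MXR_CFG_RGB601 : MXR_CFG_RGB709;

	if (drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_FULL)
		val |= MXR_CFG_QUANT_RANGE_FULL;
	else
		val |= MXR_CFG_QUANT_RANGE_LIMITED;

	return val;
}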
@@ -461,7 +461,7 @@ static void mixer_commit(struct mixer_context *ctx)
struct drm_display_mode *mode = &ctx->crtc->base.state->adjusted_mode;
mixer_cfg_scan(ctx, mode->hdisplay, mode->vdisplay);
- mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+ mixer_cfg_rgb_fmt(ctx, mode);
mixer_run(ctx);
}
@@ -878,12 +878,12 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
}
}
- return drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
+ return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
}
static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
- drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+ exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
}
static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
diff --git a/drivers/gpu/drm/exynos/regs-decon5433.h b/drivers/gpu/drm/exynos/regs-decon5433.h
index 19ad9e47945e..63db6974bf14 100644
--- a/drivers/gpu/drm/exynos/regs-decon5433.h
+++ b/drivers/gpu/drm/exynos/regs-decon5433.h
@@ -104,6 +104,7 @@
#define WINCONx_BURSTLEN_16WORD (0x0 << 10)
#define WINCONx_BURSTLEN_8WORD (0x1 << 10)
#define WINCONx_BURSTLEN_4WORD (0x2 << 10)
+#define WINCONx_ALPHA_MUL_F (1 << 7)
#define WINCONx_BLD_PIX_F (1 << 6)
#define WINCONx_BPPMODE_MASK (0xf << 2)
#define WINCONx_BPPMODE_16BPP_565 (0x5 << 2)
@@ -116,11 +117,15 @@
#define WINCONx_BPPMODE_16BPP_A4444 (0xe << 2)
#define WINCONx_ALPHA_SEL_F (1 << 1)
#define WINCONx_ENWIN_F (1 << 0)
+#define WINCONx_BLEND_MODE_MASK (0xc2)
/* SHADOWCON */
#define SHADOWCON_PROTECT_MASK GENMASK(14, 10)
#define SHADOWCON_Wx_PROTECT(n) (1 << (10 + (n)))
+/* VIDOSDxC */
+#define VIDOSDxC_ALPHA0_RGB_MASK (0xffffff)
+
/* VIDOSDxD */
#define VIDOSD_Wx_ALPHA_R_F(n) (((n) & 0xff) << 16)
#define VIDOSD_Wx_ALPHA_G_F(n) (((n) & 0xff) << 8)
@@ -206,4 +211,21 @@
#define CRCCTRL_CRCEN (0x1 << 0)
#define CRCCTRL_MASK (0x7)
+/* BLENDCON */
+#define BLEND_NEW (1 << 0)
+
+/* BLENDERQx */
+#define BLENDERQ_ZERO 0x0
+#define BLENDERQ_ONE 0x1
+#define BLENDERQ_ALPHA_A 0x2
+#define BLENDERQ_ONE_MINUS_ALPHA_A 0x3
+#define BLENDERQ_ALPHA0 0x6
+#define BLENDERQ_Q_FUNC_F(n) (n << 18)
+#define BLENDERQ_P_FUNC_F(n) (n << 12)
+#define BLENDERQ_B_FUNC_F(n) (n << 6)
+#define BLENDERQ_A_FUNC_F(n) (n << 0)
+
+
#endif /* EXYNOS_REGS_DECON5433_H */
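
The BLENDERQ_* selectors pack four blend-function choices (A, B for color, P, Q for alpha) into one register value. A hedged sketch of one plausible encoding of the classic "source over" equation with them; how the value reaches the hardware is driver-specific and not shown:

/* Sketch: dst = src * alpha_A + dst * (1 - alpha_A), expressed
 * with the selectors defined above. The P/Q alpha assignment is
 * an illustrative assumption, not taken from the patch.
 */
static u32 example_blenderq_src_over(void)
{
	return BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA_A) |
	       BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A) |
	       BLENDERQ_P_FUNC_F(BLENDERQ_ONE) |
	       BLENDERQ_Q_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
}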
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index d2b8194a07bf..5ff095b0c1b3 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -85,10 +85,11 @@
/* bits for MXR_CFG */
#define MXR_CFG_LAYER_UPDATE (1 << 31)
#define MXR_CFG_LAYER_UPDATE_COUNT_MASK (3 << 29)
-#define MXR_CFG_RGB601_0_255 (0 << 9)
-#define MXR_CFG_RGB601_16_235 (1 << 9)
-#define MXR_CFG_RGB709_0_255 (2 << 9)
-#define MXR_CFG_RGB709_16_235 (3 << 9)
+#define MXR_CFG_QUANT_RANGE_FULL (0 << 9)
+#define MXR_CFG_QUANT_RANGE_LIMITED (1 << 9)
+#define MXR_CFG_RGB601 (0 << 10)
+#define MXR_CFG_RGB709 (1 << 10)
+
#define MXR_CFG_RGB_FMT_MASK 0x600
#define MXR_CFG_OUT_YUV444 (0 << 8)
#define MXR_CFG_OUT_RGB888 (1 << 8)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 0e3752437e44..18afc94e4dff 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <video/videomode.h>
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_drv.h"
@@ -85,40 +86,34 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
struct drm_connector *con = &fsl_dev->connector.base;
struct drm_display_mode *mode = &crtc->state->mode;
- unsigned int hbp, hfp, hsw, vbp, vfp, vsw, index, pol = 0;
+ unsigned int pol = 0;
+ struct videomode vm;
- index = drm_crtc_index(crtc);
clk_set_rate(fsl_dev->pix_clk, mode->clock * 1000);
- /* Configure timings: */
- hbp = mode->htotal - mode->hsync_end;
- hfp = mode->hsync_start - mode->hdisplay;
- hsw = mode->hsync_end - mode->hsync_start;
- vbp = mode->vtotal - mode->vsync_end;
- vfp = mode->vsync_start - mode->vdisplay;
- vsw = mode->vsync_end - mode->vsync_start;
+ drm_display_mode_to_videomode(mode, &vm);
/* INV_PXCK as default (most display sample data on rising edge) */
if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE))
pol |= DCU_SYN_POL_INV_PXCK;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
pol |= DCU_SYN_POL_INV_HS_LOW;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
pol |= DCU_SYN_POL_INV_VS_LOW;
regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
- DCU_HSYN_PARA_BP(hbp) |
- DCU_HSYN_PARA_PW(hsw) |
- DCU_HSYN_PARA_FP(hfp));
+ DCU_HSYN_PARA_BP(vm.hback_porch) |
+ DCU_HSYN_PARA_PW(vm.hsync_len) |
+ DCU_HSYN_PARA_FP(vm.hfront_porch));
regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
- DCU_VSYN_PARA_BP(vbp) |
- DCU_VSYN_PARA_PW(vsw) |
- DCU_VSYN_PARA_FP(vfp));
+ DCU_VSYN_PARA_BP(vm.vback_porch) |
+ DCU_VSYN_PARA_PW(vm.vsync_len) |
+ DCU_VSYN_PARA_FP(vm.vfront_porch));
regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
- DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
- DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
+ DCU_DISP_SIZE_DELTA_Y(vm.vactive) |
+ DCU_DISP_SIZE_DELTA_X(vm.hactive));
regmap_write(fsl_dev->regmap, DCU_SYN_POL, pol);
regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
DCU_BGND_G(0) | DCU_BGND_B(0));
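
drm_display_mode_to_videomode() performs exactly the porch/sync arithmetic the deleted lines open-coded, so the conversion is worth spelling out once (a sketch; the helper itself is the real API):

#include <drm/drm_modes.h>
#include <video/videomode.h>

/* Sketch: what the helper computes from a drm_display_mode --
 * the same values the old code derived by hand:
 *   hfront_porch = hsync_start - hdisplay
 *   hsync_len    = hsync_end   - hsync_start
 *   hback_porch  = htotal      - hsync_end
 * (vertical members follow the same pattern), plus DISPLAY_FLAGS_*
 * mapped from the DRM_MODE_FLAG_* sync polarities.
 */
static void example_mode_to_videomode(const struct drm_display_mode *mode,
				      struct videomode *vm)
{
	drm_display_mode_to_videomode(mode, vm);
}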
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0496be5212e1..ceddc3e29258 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -89,20 +90,11 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
"Invalid legacyfb_depth. Defaulting to 24bpp\n");
legacyfb_depth = 24;
}
- fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1);
- if (IS_ERR(fsl_dev->fbdev)) {
- ret = PTR_ERR(fsl_dev->fbdev);
- fsl_dev->fbdev = NULL;
- goto done;
- }
return 0;
done:
drm_kms_helper_poll_fini(dev);
- if (fsl_dev->fbdev)
- drm_fbdev_cma_fini(fsl_dev->fbdev);
-
drm_mode_config_cleanup(dev);
drm_irq_uninstall(dev);
dev->dev_private = NULL;
@@ -112,14 +104,9 @@ done:
static void fsl_dcu_unload(struct drm_device *dev)
{
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
drm_atomic_helper_shutdown(dev);
drm_kms_helper_poll_fini(dev);
- if (fsl_dev->fbdev)
- drm_fbdev_cma_fini(fsl_dev->fbdev);
-
drm_mode_config_cleanup(dev);
drm_irq_uninstall(dev);
@@ -147,19 +134,11 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static void fsl_dcu_drm_lastclose(struct drm_device *dev)
-{
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(fsl_dev->fbdev);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops);
static struct drm_driver fsl_dcu_drm_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
| DRIVER_PRIME | DRIVER_ATOMIC,
- .lastclose = fsl_dcu_drm_lastclose,
.load = fsl_dcu_load,
.unload = fsl_dcu_unload,
.irq_handler = fsl_dcu_drm_irq,
@@ -355,6 +334,8 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
if (ret < 0)
goto put;
+ drm_fbdev_generic_setup(drm, legacyfb_depth);
+
return 0;
put:
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index 93bfb98012d4..cb87bb74cb87 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -191,7 +191,6 @@ struct fsl_dcu_drm_device {
/* protects hardware register */
spinlock_t irq_lock;
struct drm_device *drm;
- struct drm_fbdev_cma *fbdev;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct fsl_dcu_drm_connector connector;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 45c25a488f42..3c168ae77b0c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -49,8 +49,6 @@ struct hibmc_drm_private {
bool mode_config_initialized;
/* ttm */
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
bool initialized;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 2e3e0bdb8932..dd383267884c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -29,55 +29,6 @@ hibmc_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct hibmc_drm_private, bdev);
}
-static int
-hibmc_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void
-hibmc_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc)
-{
- int ret;
-
- hibmc->mem_global_ref.global_type = DRM_GLOBAL_TTM_MEM;
- hibmc->mem_global_ref.size = sizeof(struct ttm_mem_global);
- hibmc->mem_global_ref.init = &hibmc_ttm_mem_global_init;
- hibmc->mem_global_ref.release = &hibmc_ttm_mem_global_release;
- ret = drm_global_item_ref(&hibmc->mem_global_ref);
- if (ret) {
- DRM_ERROR("could not get ref on ttm global: %d\n", ret);
- return ret;
- }
-
- hibmc->bo_global_ref.mem_glob =
- hibmc->mem_global_ref.object;
- hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
- hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
- hibmc->bo_global_ref.ref.init = &ttm_bo_global_init;
- hibmc->bo_global_ref.ref.release = &ttm_bo_global_release;
- ret = drm_global_item_ref(&hibmc->bo_global_ref.ref);
- if (ret) {
- DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret);
- drm_global_item_unref(&hibmc->mem_global_ref);
- return ret;
- }
- return 0;
-}
-
-static void
-hibmc_ttm_global_release(struct hibmc_drm_private *hibmc)
-{
- drm_global_item_unref(&hibmc->bo_global_ref.ref);
- drm_global_item_unref(&hibmc->mem_global_ref);
- hibmc->mem_global_ref.release = NULL;
-}
-
static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);
@@ -237,18 +188,12 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
struct drm_device *dev = hibmc->dev;
struct ttm_bo_device *bdev = &hibmc->bdev;
- ret = hibmc_ttm_global_init(hibmc);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&hibmc->bdev,
- hibmc->bo_global_ref.ref.object,
&hibmc_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
- hibmc_ttm_global_release(hibmc);
DRM_ERROR("error initializing bo driver: %d\n", ret);
return ret;
}
@@ -256,7 +201,6 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
hibmc->fb_size >> PAGE_SHIFT);
if (ret) {
- hibmc_ttm_global_release(hibmc);
DRM_ERROR("failed ttm VRAM init: %d\n", ret);
return ret;
}
@@ -271,7 +215,6 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
return;
ttm_bo_device_release(&hibmc->bdev);
- hibmc_ttm_global_release(hibmc);
hibmc->mm_inited = false;
}
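
With the global memory/BO accounting folded into TTM itself, per-driver init shrinks to the two calls left in the hunk above. For reference, the surviving shape as a sketch (field names as in hibmc_drm_drv.h):

/* Sketch of the post-cleanup TTM bring-up: no drm_global_reference
 * plumbing, just device init plus a VRAM manager.
 */
static int example_mm_init(struct hibmc_drm_private *hibmc)
{
	int ret;

	ret = ttm_bo_device_init(&hibmc->bdev, &hibmc_bo_driver,
				 hibmc->dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET, true);
	if (ret)
		return ret;

	return ttm_bo_init_mm(&hibmc->bdev, TTM_PL_VRAM,
			      hibmc->fb_size >> PAGE_SHIFT);
}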
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1c2857f13ad4..19b5fe5016bf 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -75,6 +75,7 @@ i915-y += i915_cmd_parser.o \
i915_gemfs.o \
i915_query.o \
i915_request.o \
+ i915_scheduler.o \
i915_timeline.o \
i915_trace_points.o \
i915_vma.o \
@@ -112,6 +113,8 @@ i915-y += intel_audio.o \
intel_bios.o \
intel_cdclk.o \
intel_color.o \
+ intel_combo_phy.o \
+ intel_connector.o \
intel_display.o \
intel_dpio_phy.o \
intel_dpll_mgr.o \
@@ -120,9 +123,9 @@ i915-y += intel_audio.o \
intel_frontbuffer.o \
intel_hdcp.o \
intel_hotplug.o \
- intel_modes.o \
intel_overlay.o \
intel_psr.o \
+ intel_quirks.o \
intel_sideband.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
@@ -142,6 +145,7 @@ i915-y += dvo_ch7017.o \
intel_dp_link_training.o \
intel_dp_mst.o \
intel_dp.o \
+ intel_dsi.o \
intel_dsi_dcs_backlight.o \
intel_dsi_vbt.o \
intel_dvo.o \
@@ -153,14 +157,17 @@ i915-y += dvo_ch7017.o \
intel_sdvo.o \
intel_tv.o \
vlv_dsi.o \
- vlv_dsi_pll.o
+ vlv_dsi_pll.o \
+ intel_vdsc.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_random.o \
selftests/i915_selftest.o \
- selftests/igt_flush_test.o
+ selftests/igt_flush_test.o \
+ selftests/igt_reset.o \
+ selftests/igt_spinner.o
# virtual gpu code
i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2402395a068d..58e166effa45 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1905,7 +1905,6 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
}
- mm->ggtt_mm.last_partial_off = -1UL;
return mm;
}
@@ -1930,7 +1929,6 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
- mm->ggtt_mm.last_partial_off = -1UL;
}
vgpu_free_mm(mm);
@@ -2168,6 +2166,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
struct intel_gvt_gtt_entry e, m;
dma_addr_t dma_addr;
int ret;
+ struct intel_gvt_partial_pte *partial_pte, *pos, *n;
+ bool partial_update = false;
if (bytes != 4 && bytes != 8)
return -EINVAL;
@@ -2178,68 +2178,57 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (!vgpu_gmadr_is_valid(vgpu, gma))
return 0;
- ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
-
+ e.type = GTT_TYPE_GGTT_PTE;
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
/* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
- * write, we assume the two 4 bytes writes are consecutive.
- * Otherwise, we abort and report error
+ * write, save the first 4 bytes in a list and update the virtual
+ * PTE. Only update the shadow PTE when the second 4 bytes arrive.
*/
if (bytes < info->gtt_entry_size) {
- if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
- /* the first partial part*/
- ggtt_mm->ggtt_mm.last_partial_off = off;
- ggtt_mm->ggtt_mm.last_partial_data = e.val64;
- return 0;
- } else if ((g_gtt_index ==
- (ggtt_mm->ggtt_mm.last_partial_off >>
- info->gtt_entry_size_shift)) &&
- (off != ggtt_mm->ggtt_mm.last_partial_off)) {
- /* the second partial part */
-
- int last_off = ggtt_mm->ggtt_mm.last_partial_off &
- (info->gtt_entry_size - 1);
-
- memcpy((void *)&e.val64 + last_off,
- (void *)&ggtt_mm->ggtt_mm.last_partial_data +
- last_off, bytes);
-
- ggtt_mm->ggtt_mm.last_partial_off = -1UL;
- } else {
- int last_offset;
-
- gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
- ggtt_mm->ggtt_mm.last_partial_off, off,
- bytes, info->gtt_entry_size);
-
- /* set host ggtt entry to scratch page and clear
- * virtual ggtt entry as not present for last
- * partially write offset
- */
- last_offset = ggtt_mm->ggtt_mm.last_partial_off &
- (~(info->gtt_entry_size - 1));
-
- ggtt_get_host_entry(ggtt_mm, &m, last_offset);
- ggtt_invalidate_pte(vgpu, &m);
- ops->set_pfn(&m, gvt->gtt.scratch_mfn);
- ops->clear_present(&m);
- ggtt_set_host_entry(ggtt_mm, &m, last_offset);
- ggtt_invalidate(gvt->dev_priv);
-
- ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
- ops->clear_present(&e);
- ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
-
- ggtt_mm->ggtt_mm.last_partial_off = off;
- ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+ bool found = false;
+
+ list_for_each_entry_safe(pos, n,
+ &ggtt_mm->ggtt_mm.partial_pte_list, list) {
+ if (g_gtt_index == pos->offset >>
+ info->gtt_entry_size_shift) {
+ if (off != pos->offset) {
+ /* the second partial part */
+ int last_off = pos->offset &
+ (info->gtt_entry_size - 1);
+
+ memcpy((void *)&e.val64 + last_off,
+ (void *)&pos->data + last_off,
+ bytes);
+
+ list_del(&pos->list);
+ kfree(pos);
+ found = true;
+ break;
+ }
+
+ /* update of the first partial part */
+ pos->data = e.val64;
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+ return 0;
+ }
+ }
- return 0;
+ if (!found) {
+ /* the first partial part */
+ partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
+ if (!partial_pte)
+ return -ENOMEM;
+ partial_pte->offset = off;
+ partial_pte->data = e.val64;
+ list_add_tail(&partial_pte->list,
+ &ggtt_mm->ggtt_mm.partial_pte_list);
+ partial_update = true;
}
}
- if (ops->test_present(&e)) {
+ if (!partial_update && (ops->test_present(&e))) {
gfn = ops->get_pfn(&e);
m = e;
@@ -2263,16 +2252,18 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
} else
ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
} else {
- ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
- ggtt_invalidate_pte(vgpu, &m);
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
ops->clear_present(&m);
}
out:
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+ ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
+ ggtt_invalidate_pte(vgpu, &e);
+
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
ggtt_invalidate(gvt->dev_priv);
- ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
return 0;
}
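
The list-based rework means a half-written PTE no longer has to be the most recent one: each pending first half is keyed by its GTT index and completed (or refreshed) independently. A condensed sketch of that bookkeeping, with example_* names standing in for the gvt types:

#include <linux/list.h>
#include <linux/slab.h>

struct example_partial_pte {
	unsigned long offset;	/* byte offset of the cached half */
	u64 data;		/* PTE value accumulated so far */
	struct list_head list;
};

/* Returns true when the write at @off completes a cached partial
 * entry for the same PTE. (The real code merges the cached half
 * with a memcpy() at the right byte offset; that detail is
 * elided here.)
 */
static bool example_complete_partial(struct list_head *cache,
				     unsigned long off,
				     unsigned int entry_shift)
{
	struct example_partial_pte *pos, *n;

	list_for_each_entry_safe(pos, n, cache, list) {
		if (off >> entry_shift != pos->offset >> entry_shift)
			continue;		/* different PTE */
		if (off == pos->offset)
			return false;		/* same half rewritten */
		list_del(&pos->list);		/* both halves seen */
		kfree(pos);
		return true;
	}

	return false;
}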
@@ -2430,6 +2421,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
intel_vgpu_reset_ggtt(vgpu, false);
+ INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
+
return create_scratch_page_tree(vgpu);
}
@@ -2454,6 +2447,14 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
+ struct intel_gvt_partial_pte *pos;
+
+ list_for_each_entry(pos,
+ &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) {
+ gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
+ pos->offset, pos->data);
+ kfree(pos);
+ }
intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
vgpu->gtt.ggtt_mm = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 7a9b36176efb..d8cb04cc946d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -35,7 +35,6 @@
#define _GVT_GTT_H_
#define I915_GTT_PAGE_SHIFT 12
-#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1))
struct intel_vgpu_mm;
@@ -133,6 +132,12 @@ enum intel_gvt_mm_type {
#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES
+struct intel_gvt_partial_pte {
+ unsigned long offset;
+ u64 data;
+ struct list_head list;
+};
+
struct intel_vgpu_mm {
enum intel_gvt_mm_type type;
struct intel_vgpu *vgpu;
@@ -157,8 +162,7 @@ struct intel_vgpu_mm {
} ppgtt_mm;
struct {
void *virtual_ggtt;
- unsigned long last_partial_off;
- u64 last_partial_data;
+ struct list_head partial_pte_list;
} ggtt_mm;
};
};
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 90f50f67909a..aa280bb07125 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1609,7 +1609,7 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
return 0;
}
-static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
vgpu_vreg(vgpu, offset) = 0;
@@ -2607,6 +2607,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+ MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
return 0;
}
@@ -3205,9 +3208,6 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
- MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
- MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
-
MMIO_D(RC6_CTX_BASE, D_BXT);
MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 10e63eea5492..36a5147cd01e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -131,7 +131,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
- {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */
+ {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index ea34003d6dd2..b8fbe3fabea3 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -334,6 +334,28 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}
+static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+ struct i915_gem_context *ctx)
+{
+ struct intel_vgpu_mm *mm = workload->shadow_mm;
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+ int i = 0;
+
+ if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
+ return -1;
+
+ if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
+ } else {
+ for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
+ px_dma(ppgtt->pdp.page_directory[i]) =
+ mm->ppgtt_mm.shadow_pdps[i];
+ }
+ }
+
+ return 0;
+}
+
/**
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadow it as well, including the ring buffer, wa_ctx and ctx.
@@ -358,6 +380,12 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->req)
return 0;
+ ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+ if (ret < 0) {
+ gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ return ret;
+ }
+
/* pin shadow context by gvt even the shadow context will be pinned
* when i915 alloc request. That is because gvt will update the guest
* context from shadow context when workload is completed, and at that
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4f3ac0a12889..38dcee1ca062 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -943,30 +943,30 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
size_t count, loff_t *pos)
{
- struct i915_gpu_state *error = file->private_data;
- struct drm_i915_error_state_buf str;
+ struct i915_gpu_state *error;
ssize_t ret;
- loff_t tmp;
+ void *buf;
+ error = file->private_data;
if (!error)
return 0;
- ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
- if (ret)
- return ret;
+ /* Bounce buffer required because of kernfs __user API convenience. */
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- ret = i915_error_state_to_str(&str, error);
- if (ret)
+ ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
+ if (ret <= 0)
goto out;
- tmp = 0;
- ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
- if (ret < 0)
- goto out;
+ if (!copy_to_user(ubuf, buf, ret))
+ *pos += ret;
+ else
+ ret = -EFAULT;
- *pos = str.start + ret;
out:
- i915_error_state_buf_release(&str);
+ kfree(buf);
return ret;
}
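
The new read path is the standard debugfs bounce-buffer shape: render into kernel memory, copy_to_user(), and advance the file position only on success. Generalized as a sketch (the fill callback stands in for i915_gpu_state_copy_to_buffer(); example_bounce_read is hypothetical):

#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t example_bounce_read(char __user *ubuf, size_t count,
				   loff_t *pos,
				   ssize_t (*fill)(void *buf, loff_t off,
						   size_t len))
{
	void *buf;
	ssize_t ret;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = fill(buf, *pos, count);
	if (ret > 0) {
		if (copy_to_user(ubuf, buf, ret))
			ret = -EFAULT;
		else
			*pos += ret;
	}

	kfree(buf);
	return ret;
}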
@@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
if (!IS_GEN5(dev_priv))
return -ENODEV;
+ intel_runtime_pm_get(dev_priv);
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
seq_printf(m, "GFX power: %ld\n", gfx);
seq_printf(m, "Total power: %ld\n", chipset + gfx);
+ intel_runtime_pm_put(dev_priv);
+
return 0;
}
@@ -2215,8 +2219,23 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ u32 act_freq = rps->cur_freq;
struct drm_file *file;
+ if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ mutex_lock(&dev_priv->pcu_lock);
+ act_freq = vlv_punit_read(dev_priv,
+ PUNIT_REG_GPU_FREQ_STS);
+ act_freq = (act_freq >> 8) & 0xff;
+ mutex_unlock(&dev_priv->pcu_lock);
+ } else {
+ act_freq = intel_get_cagf(dev_priv,
+ I915_READ(GEN6_RPSTAT1));
+ }
+ intel_runtime_pm_put(dev_priv);
+ }
+
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
seq_printf(m, "GPU busy? %s [%d requests]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
@@ -2224,8 +2243,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
- seq_printf(m, "Frequency requested %d\n",
- intel_gpu_freq(dev_priv, rps->cur_freq));
+ seq_printf(m, "Frequency requested %d, actual %d\n",
+ intel_gpu_freq(dev_priv, rps->cur_freq),
+ intel_gpu_freq(dev_priv, act_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
intel_gpu_freq(dev_priv, rps->min_freq),
intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
@@ -2900,16 +2920,15 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
- if (IS_KABYLAKE(dev_priv) ||
- (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
- seq_printf(m, "DC3 -> DC5 count: %d\n",
- I915_READ(SKL_CSR_DC3_DC5_COUNT));
+ if (WARN_ON(INTEL_GEN(dev_priv) > 11))
+ goto out;
+
+ seq_printf(m, "DC3 -> DC5 count: %d\n",
+ I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+ SKL_CSR_DC3_DC5_COUNT));
+ if (!IS_GEN9_LP(dev_priv))
seq_printf(m, "DC5 -> DC6 count: %d\n",
I915_READ(SKL_CSR_DC5_DC6_COUNT));
- } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
- seq_printf(m, "DC3 -> DC5 count: %d\n",
- I915_READ(BXT_CSR_DC3_DC5_COUNT));
- }
out:
seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
@@ -3049,16 +3068,17 @@ static void intel_connector_info(struct seq_file *m,
seq_printf(m, "connector %d: type %s, status: %s\n",
connector->base.id, connector->name,
drm_get_connector_status_name(connector->status));
- if (connector->status == connector_status_connected) {
- seq_printf(m, "\tname: %s\n", connector->display_info.name);
- seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
- connector->display_info.width_mm,
- connector->display_info.height_mm);
- seq_printf(m, "\tsubpixel order: %s\n",
- drm_get_subpixel_order_name(connector->display_info.subpixel_order));
- seq_printf(m, "\tCEA rev: %d\n",
- connector->display_info.cea_rev);
- }
+
+ if (connector->status == connector_status_disconnected)
+ return;
+
+ seq_printf(m, "\tname: %s\n", connector->display_info.name);
+ seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+ connector->display_info.width_mm,
+ connector->display_info.height_mm);
+ seq_printf(m, "\tsubpixel order: %s\n",
+ drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+ seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
if (!intel_encoder)
return;
@@ -3355,13 +3375,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
- struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
- int i;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
+ struct i915_wa *wa;
+ unsigned int i;
- seq_printf(m, "Workarounds applied: %d\n", wa->count);
- for (i = 0; i < wa->count; ++i)
+ seq_printf(m, "Workarounds applied: %u\n", wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
- wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
+ i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
return 0;
}
@@ -3421,31 +3443,32 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct skl_ddb_allocation *ddb;
struct skl_ddb_entry *entry;
- enum pipe pipe;
- int plane;
+ struct intel_crtc *crtc;
if (INTEL_GEN(dev_priv) < 9)
return -ENODEV;
drm_modeset_lock_all(dev);
- ddb = &dev_priv->wm.skl_hw.ddb;
-
seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
- for_each_pipe(dev_priv, pipe) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_universal_plane(dev_priv, pipe, plane) {
- entry = &ddb->plane[pipe][plane];
- seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
entry->start, entry->end,
skl_ddb_entry_size(entry));
}
- entry = &ddb->plane[pipe][PLANE_CURSOR];
+ entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
entry->end, skl_ddb_entry_size(entry));
}
@@ -4172,6 +4195,7 @@ i915_drop_caches_set(void *data, u64 val)
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
+ intel_runtime_pm_get(i915);
if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
i915_gem_set_wedged(i915);
@@ -4181,7 +4205,7 @@ i915_drop_caches_set(void *data, u64 val)
if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
- return ret;
+ goto out;
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(i915,
@@ -4189,11 +4213,8 @@ i915_drop_caches_set(void *data, u64 val)
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (ret == 0 && val & DROP_RESET_SEQNO) {
- intel_runtime_pm_get(i915);
+ if (ret == 0 && val & DROP_RESET_SEQNO)
ret = i915_gem_set_global_seqno(&i915->drm, 1);
- intel_runtime_pm_put(i915);
- }
if (val & DROP_RETIRE)
i915_retire_requests(i915);
@@ -4231,6 +4252,9 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_FREED)
i915_gem_drain_freed_objects(i915);
+out:
+ intel_runtime_pm_put(i915);
+
return ret;
}
@@ -4331,7 +4355,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
for (s = 0; s < info->sseu.max_slices; s++) {
/*
* FIXME: Valid SS Mask respects the spec and read
- * only valid bits for those registers, excluding reserverd
+ * only valid bits for those registers, excluding reserved
* although this seems wrong because it would leave many
* subslices without ACK.
*/
@@ -4571,6 +4595,13 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ /* Synchronize with everything first in case there's been an HPD
+ * storm, but we haven't finished handling it in the kernel yet
+ */
+ synchronize_irq(dev_priv->drm.irq);
+ flush_work(&dev_priv->hotplug.dig_port_work);
+ flush_work(&dev_priv->hotplug.hotplug_work);
+
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
yesno(delayed_work_pending(&hotplug->reenable_work)));
@@ -4641,24 +4672,122 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
.write = i915_hpd_storm_ctl_write
};
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Enabled: %s\n",
+ yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+
+ return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_short_storm_ctl_show,
+ inode->i_private);
+}
+
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ char *newline;
+ char tmp[16];
+ int i;
+ bool new_state;
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ /* Reset to the "default" state for this system */
+ if (strcmp(tmp, "reset") == 0)
+ new_state = !HAS_DP_MST(dev_priv);
+ else if (kstrtobool(tmp, &new_state) != 0)
+ return -EINVAL;
+
+ DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
+ new_state ? "En" : "Dis");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_short_storm_enabled = new_state;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+ return len;
+}
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_short_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_short_storm_ctl_write,
+};
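
The write handler above follows the usual debugfs text-input recipe: bounded copy_from_user(), NUL termination, newline strip, then kstrtobool() with a "reset" keyword as an escape hatch. Isolated as a sketch (example_parse_bool is a hypothetical name):

#include <linux/kernel.h>	/* kstrtobool() */
#include <linux/string.h>
#include <linux/uaccess.h>

static int example_parse_bool(const char __user *ubuf, size_t len,
			      bool reset_default, bool *out)
{
	char tmp[16];
	char *newline;

	if (len >= sizeof(tmp))
		return -EINVAL;
	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;
	tmp[len] = '\0';

	/* Strip a trailing newline from `echo` and friends */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0) {
		*out = reset_default;
		return 0;
	}

	return kstrtobool(tmp, out);
}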
+
static int i915_drrs_ctl_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *intel_crtc;
- struct intel_encoder *encoder;
- struct intel_dp *intel_dp;
+ struct intel_crtc *crtc;
if (INTEL_GEN(dev_priv) < 7)
return -ENODEV;
- drm_modeset_lock_all(dev);
- for_each_intel_crtc(dev, intel_crtc) {
- if (!intel_crtc->base.state->active ||
- !intel_crtc->config->has_drrs)
- continue;
+ for_each_intel_crtc(dev, crtc) {
+ struct drm_connector_list_iter conn_iter;
+ struct intel_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_crtc_commit *commit;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (!crtc_state->base.active ||
+ !crtc_state->has_drrs)
+ goto out;
- for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
+ commit = crtc_state->base.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (ret)
+ goto out;
+ }
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp;
+
+ if (!(crtc_state->base.connector_mask &
+ drm_connector_mask(connector)))
+ continue;
+
+ encoder = intel_attached_encoder(connector);
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
@@ -4668,13 +4797,18 @@ static int i915_drrs_ctl_set(void *data, u64 val)
intel_dp = enc_to_intel_dp(&encoder->base);
if (val)
intel_edp_drrs_enable(intel_dp,
- intel_crtc->config);
+ crtc_state);
else
intel_edp_drrs_disable(intel_dp,
- intel_crtc->config);
+ crtc_state);
}
+ drm_connector_list_iter_end(&conn_iter);
+
+out:
+ drm_modeset_unlock(&crtc->base.mutex);
+ if (ret)
+ return ret;
}
- drm_modeset_unlock_all(dev);
return 0;
}
@@ -4818,6 +4952,7 @@ static const struct i915_debugfs_files {
{"i915_guc_log_level", &i915_guc_log_level_fops},
{"i915_guc_log_relay", &i915_guc_log_relay_fops},
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+ {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
{"i915_ipc_status", &i915_ipc_status_fops},
{"i915_drrs_ctl", &i915_drrs_ctl_fops},
{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
@@ -4899,13 +5034,10 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
continue;
err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
- if (err <= 0) {
- DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
- size, b->offset, err);
- continue;
- }
-
- seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
+ if (err < 0)
+ seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
+ else
+ seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
}
return 0;
@@ -4934,6 +5066,28 @@ static int i915_panel_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);
+static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ /* Bail out if the connector has no HDCP support */
+ if (!intel_connector->hdcp.shim)
+ return -EINVAL;
+
+ seq_printf(m, "%s:%d HDCP version: ", connector->name,
+ connector->base.id);
+ seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
+ "None" : "HDCP1.4");
+ seq_puts(m, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+
/**
* i915_debugfs_connector_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@@ -4963,5 +5117,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
connector, &i915_psr_sink_status_fops);
}
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+ connector, &i915_hdcp_sink_capability_fops);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 44e2c0f5ec50..b310a897a4ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -53,6 +53,7 @@
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
+#include "intel_workarounds.h"
static struct drm_driver driver;
@@ -287,7 +288,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
* Use PCH_NOP (PCH but no South Display) for PCH platforms without
* display.
*/
- if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+ if (pch && !HAS_DISPLAY(dev_priv)) {
DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
dev_priv->pch_type = PCH_NOP;
dev_priv->pch_id = 0;
@@ -345,7 +346,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_WT(dev_priv);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
- value = USES_PPGTT(dev_priv);
+ value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
break;
case I915_PARAM_HAS_SEMAPHORES:
value = HAS_LEGACY_SEMAPHORES(dev_priv);
@@ -645,6 +646,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (i915_inject_load_failure())
return -ENODEV;
+ if (HAS_DISPLAY(dev_priv)) {
+ ret = drm_vblank_init(&dev_priv->drm,
+ INTEL_INFO(dev_priv)->num_pipes);
+ if (ret)
+ goto out;
+ }
+
intel_bios_init(dev_priv);
/* If we have > 1 VGA cards, then we need to arbitrate access
@@ -687,9 +695,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_modeset;
- intel_setup_overlay(dev_priv);
+ intel_overlay_setup(dev_priv);
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return 0;
ret = intel_fbdev_init(dev);
@@ -699,6 +707,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(dev_priv);
+ intel_init_ipc(dev_priv);
+
return 0;
cleanup_gem:
@@ -859,6 +869,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_HSW_EARLY_SDV(dev_priv);
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
+ pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
if (pre) {
DRM_ERROR("This is a pre-production stepping. "
@@ -1030,6 +1041,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
err_uncore:
intel_uncore_fini(dev_priv);
+ i915_mmio_cleanup(dev_priv);
err_bridge:
pci_dev_put(dev_priv->bridge_dev);
@@ -1049,17 +1061,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
- /*
- * i915.enable_ppgtt is read-only, so do an early pass to validate the
- * user's requested state against the hardware/driver capabilities. We
- * do this now so that we can print out any log messages once rather
- * than every time we check intel_enable_ppgtt().
- */
- i915_modparams.enable_ppgtt =
- intel_sanitize_enable_ppgtt(dev_priv,
- i915_modparams.enable_ppgtt);
- DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
-
intel_gvt_sanitize_options(dev_priv);
}
@@ -1175,8 +1176,6 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
return -EINVAL;
}
- dram_info->valid_dimm = true;
-
/*
* If any of the channel is single rank channel, worst case output
* will be same as if single rank memory, so consider single rank
@@ -1193,8 +1192,7 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
return -EINVAL;
}
- if (ch0.is_16gb_dimm || ch1.is_16gb_dimm)
- dram_info->is_16gb_dimm = true;
+ dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
val_ch1,
@@ -1314,7 +1312,6 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
return -EINVAL;
}
- dram_info->valid_dimm = true;
dram_info->valid = true;
return 0;
}
@@ -1327,19 +1324,24 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
int ret;
dram_info->valid = false;
- dram_info->valid_dimm = false;
- dram_info->is_16gb_dimm = false;
dram_info->rank = I915_DRAM_RANK_INVALID;
dram_info->bandwidth_kbps = 0;
dram_info->num_channels = 0;
+ /*
+ * Assume 16Gb DIMMs are present until proven otherwise.
+ * This is only used for the level 0 watermark latency
+ * w/a which does not apply to bxt/glk.
+ */
+ dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
+
if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
return;
/* Need to calculate bandwidth only for Gen9 */
if (IS_BROXTON(dev_priv))
ret = bxt_get_dram_info(dev_priv);
- else if (INTEL_GEN(dev_priv) == 9)
+ else if (IS_GEN9(dev_priv))
ret = skl_get_dram_info(dev_priv);
else
ret = skl_dram_get_channels_info(dev_priv);
@@ -1374,6 +1376,29 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
+ if (HAS_PPGTT(dev_priv)) {
+ if (intel_vgpu_active(dev_priv) &&
+ !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
+ i915_report_error(dev_priv,
+ "incompatible vGPU found, support for isolated ppGTT required\n");
+ return -ENXIO;
+ }
+ }
+
+ if (HAS_EXECLISTS(dev_priv)) {
+ /*
+ * Older GVT emulation depends upon intercepting CSB mmio,
+ * which we no longer use, preferring to use the HWSP cache
+ * instead.
+ */
+ if (intel_vgpu_active(dev_priv) &&
+ !intel_vgpu_has_hwsp_emulation(dev_priv)) {
+ i915_report_error(dev_priv,
+ "old vGPU host found, support for HWSP emulation required\n");
+ return -ENXIO;
+ }
+ }
+
intel_sanitize_options(dev_priv);
i915_perf_init(dev_priv);
@@ -1443,6 +1468,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
+ intel_gt_init_workarounds(dev_priv);
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1542,7 +1568,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
- if (INTEL_INFO(dev_priv)->num_pipes) {
+ if (HAS_DISPLAY(dev_priv)) {
/* Must be done after probing outputs */
intel_opregion_register(dev_priv);
acpi_video_register();
@@ -1566,7 +1592,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* We need to coordinate the hotplugs with the asynchronous fbdev
* configuration, for which we use the fbdev->async_cookie.
*/
- if (INTEL_INFO(dev_priv)->num_pipes)
+ if (HAS_DISPLAY(dev_priv))
drm_kms_helper_poll_init(dev);
intel_power_domains_enable(dev_priv);
@@ -1629,14 +1655,16 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
(struct intel_device_info *)ent->driver_data;
struct intel_device_info *device_info;
struct drm_i915_private *i915;
+ int err;
i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
if (!i915)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) {
+ err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
+ if (err) {
kfree(i915);
- return NULL;
+ return ERR_PTR(err);
}
i915->drm.pdev = pdev;
@@ -1649,8 +1677,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
device_info->device_id = pdev->device;
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
- sizeof(device_info->platform_mask) * BITS_PER_BYTE);
- BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+ BITS_PER_TYPE(device_info->platform_mask));
+ BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
return i915;
}
@@ -1685,8 +1713,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
dev_priv = i915_driver_create(pdev, ent);
- if (!dev_priv)
- return -ENOMEM;
+ if (IS_ERR(dev_priv))
+ return PTR_ERR(dev_priv);
/* Disable nuclear pageflip by default on pre-ILK */
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
@@ -1710,26 +1738,12 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
- /*
- * TODO: move the vblank init and parts of modeset init steps into one
- * of the i915_driver_init_/i915_driver_register functions according
- * to the role/effect of the given init step.
- */
- if (INTEL_INFO(dev_priv)->num_pipes) {
- ret = drm_vblank_init(&dev_priv->drm,
- INTEL_INFO(dev_priv)->num_pipes);
- if (ret)
- goto out_cleanup_hw;
- }
-
ret = i915_load_modeset_init(&dev_priv->drm);
if (ret < 0)
goto out_cleanup_hw;
i915_driver_register(dev_priv);
- intel_init_ipc(dev_priv);
-
enable_rpm_wakeref_asserts(dev_priv);
i915_welcome_messages(dev_priv);
@@ -1781,7 +1795,6 @@ void i915_driver_unload(struct drm_device *dev)
i915_reset_error_state(dev_priv);
i915_gem_fini(dev_priv);
- intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini_hw(dev_priv);
@@ -1919,9 +1932,7 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_save_state(dev_priv);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
- intel_opregion_notify_adapter(dev_priv, opregion_target_state);
-
- intel_opregion_unregister(dev_priv);
+ intel_opregion_suspend(dev_priv, opregion_target_state);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1962,7 +1973,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
get_suspend_mode(dev_priv, hibernation));
ret = 0;
- if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
bxt_enable_dc9(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_enable_pc8(dev_priv);
@@ -2040,7 +2051,6 @@ static int i915_drm_resume(struct drm_device *dev)
i915_restore_state(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
- intel_opregion_setup(dev_priv);
intel_init_pch_refclk(dev_priv);
@@ -2082,12 +2092,10 @@ static int i915_drm_resume(struct drm_device *dev)
* */
intel_hpd_init(dev_priv);
- intel_opregion_register(dev_priv);
+ intel_opregion_resume(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
- intel_opregion_notify_adapter(dev_priv, PCI_D0);
-
intel_power_domains_enable(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2155,7 +2163,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_resume_early(dev_priv);
- if (IS_GEN9_LP(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2922,7 +2930,10 @@ static int intel_runtime_suspend(struct device *kdev)
intel_uncore_suspend(dev_priv);
ret = 0;
- if (IS_GEN9_LP(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ icl_display_core_uninit(dev_priv);
+ bxt_enable_dc9(dev_priv);
+ } else if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_uninit(dev_priv);
bxt_enable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3007,7 +3018,18 @@ static int intel_runtime_resume(struct device *kdev)
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
- if (IS_GEN9_LP(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ bxt_disable_dc9(dev_priv);
+ icl_display_core_init(dev_priv, true);
+ if (dev_priv->csr.dmc_payload) {
+ if (dev_priv->csr.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC6)
+ skl_enable_dc6(dev_priv);
+ else if (dev_priv->csr.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC5)
+ gen9_enable_dc5(dev_priv);
+ }
+ } else if (IS_GEN9_LP(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8624b4bdc242..b1c31967194b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,7 +53,9 @@
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
+#include <drm/drm_dsc.h>
+#include "i915_fixed.h"
#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -67,6 +69,7 @@
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
+#include "intel_workarounds.h"
#include "intel_uc.h"
#include "i915_gem.h"
@@ -87,8 +90,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180921"
-#define DRIVER_TIMESTAMP 1537521997
+#define DRIVER_DATE "20181204"
+#define DRIVER_TIMESTAMP 1543944377
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -127,144 +130,6 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
-typedef struct {
- uint32_t val;
-} uint_fixed_16_16_t;
-
-#define FP_16_16_MAX ({ \
- uint_fixed_16_16_t fp; \
- fp.val = UINT_MAX; \
- fp; \
-})
-
-static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
-{
- if (val.val == 0)
- return true;
- return false;
-}
-
-static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
-{
- uint_fixed_16_16_t fp;
-
- WARN_ON(val > U16_MAX);
-
- fp.val = val << 16;
- return fp;
-}
-
-static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
-{
- return DIV_ROUND_UP(fp.val, 1 << 16);
-}
-
-static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
-{
- return fp.val >> 16;
-}
-
-static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
- uint_fixed_16_16_t min2)
-{
- uint_fixed_16_16_t min;
-
- min.val = min(min1.val, min2.val);
- return min;
-}
-
-static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
- uint_fixed_16_16_t max2)
-{
- uint_fixed_16_16_t max;
-
- max.val = max(max1.val, max2.val);
- return max;
-}
-
-static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
-{
- uint_fixed_16_16_t fp;
- WARN_ON(val > U32_MAX);
- fp.val = (uint32_t) val;
- return fp;
-}
-
-static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
- uint_fixed_16_16_t d)
-{
- return DIV_ROUND_UP(val.val, d.val);
-}
-
-static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
- uint_fixed_16_16_t mul)
-{
- uint64_t intermediate_val;
-
- intermediate_val = (uint64_t) val * mul.val;
- intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
- WARN_ON(intermediate_val > U32_MAX);
- return (uint32_t) intermediate_val;
-}
-
-static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
- uint_fixed_16_16_t mul)
-{
- uint64_t intermediate_val;
-
- intermediate_val = (uint64_t) val.val * mul.val;
- intermediate_val = intermediate_val >> 16;
- return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
-{
- uint64_t interm_val;
-
- interm_val = (uint64_t)val << 16;
- interm_val = DIV_ROUND_UP_ULL(interm_val, d);
- return clamp_u64_to_fixed16(interm_val);
-}
-
-static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
- uint_fixed_16_16_t d)
-{
- uint64_t interm_val;
-
- interm_val = (uint64_t)val << 16;
- interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
- WARN_ON(interm_val > U32_MAX);
- return (uint32_t) interm_val;
-}
-
-static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
- uint_fixed_16_16_t mul)
-{
- uint64_t intermediate_val;
-
- intermediate_val = (uint64_t) val * mul.val;
- return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
- uint_fixed_16_16_t add2)
-{
- uint64_t interm_sum;
-
- interm_sum = (uint64_t) add1.val + add2.val;
- return clamp_u64_to_fixed16(interm_sum);
-}
-
-static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
- uint32_t add2)
-{
- uint64_t interm_sum;
- uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
-
- interm_sum = (uint64_t) add1.val + interm_add2.val;
- return clamp_u64_to_fixed16(interm_sum);
-}
-
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -283,7 +148,8 @@ enum hpd_pin {
#define for_each_hpd_pin(__pin) \
for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
-#define HPD_STORM_DEFAULT_THRESHOLD 5
+/* Threshold == 5 for long IRQs, 50 for short */
+#define HPD_STORM_DEFAULT_THRESHOLD 50
struct i915_hotplug {
struct work_struct hotplug_work;
@@ -308,6 +174,8 @@ struct i915_hotplug {
bool poll_enabled;
unsigned int hpd_storm_threshold;
+ /* Whether or not to count short HPD IRQs in HPD storms */
+ u8 hpd_short_storm_enabled;
/*
* if we get a HPD irq from DP and a HPD irq from non-DP
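Raising the default threshold from 5 to 50 pairs with the new hpd_short_storm_enabled knob: once short IRQs are counted too, a sane threshold must be an order of magnitude larger. The storm detector itself is just a windowed counter; a self-contained sketch, with the window length and helper names invented for illustration (the kernel's detector lives in intel_hotplug.c):

#include <stdbool.h>
#include <stdio.h>

struct hpd_pin_state {
	unsigned int count;     /* IRQs seen in the current window */
	unsigned int threshold; /* e.g. 5 for long-only, 50 with short IRQs */
	long window_start;      /* start of the current counting window */
};

/* Returns true when the pin should be declared "stormy" and moved to polling. */
static bool hpd_irq_storm_detect(struct hpd_pin_state *pin, long now,
				 long window_len, bool count_this_irq)
{
	if (now - pin->window_start > window_len) {
		pin->window_start = now;	/* window expired: restart */
		pin->count = 0;
	}
	if (count_this_irq && ++pin->count > pin->threshold)
		return true;
	return false;
}

int main(void)
{
	struct hpd_pin_state pin = { .threshold = 5, .window_start = 0 };

	for (int t = 0; t < 7; t++)
		if (hpd_irq_storm_detect(&pin, t, 1000, true))
			printf("storm detected at irq %d\n", t);
	return 0;
}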
@@ -465,8 +333,10 @@ struct drm_i915_display_funcs {
struct intel_csr {
struct work_struct work;
const char *fw_path;
+ uint32_t required_version;
+ uint32_t max_fw_size; /* bytes */
uint32_t *dmc_payload;
- uint32_t dmc_fw_size;
+ uint32_t dmc_fw_size; /* dwords */
uint32_t version;
uint32_t mmio_count;
i915_reg_t mmioaddr[8];
@@ -546,6 +416,8 @@ struct intel_fbc {
int adjusted_y;
int y;
+
+ uint16_t pixel_blend_mode;
} plane;
struct {
@@ -624,17 +496,19 @@ struct i915_psr {
bool sink_support;
bool prepared, enabled;
struct intel_dp *dp;
+ enum pipe pipe;
bool active;
struct work_struct work;
unsigned busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
bool colorimetry_support;
- bool alpm;
bool psr2_enabled;
u8 sink_sync_latency;
ktime_t last_entry_attempt;
ktime_t last_exit;
+ bool sink_not_reliable;
+ bool irq_aux_error;
};
enum intel_pch {
@@ -918,6 +792,11 @@ struct i915_power_well_desc {
/* The pw is backing the VGA functionality */
bool has_vga:1;
bool has_fuses:1;
+ /*
+ * The pw is for an ICL+ TypeC PHY port in
+ * Thunderbolt mode.
+ */
+ bool is_tc_tbt:1;
} hsw;
};
const struct i915_power_well_ops *ops;
@@ -1042,17 +921,6 @@ struct i915_gem_mm {
#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
-#define DP_AUX_A 0x40
-#define DP_AUX_B 0x10
-#define DP_AUX_C 0x20
-#define DP_AUX_D 0x30
-#define DP_AUX_E 0x50
-#define DP_AUX_F 0x60
-
-#define DDC_PIN_B 0x05
-#define DDC_PIN_C 0x04
-#define DDC_PIN_D 0x06
-
struct ddi_vbt_port_info {
int max_tmds_clock;
@@ -1099,6 +967,7 @@ struct intel_vbt_data {
unsigned int panel_type:4;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ enum drm_panel_orientation orientation;
enum drrs_support_type drrs_type;
@@ -1144,6 +1013,7 @@ struct intel_vbt_data {
u8 *data;
const u8 *sequence[MIPI_SEQ_MAX];
u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
+ enum drm_panel_orientation orientation;
} dsi;
int crt_ddc_pin;
@@ -1228,9 +1098,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
- /* packed/y */
- struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
- struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
u8 enabled_slices; /* GEN11 has configurable 2 slices */
};
@@ -1240,9 +1107,9 @@ struct skl_ddb_values {
};
struct skl_wm_level {
- bool plane_en;
uint16_t plane_res_b;
uint8_t plane_res_l;
+ bool plane_en;
};
/* Stores plane specific WM parameters */
@@ -1323,20 +1190,6 @@ struct i915_frontbuffer_tracking {
unsigned flip_bits;
};
-struct i915_wa_reg {
- u32 addr;
- u32 value;
- /* bitmask representing WA bits */
- u32 mask;
-};
-
-#define I915_MAX_WA_REGS 16
-
-struct i915_workarounds {
- struct i915_wa_reg reg[I915_MAX_WA_REGS];
- u32 count;
-};
-
struct i915_virtual_gpu {
bool active;
u32 caps;
@@ -1520,30 +1373,12 @@ struct i915_oa_ops {
bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
/**
- * @init_oa_buffer: Resets the head and tail pointers of the
- * circular buffer for periodic OA reports.
- *
- * Called when first opening a stream for OA metrics, but also may be
- * called in response to an OA buffer overflow or other error
- * condition.
- *
- * Note it may be necessary to clear the full OA buffer here as part of
- * maintaining the invariable that new reports must be written to
- * zeroed memory for us to be able to reliable detect if an expected
- * report has not yet landed in memory. (At least on Haswell the OA
- * buffer tail pointer is not synchronized with reports being visible
- * to the CPU)
- */
- void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
-
- /**
* @enable_metric_set: Selects and applies any MUX configuration to set
* up the Boolean and Custom (B/C) counters that are part of the
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
- int (*enable_metric_set)(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config);
+ int (*enable_metric_set)(struct i915_perf_stream *stream);
/**
* @disable_metric_set: Remove system constraints associated with using
@@ -1554,12 +1389,12 @@ struct i915_oa_ops {
/**
* @oa_enable: Enable periodic sampling
*/
- void (*oa_enable)(struct drm_i915_private *dev_priv);
+ void (*oa_enable)(struct i915_perf_stream *stream);
/**
* @oa_disable: Disable periodic sampling
*/
- void (*oa_disable)(struct drm_i915_private *dev_priv);
+ void (*oa_disable)(struct i915_perf_stream *stream);
/**
* @read: Copy data from the circular OA buffer into a given userspace
@@ -1804,7 +1639,7 @@ struct drm_i915_private {
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
- struct i915_workarounds workarounds;
+ struct i915_wa_list gt_wa_list;
struct i915_frontbuffer_tracking fb_tracking;
@@ -1948,7 +1783,6 @@ struct drm_i915_private {
struct dram_info {
bool valid;
- bool valid_dimm;
bool is_16gb_dimm;
u8 num_channels;
enum dram_rank {
@@ -2149,6 +1983,8 @@ struct drm_i915_private {
struct delayed_work idle_work;
ktime_t last_init_time;
+
+ struct i915_vma *scratch;
} gt;
/* perform PHY state sanity checks? */
@@ -2323,6 +2159,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
(((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
+bool i915_sg_trim(struct sg_table *orig_st);
+
static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
{
unsigned int page_sizes;
@@ -2368,20 +2206,12 @@ intel_info(const struct drm_i915_private *dev_priv)
#define REVID_FOREVER 0xff
#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
-#define GEN_FOREVER (0)
-
#define INTEL_GEN_MASK(s, e) ( \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
- GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
- (s) != GEN_FOREVER ? (s) - 1 : 0) \
-)
+ GENMASK((e) - 1, (s) - 1))
-/*
- * Returns true if Gen is in inclusive range [Start, End].
- *
- * Use GEN_FOREVER for unbound start and or end.
- */
+/* Returns true if Gen is in inclusive range [Start, End] */
#define IS_GEN(dev_priv, s, e) \
(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
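With GEN_FOREVER gone, IS_GEN(dev_priv, s, e) reduces to a plain bitmask test: a gen-N device sets bit N-1 of gen_mask, and the inclusive range [s, e] becomes GENMASK(e - 1, s - 1). A userspace demonstration, with GENMASK expanded by hand since linux/bits.h is unavailable outside the kernel:

#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's GENMASK(h, l). */
#define GENMASK(h, l) \
	(((~0ul) << (l)) & (~0ul >> (8 * sizeof(unsigned long) - 1 - (h))))

#define GEN_MASK_OF(gen) (1ul << ((gen) - 1))	/* device info: one bit per gen */

static bool is_gen_in_range(unsigned long gen_mask, int s, int e)
{
	return gen_mask & GENMASK(e - 1, s - 1);
}

int main(void)
{
	unsigned long skl = GEN_MASK_OF(9);		/* a gen9 part */

	printf("%d\n", is_gen_in_range(skl, 9, 10));	/* 1 */
	printf("%d\n", is_gen_in_range(skl, 11, 11));	/* 0 */
	return 0;
}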
@@ -2462,6 +2292,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
+ INTEL_DEVID(dev_priv) == 0x87C0)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
(dev_priv)->info.gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@@ -2593,17 +2425,22 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
-#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
-#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
+#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
+#define HAS_PPGTT(dev_priv) \
+ (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
+#define HAS_FULL_PPGTT(dev_priv) \
+ (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
+#define HAS_FULL_48BIT_PPGTT(dev_priv) \
+ (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
+
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
})
-#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
+#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
- ((dev_priv)->info.overlay_needs_physical)
+ ((dev_priv)->info.display.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
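Replacing the enable_ppgtt modparam with a device-info field turns the old USES_* checks into ordered comparisons against a capability level, as the HAS_PPGTT/HAS_FULL_PPGTT/HAS_FULL_48BIT_PPGTT macros above show. A sketch with the enum spelled out (the real INTEL_PPGTT_* enumerators are declared in intel_device_info.h; the names below are stand-ins):

#include <stdio.h>

/* Ordered capability levels, mirroring the shape of the kernel enum. */
enum ppgtt_level {
	PPGTT_NONE,
	PPGTT_ALIASING,
	PPGTT_FULL,
	PPGTT_FULL_4LVL,
};

#define HAS_PPGTT(lvl)		((lvl) != PPGTT_NONE)
#define HAS_FULL_PPGTT(lvl)	((lvl) >= PPGTT_FULL)
#define HAS_FULL_48BIT(lvl)	((lvl) >= PPGTT_FULL_4LVL)

int main(void)
{
	enum ppgtt_level lvl = PPGTT_FULL;

	printf("ppgtt=%d full=%d 48bit=%d\n",
	       HAS_PPGTT(lvl), HAS_FULL_PPGTT(lvl), HAS_FULL_48BIT(lvl));
	return 0;
}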
@@ -2624,31 +2461,31 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
!(IS_I915G(dev_priv) || \
IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
+#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.display.has_hotplug)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
+#define HAS_FBC(dev_priv) ((dev_priv)->info.display.has_fbc)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
-#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst)
+#define HAS_DP_MST(dev_priv) ((dev_priv)->info.display.has_dp_mst)
-#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
+#define HAS_DDI(dev_priv) ((dev_priv)->info.display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
+#define HAS_PSR(dev_priv) ((dev_priv)->info.display.has_psr)
#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
-#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
+#define HAS_CSR(dev_priv) ((dev_priv)->info.display.has_csr)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
-#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc)
+#define HAS_IPC(dev_priv) ((dev_priv)->info.display.has_ipc)
/*
* For now, anything with a GuC requires uCode loading, and then supports
@@ -2709,7 +2546,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
@@ -2721,6 +2558,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+
#include "i915_trace.h"
static inline bool intel_vtd_active(void)
@@ -2743,9 +2582,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
return IS_BROXTON(dev_priv) && intel_vtd_active();
}
-int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
- int enable_ppgtt);
-
/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -3230,7 +3066,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
@@ -3462,6 +3298,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port);
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
/* intel_acpi.c */
#ifdef CONFIG_ACPI
@@ -3483,8 +3320,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
-extern int intel_connector_register(struct drm_connector *);
-extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
bool state);
extern void intel_display_resume(struct drm_device *dev);
@@ -3497,6 +3332,9 @@ extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
bool interactive);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
+void intel_dsc_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -3584,6 +3422,12 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
+/* intel_combo_phy.c */
+void icl_combo_phys_init(struct drm_i915_private *dev_priv);
+void icl_combo_phys_uninit(struct drm_i915_private *dev_priv);
+void cnl_combo_phys_init(struct drm_i915_private *dev_priv);
+void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv);
+
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
@@ -3871,4 +3715,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
return I915_HWS_CSB_WRITE_INDEX;
}
+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+ return i915_ggtt_offset(i915->gt.scratch);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
new file mode 100644
index 000000000000..591dd89ba7af
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_FIXED_H_
+#define _I915_FIXED_H_
+
+typedef struct {
+ u32 val;
+} uint_fixed_16_16_t;
+
+#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX })
+
+static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
+{
+ return val.val == 0;
+}
+
+static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
+{
+ uint_fixed_16_16_t fp = { .val = val << 16 };
+
+ WARN_ON(val > U16_MAX);
+
+ return fp;
+}
+
+static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
+{
+ return DIV_ROUND_UP(fp.val, 1 << 16);
+}
+
+static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
+{
+ return fp.val >> 16;
+}
+
+static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
+ uint_fixed_16_16_t min2)
+{
+ uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) };
+
+ return min;
+}
+
+static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
+ uint_fixed_16_16_t max2)
+{
+ uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) };
+
+ return max;
+}
+
+static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
+{
+ uint_fixed_16_16_t fp = { .val = (u32)val };
+
+ WARN_ON(val > U32_MAX);
+
+ return fp;
+}
+
+static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
+ uint_fixed_16_16_t d)
+{
+ return DIV_ROUND_UP(val.val, d.val);
+}
+
+static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
+{
+ u64 tmp;
+
+ tmp = (u64)val * mul.val;
+ tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
+ WARN_ON(tmp > U32_MAX);
+
+ return (u32)tmp;
+}
+
+static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
+ uint_fixed_16_16_t mul)
+{
+ u64 tmp;
+
+ tmp = (u64)val.val * mul.val;
+ tmp = tmp >> 16;
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
+{
+ u64 tmp;
+
+ tmp = (u64)val << 16;
+ tmp = DIV_ROUND_UP_ULL(tmp, d);
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
+{
+ u64 tmp;
+
+ tmp = (u64)val << 16;
+ tmp = DIV_ROUND_UP_ULL(tmp, d.val);
+ WARN_ON(tmp > U32_MAX);
+
+ return (u32)tmp;
+}
+
+static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
+{
+ u64 tmp;
+
+ tmp = (u64)val * mul.val;
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
+ uint_fixed_16_16_t add2)
+{
+ u64 tmp;
+
+ tmp = (u64)add1.val + add2.val;
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
+ u32 add2)
+{
+ uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2);
+ u64 tmp;
+
+ tmp = (u64)add1.val + tmp_add2.val;
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+#endif /* _I915_FIXED_H_ */
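The helpers moved verbatim into the new header: a quantity is stored as value * 2^16 in a u32, and every product is widened to 64 bits before shifting back down. A minimal userspace round-trip in the same representation (only the helpers needed here, with shortened names; the kernel versions above additionally WARN on overflow):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t val; } fixed16;	/* stores value * 2^16 */

static fixed16 u32_to_fixed16(uint32_t v) { return (fixed16){ .val = v << 16 }; }
static uint32_t fixed16_to_u32(fixed16 f) { return f.val >> 16; }

static fixed16 mul_fixed16(fixed16 a, fixed16 b)
{
	/* widen to 64 bits so the intermediate product cannot overflow */
	return (fixed16){ .val = (uint32_t)(((uint64_t)a.val * b.val) >> 16) };
}

static fixed16 div_fixed16(uint32_t n, uint32_t d)	/* rounds up, like the kernel's */
{
	return (fixed16){ .val = (uint32_t)((((uint64_t)n << 16) + d - 1) / d) };
}

int main(void)
{
	fixed16 ratio = div_fixed16(3, 2);		   /* 1.5 */
	fixed16 r = mul_fixed16(u32_to_fixed16(10), ratio);

	printf("%u\n", fixed16_to_u32(r));		   /* 15 */
	return 0;
}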
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c8aa57ce83b..d36a9755ad91 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1740,6 +1740,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
*/
err = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_PRIORITY |
(write_domain ? I915_WAIT_ALL : 0),
MAX_SCHEDULE_TIMEOUT,
to_rps_client(file));
@@ -2381,11 +2382,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
+/*
+ * Move pages to appropriate lru and release the pagevec, decrementing the
+ * ref count of those pages.
+ */
+static void check_release_pagevec(struct pagevec *pvec)
+{
+ check_move_unevictable_pages(pvec);
+ __pagevec_release(pvec);
+ cond_resched();
+}
+
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct sgt_iter sgt_iter;
+ struct pagevec pvec;
struct page *page;
__i915_gem_object_release_shmem(obj, pages, true);
@@ -2395,6 +2408,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj, pages);
+ mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
+
+ pagevec_init(&pvec);
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
set_page_dirty(page);
@@ -2402,8 +2418,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- put_page(page);
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
}
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
obj->mm.dirty = false;
sg_free_table(pages);
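Rather than put_page() once per page, the object's pages are now collected in a pagevec and released in batches, so check_move_unevictable_pages() can take the LRU locks once per batch instead of once per page. The batching shape, reduced to plain C (BATCH_SIZE and flush() are stand-ins for PAGEVEC_SIZE and check_release_pagevec()):

#include <stdio.h>

#define BATCH_SIZE 15	/* stand-in for PAGEVEC_SIZE */

struct batch {
	int nr;
	int items[BATCH_SIZE];
};

static void flush(struct batch *b)	/* stand-in for check_release_pagevec() */
{
	printf("releasing %d pages in one go\n", b->nr);
	b->nr = 0;
}

/* Mirrors pagevec_add(): returns slots left after adding, 0 meaning "full". */
static int batch_add(struct batch *b, int item)
{
	b->items[b->nr++] = item;
	return BATCH_SIZE - b->nr;
}

int main(void)
{
	struct batch b = { .nr = 0 };

	for (int page = 0; page < 40; page++)
		if (!batch_add(&b, page))
			flush(&b);
	if (b.nr)		/* final partial batch */
		flush(&b);
	return 0;
}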
@@ -2483,7 +2502,7 @@ unlock:
mutex_unlock(&obj->mm.lock);
}
-static bool i915_sg_trim(struct sg_table *orig_st)
+bool i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
struct scatterlist *sg, *new_sg;
@@ -2524,6 +2543,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment = i915_sg_segment_size();
unsigned int sg_page_sizes;
+ struct pagevec pvec;
gfp_t noreclaim;
int ret;
@@ -2559,6 +2579,7 @@ rebuild_st:
* Fail silently without starting the shrinker
*/
mapping = obj->base.filp->f_mapping;
+ mapping_set_unevictable(mapping);
noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
@@ -2573,6 +2594,7 @@ rebuild_st:
gfp_t gfp = noreclaim;
do {
+ cond_resched();
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (likely(!IS_ERR(page)))
break;
@@ -2583,7 +2605,6 @@ rebuild_st:
}
i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
- cond_resched();
/*
* We've tried hard to allocate the memory by reaping
@@ -2673,8 +2694,14 @@ rebuild_st:
err_sg:
sg_mark_end(sg);
err_pages:
- for_each_sgt_page(page, sgt_iter, st)
- put_page(page);
+ mapping_clear_unevictable(mapping);
+ pagevec_init(&pvec);
+ for_each_sgt_page(page, sgt_iter, st) {
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
sg_free_table(st);
kfree(st);
@@ -3282,16 +3309,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct i915_request *request)
{
- GEM_TRACE("%s fence %llx:%d -> -EIO\n",
- request->engine->name,
- request->fence.context, request->fence.seqno);
- dma_fence_set_error(&request->fence, -EIO);
-
- i915_request_submit(request);
-}
-
-static void nop_complete_submit_request(struct i915_request *request)
-{
unsigned long flags;
GEM_TRACE("%s fence %llx:%d -> -EIO\n",
@@ -3327,57 +3344,33 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, i915, id)
i915_gem_reset_prepare_engine(engine);
- engine->submit_request = nop_submit_request;
- engine->schedule = NULL;
- }
- i915->caps.scheduler = 0;
-
/* Even if the GPU reset fails, it should still stop the engines */
if (INTEL_GEN(i915) >= 5)
intel_gpu_reset(i915, ALL_ENGINES);
- /*
- * Make sure no one is running the old callback before we proceed with
- * cancelling requests and resetting the completion tracking. Otherwise
- * we might submit a request to the hardware which never completes.
- */
- synchronize_rcu();
-
for_each_engine(engine, i915, id) {
- /* Mark all executing requests as skipped */
- engine->cancel_requests(engine);
-
- /*
- * Only once we've force-cancelled all in-flight requests can we
- * start to complete all requests.
- */
- engine->submit_request = nop_complete_submit_request;
+ engine->submit_request = nop_submit_request;
+ engine->schedule = NULL;
}
+ i915->caps.scheduler = 0;
/*
* Make sure no request can slip through without getting completed by
* either this call here to intel_engine_init_global_seqno, or the one
- * in nop_complete_submit_request.
+ * in nop_submit_request.
*/
synchronize_rcu();
- for_each_engine(engine, i915, id) {
- unsigned long flags;
-
- /*
- * Mark all pending requests as complete so that any concurrent
- * (lockless) lookup doesn't try and wait upon the request as we
- * reset it.
- */
- spin_lock_irqsave(&engine->timeline.lock, flags);
- intel_engine_init_global_seqno(engine,
- intel_engine_last_submit(engine));
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ /* Mark all executing requests as skipped */
+ for_each_engine(engine, i915, id)
+ engine->cancel_requests(engine);
+ for_each_engine(engine, i915, id) {
i915_gem_reset_finish_engine(engine);
+ intel_engine_wakeup(engine);
}
out:
@@ -3530,6 +3523,8 @@ static void __sleep_rcu(struct rcu_head *rcu)
struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
struct drm_i915_private *i915 = s->i915;
+ destroy_rcu_head(&s->rcu);
+
if (same_epoch(i915, s->epoch)) {
INIT_WORK(&s->work, __sleep_work);
queue_work(i915->wq, &s->work);
@@ -3646,6 +3641,7 @@ out_rearm:
if (same_epoch(dev_priv, epoch)) {
struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s) {
+ init_rcu_head(&s->rcu);
s->i915 = dev_priv;
s->epoch = epoch;
call_rcu(&s->rcu, __sleep_rcu);
@@ -3743,7 +3739,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
start = ktime_get();
ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_PRIORITY |
+ I915_WAIT_ALL,
to_wait_timeout(args->timeout_ns),
to_rps_client(file));
@@ -4710,6 +4708,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
+ init_rcu_head(&obj->rcu);
+
obj->ops = ops;
reservation_object_init(&obj->__builtin_resv);
@@ -4977,6 +4977,13 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
/*
+ * We reuse obj->rcu for the freed list, so we had better not treat
+ * it like a rcu_head from this point forwards. And we expect all
+ * objects to be freed via this path.
+ */
+ destroy_rcu_head(&obj->rcu);
+
+ /*
* Since we require blocking on struct_mutex to unbind the freed
* object from the GPU before releasing resources back to the
* system, we can not do that directly from the RCU callback (which may
@@ -5293,19 +5300,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
- if (HAS_PCH_NOP(dev_priv)) {
- if (IS_IVYBRIDGE(dev_priv)) {
- u32 temp = I915_READ(GEN7_MSG_CTL);
- temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
- I915_WRITE(GEN7_MSG_CTL, temp);
- } else if (INTEL_GEN(dev_priv) >= 7) {
- u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
- temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
- }
- }
-
- intel_gt_workarounds_apply(dev_priv);
+ /* Apply the GT workarounds... */
+ intel_gt_apply_workarounds(dev_priv);
+ /* ...and determine whether they are sticking. */
+ intel_gt_verify_workarounds(dev_priv, "init");
i915_gem_init_swizzling(dev_priv);
@@ -5500,6 +5498,44 @@ err_active:
goto out_ctx;
}
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (!obj)
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ i915->gt.scratch = vma;
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+ i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
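The scratch VMA is allocated from stolen memory first and falls back to internal pages only when that fails, before being pinned high in the GGTT. A generic sketch of the try-then-fall-back allocation (allocator names are invented; error handling trimmed to the essentials):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the two allocators: prefer the first, fall back to the second. */
static void *alloc_preferred(size_t sz) { (void)sz; return NULL; /* pretend stolen is exhausted */ }
static void *alloc_fallback(size_t sz) { return malloc(sz); }

static int init_scratch(void **out, size_t sz)
{
	void *obj = alloc_preferred(sz);

	if (!obj)
		obj = alloc_fallback(sz);
	if (!obj)
		return -ENOMEM;

	*out = obj;
	return 0;
}

int main(void)
{
	void *scratch;

	if (init_scratch(&scratch, 4096) == 0) {
		puts("scratch allocated via fallback");
		free(scratch);
	}
	return 0;
}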
@@ -5546,12 +5582,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
- ret = i915_gem_contexts_init(dev_priv);
+ ret = i915_gem_init_scratch(dev_priv,
+ IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_ggtt;
}
+ ret = i915_gem_contexts_init(dev_priv);
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_scratch;
+ }
+
ret = intel_engines_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
@@ -5624,6 +5667,8 @@ err_pm:
err_context:
if (ret != -EIO)
i915_gem_contexts_fini(dev_priv);
+err_scratch:
+ i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5675,8 +5720,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_uc_fini(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
+ i915_gem_fini_scratch(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_wa_list_free(&dev_priv->gt_wa_list);
+
intel_cleanup_gt_powersave(dev_priv);
intel_uc_fini_misc(dev_priv);
@@ -5951,7 +5999,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
* the bits.
*/
BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
- sizeof(atomic_t) * BITS_PER_BYTE);
+ BITS_PER_TYPE(atomic_t));
if (old) {
WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 599c4f6eb1ea..b0e4b976880c 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -47,17 +47,19 @@ struct drm_i915_private;
#define GEM_DEBUG_DECL(var) var
#define GEM_DEBUG_EXEC(expr) expr
#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
+#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)
#else
#define GEM_SHOW_DEBUG() (0)
#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
-#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
+#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); })
#define GEM_DEBUG_DECL(var)
#define GEM_DEBUG_EXEC(expr) do { } while (0)
#define GEM_DEBUG_BUG_ON(expr)
+#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })
#endif
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
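The reworked GEM_WARN_ON keeps evaluating its argument in non-debug builds and yields the truth value, so it can sit directly inside an if (); the new GEM_DEBUG_WARN_ON instead compiles away to 0. A userspace sketch of the statement-expression idiom (a GCC/clang extension, as in the kernel):

#include <stdio.h>

/* Debug flavour: report and return the condition. */
#define WARN_ON_DBG(x) \
	({ int __w = !!(x); if (__w) fprintf(stderr, "warn: %s\n", #x); __w; })
/* Release flavour: still evaluate x (it may have side effects), just return it. */
#define WARN_ON_REL(x) ({ !!(x); })

int main(void)
{
	int calls = 0;

	if (WARN_ON_REL(++calls > 0))	/* argument still evaluated: calls == 1 */
		printf("handled, calls=%d\n", calls);
	if (WARN_ON_DBG(calls != 1))	/* false: stays silent, returns 0 */
		return 1;
	return 0;
}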
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f772593b99ab..371c07087095 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
- ctx->sched.priority = I915_PRIORITY_NORMAL;
+ ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
@@ -414,7 +414,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
if (IS_ERR(ctx))
return ctx;
- if (USES_FULL_PPGTT(dev_priv)) {
+ if (HAS_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
ppgtt = i915_ppgtt_create(dev_priv, file_priv);
@@ -457,7 +457,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
if (ret)
return ERR_PTR(ret);
- ctx = __create_hw_context(to_i915(dev), NULL);
+ ctx = i915_gem_create_context(to_i915(dev), NULL);
if (IS_ERR(ctx))
goto out;
@@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
}
i915_gem_context_clear_bannable(ctx);
- ctx->sched.priority = prio;
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
ctx->ring_size = PAGE_SIZE;
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -535,16 +535,12 @@ static bool needs_preempt_context(struct drm_i915_private *i915)
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
- int ret;
/* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
- ret = intel_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
-
+ intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
init_contexts(dev_priv);
/* lowest priority; idle task */
@@ -879,7 +875,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_bannable(ctx);
break;
case I915_CONTEXT_PARAM_PRIORITY:
- args->value = ctx->sched.priority;
+ args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
break;
default:
ret = -EINVAL;
@@ -948,7 +944,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
!capable(CAP_SYS_NICE))
ret = -EPERM;
else
- ctx->sched.priority = priority;
+ ctx->sched.priority =
+ I915_USER_PRIORITY(priority);
}
break;
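User-visible priorities are now shifted left by I915_USER_PRIORITY_SHIFT so the low bits stay free for internal priority bumps, and getparam undoes the shift before reporting. A sketch of the packing (the shift width below is arbitrary; the real constant is defined in i915_scheduler.h):

#include <stdio.h>

#define USER_PRIORITY_SHIFT 2		/* illustrative; low bits reserved internally */
#define USER_PRIORITY(x)    ((x) << USER_PRIORITY_SHIFT)

#define PRIORITY_INTERNAL_BUMP 1	/* e.g. "request is holding up the display" */

int main(void)
{
	int user = 512;				/* what the ioctl sees */
	int sched = USER_PRIORITY(user);	/* what the scheduler stores */

	sched |= PRIORITY_INTERNAL_BUMP;	/* internal boost, invisible to userspace */

	/* getparam path: strip the internal bits before reporting back */
	printf("user=%d sched=%d reported=%d\n",
	       user, sched, sched >> USER_PRIORITY_SHIFT);
	return 0;
}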
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 08165f6a0a84..f6d870b1f73e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -163,6 +163,7 @@ struct i915_gem_context {
/** engine: per-engine logical HW state */
struct intel_context {
struct i915_gem_context *gem_context;
+ struct intel_engine_cs *active;
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 09187286d346..10a4afb4f235 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -460,7 +460,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
* any non-page-aligned or non-canonical addresses.
*/
if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
- entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
+ entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
return -EINVAL;
/* pad_to_size was once a reserved field, so sanitize it */
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
else if (gen >= 4)
len = 4;
else
- len = 3;
+ len = 6;
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
@@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma,
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
+
+ /* And again for good measure (blb/pnv) */
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *batch++ = addr;
+ *batch++ = target_offset;
}
goto out;
@@ -2186,7 +2191,7 @@ signal_fence_array(struct i915_execbuffer *eb,
if (!(flags & I915_EXEC_FENCE_SIGNAL))
continue;
- drm_syncobj_replace_fence(syncobj, 0, fence);
+ drm_syncobj_replace_fence(syncobj, fence);
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 56c7f8637311..add1fe7aeb93 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -133,55 +133,6 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
i915->ggtt.invalidate(i915);
}
-int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
- int enable_ppgtt)
-{
- bool has_full_ppgtt;
- bool has_full_48bit_ppgtt;
-
- if (!dev_priv->info.has_aliasing_ppgtt)
- return 0;
-
- has_full_ppgtt = dev_priv->info.has_full_ppgtt;
- has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
-
- if (intel_vgpu_active(dev_priv)) {
- /* GVT-g has no support for 32bit ppgtt */
- has_full_ppgtt = false;
- has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
- }
-
- /*
- * We don't allow disabling PPGTT for gen9+ as it's a requirement for
- * execlists, the sole mechanism available to submit work.
- */
- if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
- return 0;
-
- if (enable_ppgtt == 1)
- return 1;
-
- if (enable_ppgtt == 2 && has_full_ppgtt)
- return 2;
-
- if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
- return 3;
-
- /* Disable ppgtt on SNB if VT-d is on. */
- if (IS_GEN6(dev_priv) && intel_vtd_active()) {
- DRM_INFO("Disabling PPGTT because VT-d is on\n");
- return 0;
- }
-
- if (has_full_48bit_ppgtt)
- return 3;
-
- if (has_full_ppgtt)
- return 2;
-
- return 1;
-}
-
static int ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
@@ -235,9 +186,9 @@ static void clear_pages(struct i915_vma *vma)
memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}
-static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
+static u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
@@ -274,9 +225,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode
-static gen6_pte_t snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
+static u64 snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -296,9 +247,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
+static u64 ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -320,9 +271,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_pte_t byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
+static u64 byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -336,9 +287,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
+static u64 hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -349,9 +300,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_pte_t iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
+static u64 iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -629,10 +580,9 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
* region, including any PTEs which happen to point to scratch.
*
* This is only relevant for the 48b PPGTT where we support
- * huge-gtt-pages, see also i915_vma_insert().
- *
- * TODO: we should really consider write-protecting the scratch-page and
- * sharing between ppgtt
+ * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
+ * scratch (read-only) between all vm, we create one 64k scratch page
+ * for all.
*/
size = I915_GTT_PAGE_SIZE_4K;
if (i915_vm_is_48bit(vm) &&
@@ -715,14 +665,13 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
static void gen8_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
- fill_px(vm, pt,
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+ fill_px(vm, pt, vm->scratch_pte);
}
-static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
+static void gen6_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
- fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
+ fill32_px(vm, pt, vm->scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
@@ -856,15 +805,13 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
/* Removes entries from a single page table, releasing it if it's empty.
* Caller can use the return value to update higher-level entries.
*/
-static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
struct i915_page_table *pt,
u64 start, u64 length)
{
unsigned int num_entries = gen8_pte_count(start, length);
unsigned int pte = gen8_pte_index(start);
unsigned int pte_end = pte + num_entries;
- const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t *vaddr;
GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -875,7 +822,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
vaddr = kmap_atomic_px(pt);
while (pte < pte_end)
- vaddr[pte++] = scratch_pte;
+ vaddr[pte++] = vm->scratch_pte;
kunmap_atomic(vaddr);
return false;
@@ -1208,7 +1155,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
u16 i;
- encode = pte_encode | vma->vm->scratch_page.daddr;
+ encode = vma->vm->scratch_pte;
vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
for (i = 1; i < index; i += 16)
@@ -1261,10 +1208,35 @@ static int gen8_init_scratch(struct i915_address_space *vm)
{
int ret;
+ /*
+ * If everybody agrees not to write into the scratch page,
+ * we can reuse it for all vm, keeping contexts and processes separate.
+ */
+ if (vm->has_read_only &&
+ vm->i915->kernel_context &&
+ vm->i915->kernel_context->ppgtt) {
+ struct i915_address_space *clone =
+ &vm->i915->kernel_context->ppgtt->vm;
+
+ GEM_BUG_ON(!clone->has_read_only);
+
+ vm->scratch_page.order = clone->scratch_page.order;
+ vm->scratch_pte = clone->scratch_pte;
+ vm->scratch_pt = clone->scratch_pt;
+ vm->scratch_pd = clone->scratch_pd;
+ vm->scratch_pdp = clone->scratch_pdp;
+ return 0;
+ }
+
ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
+ vm->scratch_pte =
+ gen8_pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_LLC,
+ PTE_READ_ONLY);
+
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
ret = PTR_ERR(vm->scratch_pt);
@@ -1336,6 +1308,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
static void gen8_free_scratch(struct i915_address_space *vm)
{
+ if (!vm->scratch_page.daddr)
+ return;
+
if (use_4lvl(vm))
free_pdp(vm, vm->scratch_pdp);
free_pd(vm, vm->scratch_pd);
@@ -1573,8 +1548,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct i915_address_space *vm = &ppgtt->vm;
- const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ const gen8_pte_t scratch_pte = vm->scratch_pte;
u64 start = 0, length = ppgtt->vm.total;
if (use_4lvl(vm)) {
@@ -1647,16 +1621,12 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = &i915->drm.pdev->dev;
- ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
+ ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
1ULL << 48 :
1ULL << 32;
- /*
- * From bdw, there is support for read-only pages in the PPGTT.
- *
- * XXX GVT is not honouring the lack of RW in the PTE bits.
- */
- ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+ /* From bdw, there is support for read-only pages in the PPGTT. */
+ ppgtt->vm.has_read_only = true;
i915_address_space_init(&ppgtt->vm, i915);
@@ -1721,7 +1691,7 @@ err_free:
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+ const gen6_pte_t scratch_pte = base->vm.scratch_pte;
struct i915_page_table *pt;
u32 pte, pde;
@@ -1757,7 +1727,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
if (i == 4)
continue;
- seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
+ seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
pde, pte,
(pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
for (i = 0; i < 4; i++) {
@@ -1782,19 +1752,6 @@ static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
ppgtt->pd_addr + pde);
}
-static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id) {
- u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
- GEN8_GFX_PPGTT_48B : 0;
- I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
- }
-}
-
static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
@@ -1834,7 +1791,8 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
/* PPGTT support for Sandybdrige/Gen6 and later */
@@ -1846,7 +1804,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+ const gen6_pte_t scratch_pte = vm->scratch_pte;
while (num_entries) {
struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
@@ -1937,7 +1895,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
if (IS_ERR(pt))
goto unwind_out;
- gen6_initialize_pt(ppgtt, pt);
+ gen6_initialize_pt(vm, pt);
ppgtt->base.pd.page_table[pde] = pt;
if (i915_vma_is_bound(ppgtt->vma,
@@ -1975,9 +1933,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
if (ret)
return ret;
- ppgtt->scratch_pte =
- vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_NONE, PTE_READ_ONLY);
+ vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_NONE,
+ PTE_READ_ONLY);
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
@@ -1985,7 +1943,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
return PTR_ERR(vm->scratch_pt);
}
- gen6_initialize_pt(ppgtt, vm->scratch_pt);
+ gen6_initialize_pt(vm, vm->scratch_pt);
gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
@@ -2237,23 +2195,10 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
gtt_write_workarounds(dev_priv);
- /* In the case of execlists, PPGTT is enabled by the context descriptor
- * and the PDPs are contained within the context itself. We don't
- * need to do anything here. */
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
- return 0;
-
- if (!USES_PPGTT(dev_priv))
- return 0;
-
if (IS_GEN6(dev_priv))
gen6_ppgtt_enable(dev_priv);
else if (IS_GEN7(dev_priv))
gen7_ppgtt_enable(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 8)
- gen8_ppgtt_enable(dev_priv);
- else
- MISSING_CASE(INTEL_GEN(dev_priv));
return 0;
}
@@ -2543,8 +2488,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start / I915_GTT_PAGE_SIZE;
unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ const gen8_pte_t scratch_pte = vm->scratch_pte;
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2669,8 +2613,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, 0);
+ scratch_pte = vm->scratch_pte;
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@@ -2952,7 +2895,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
/* And finally clear the reserved guard page */
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
- if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
+ if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
ret = i915_gem_init_aliasing_ppgtt(dev_priv);
if (ret)
goto err;
@@ -3076,6 +3019,10 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return ret;
}
+ ggtt->vm.scratch_pte =
+ ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
+ I915_CACHE_NONE, 0);
+
return 0;
}
@@ -3275,7 +3222,7 @@ static void bdw_setup_private_ppat(struct intel_ppat *ppat)
ppat->match = bdw_private_pat_match;
ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
- if (!USES_PPGTT(ppat->i915)) {
+ if (!HAS_PPGTT(ppat->i915)) {
/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
@@ -3402,7 +3349,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->vm.insert_page = gen8_ggtt_insert_page;
ggtt->vm.clear_range = nop_clear_range;
- if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
+ if (intel_scanout_needs_vtd_wa(dev_priv))
ggtt->vm.clear_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
@@ -3413,6 +3360,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
if (ggtt->vm.clear_range != nop_clear_range)
ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+
+ /* Prevent recursively calling stop_machine() and deadlocks. */
+ dev_info(dev_priv->drm.dev,
+ "Disabling error capture for VT-d workaround\n");
+ i915_disable_error_state(dev_priv, -ENODEV);
}
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3422,6 +3374,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
+ ggtt->vm.pte_encode = gen8_pte_encode;
+
setup_private_pat(dev_priv);
return ggtt_probe_common(ggtt, size);
@@ -3609,7 +3563,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
/* Only VLV supports read-only GGTT mappings */
ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
- if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
+ if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3711,7 +3665,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
}
static struct scatterlist *
-rotate_pages(const dma_addr_t *in, unsigned int offset,
+rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
unsigned int width, unsigned int height,
unsigned int stride,
struct sg_table *st, struct scatterlist *sg)
@@ -3720,7 +3674,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int src_idx;
for (column = 0; column < width; column++) {
- src_idx = stride * (height - 1) + column;
+ src_idx = stride * (height - 1) + column + offset;
for (row = 0; row < height; row++) {
st->nents++;
/* We don't need the pages, but need to initialize
@@ -3728,7 +3682,8 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
* The only thing we need are DMA addresses.
*/
sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
- sg_dma_address(sg) = in[offset + src_idx];
+ sg_dma_address(sg) =
+ i915_gem_object_get_dma_address(obj, src_idx);
sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
src_idx -= stride;
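rotate_pages() now asks the object for each DMA address on demand instead of pre-flattening them into a temporary array, but the traversal is unchanged: each destination column starts at the bottom of a source column and steps up one stride per row. The index walk, demonstrated on a small grid:

#include <stdio.h>

/* Emit source page indices for a 90-degree-rotated view of a
 * width x height tile grid with the given row stride, as in rotate_pages(). */
static void rotate_indices(unsigned int offset, unsigned int width,
			   unsigned int height, unsigned int stride)
{
	for (unsigned int column = 0; column < width; column++) {
		unsigned int src_idx = stride * (height - 1) + column + offset;

		for (unsigned int row = 0; row < height; row++) {
			printf("%u ", src_idx);
			src_idx -= stride;	/* walk one row up the column */
		}
		printf("\n");
	}
}

int main(void)
{
	rotate_indices(0, 2, 3, 2);	/* prints "4 2 0" then "5 3 1" */
	return 0;
}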
@@ -3742,22 +3697,11 @@ static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
- const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
unsigned int size = intel_rotation_info_size(rot_info);
- struct sgt_iter sgt_iter;
- dma_addr_t dma_addr;
- unsigned long i;
- dma_addr_t *page_addr_list;
struct sg_table *st;
struct scatterlist *sg;
int ret = -ENOMEM;
-
- /* Allocate a temporary list of source pages for random access. */
- page_addr_list = kvmalloc_array(n_pages,
- sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!page_addr_list)
- return ERR_PTR(ret);
+ int i;
/* Allocate target SG list. */
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -3768,29 +3712,20 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
if (ret)
goto err_sg_alloc;
- /* Populate source page list from the object. */
- i = 0;
- for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
- page_addr_list[i++] = dma_addr;
-
- GEM_BUG_ON(i != n_pages);
st->nents = 0;
sg = st->sgl;
for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
- sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
+ sg = rotate_pages(obj, rot_info->plane[i].offset,
rot_info->plane[i].width, rot_info->plane[i].height,
rot_info->plane[i].stride, st, sg);
}
- kvfree(page_addr_list);
-
return st;
err_sg_alloc:
kfree(st);
err_st_alloc:
- kvfree(page_addr_list);
DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
@@ -3835,6 +3770,8 @@ intel_partial_pages(const struct i915_ggtt_view *view,
count -= len >> PAGE_SHIFT;
if (count == 0) {
sg_mark_end(sg);
+ i915_sg_trim(st); /* Drop any unused tail entries. */
+
return st;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 7e2af5f4f39b..4874da09a3c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -42,13 +42,15 @@
#include "i915_selftest.h"
#include "i915_timeline.h"
-#define I915_GTT_PAGE_SIZE_4K BIT(12)
-#define I915_GTT_PAGE_SIZE_64K BIT(16)
-#define I915_GTT_PAGE_SIZE_2M BIT(21)
+#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
+#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
+#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
+#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
+
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
#define I915_FENCE_REG_NONE -1
@@ -287,6 +289,7 @@ struct i915_address_space {
struct mutex mutex; /* protects vma and our lists */
+ u64 scratch_pte;
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@@ -333,12 +336,11 @@ struct i915_address_space {
/* Some systems support read-only mappings for GGTT and/or PPGTT */
bool has_read_only:1;
- /* FIXME: Need a more generic return type */
- gen6_pte_t (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags); /* Create a valid PTE */
- /* flags for pte_encode */
+ u64 (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY (1<<0)
+
int (*allocate_va_range)(struct i915_address_space *vm,
u64 start, u64 length);
void (*clear_range)(struct i915_address_space *vm,
@@ -420,7 +422,6 @@ struct gen6_hw_ppgtt {
struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
- gen6_pte_t scratch_pte;
unsigned int pin_count;
bool scan_for_unused_pt;
@@ -659,20 +660,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
u64 start, u64 end, unsigned int flags);
/* Flags used by pin/bind&friends. */
-#define PIN_NONBLOCK BIT(0)
-#define PIN_MAPPABLE BIT(1)
-#define PIN_ZONE_4G BIT(2)
-#define PIN_NONFAULT BIT(3)
-#define PIN_NOEVICT BIT(4)
-
-#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */
-#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */
-#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */
-#define PIN_UPDATE BIT(8)
-
-#define PIN_HIGH BIT(9)
-#define PIN_OFFSET_BIAS BIT(10)
-#define PIN_OFFSET_FIXED BIT(11)
+#define PIN_NONBLOCK BIT_ULL(0)
+#define PIN_MAPPABLE BIT_ULL(1)
+#define PIN_ZONE_4G BIT_ULL(2)
+#define PIN_NONFAULT BIT_ULL(3)
+#define PIN_NOEVICT BIT_ULL(4)
+
+#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE BIT_ULL(8)
+
+#define PIN_HIGH BIT_ULL(9)
+#define PIN_OFFSET_BIAS BIT_ULL(10)
+#define PIN_OFFSET_FIXED BIT_ULL(11)
#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
#endif
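The pin flags share a u64 with page-aligned GTT offsets (note PIN_OFFSET_MASK just above), so each flag must promote cleanly to 64 bits even on 32-bit builds where unsigned long is only 32 bits wide; hence BIT() becomes BIT_ULL(). A short illustration:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)     (1ul << (n))		/* as wide as unsigned long: 32 bits on 32-bit builds */
#define BIT_ULL(n) (1ull << (n))	/* always 64 bits wide */

int main(void)
{
	/* On a 32-bit build BIT(40) would overflow; BIT_ULL(40) is well defined.
	 * Flags and offsets are or'ed into the same 64-bit word. */
	uint64_t offset = BIT_ULL(40);
	uint64_t flags = BIT_ULL(6) /* PIN_GLOBAL-like */ | BIT_ULL(9) /* PIN_HIGH-like */;

	printf("offset=0x%llx flags=0x%llx\n",
	       (unsigned long long)offset, (unsigned long long)flags);
	return 0;
}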
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8762d17b6659..07465123c166 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -27,11 +27,14 @@
*
*/
-#include <generated/utsrelease.h>
+#include <linux/ascii85.h>
+#include <linux/nmi.h>
+#include <linux/scatterlist.h>
#include <linux/stop_machine.h>
+#include <linux/utsname.h>
#include <linux/zlib.h>
+
#include <drm/drm_print.h>
-#include <linux/ascii85.h>
#include "i915_gpu_error.h"
#include "i915_drv.h"
@@ -77,112 +80,110 @@ static const char *purgeable_flag(int purgeable)
return purgeable ? " purgeable" : "";
}
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+static void __sg_set_buf(struct scatterlist *sg,
+ void *addr, unsigned int len, loff_t it)
{
-
- if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
- e->err = -ENOSPC;
- return false;
- }
-
- if (e->bytes == e->size - 1 || e->err)
- return false;
-
- return true;
+ sg->page_link = (unsigned long)virt_to_page(addr);
+ sg->offset = offset_in_page(addr);
+ sg->length = len;
+ sg->dma_address = it;
}
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
- unsigned len)
+static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
- if (e->pos + len <= e->start) {
- e->pos += len;
+ if (!len)
return false;
- }
- /* First vsnprintf needs to fit in its entirety for memmove */
- if (len >= e->size) {
- e->err = -EIO;
- return false;
- }
+ if (e->bytes + len + 1 <= e->size)
+ return true;
- return true;
-}
+ if (e->bytes) {
+ __sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
+ e->iter += e->bytes;
+ e->buf = NULL;
+ e->bytes = 0;
+ }
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
- unsigned len)
-{
- /* If this is first printf in this window, adjust it so that
- * start position matches start of the buffer
- */
+ if (e->cur == e->end) {
+ struct scatterlist *sgl;
- if (e->pos < e->start) {
- const size_t off = e->start - e->pos;
+ sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
+ if (!sgl) {
+ e->err = -ENOMEM;
+ return false;
+ }
- /* Should not happen but be paranoid */
- if (off > len || e->bytes) {
- e->err = -EIO;
- return;
+ if (e->cur) {
+ e->cur->offset = 0;
+ e->cur->length = 0;
+ e->cur->page_link =
+ (unsigned long)sgl | SG_CHAIN;
+ } else {
+ e->sgl = sgl;
}
- memmove(e->buf, e->buf + off, len - off);
- e->bytes = len - off;
- e->pos = e->start;
- return;
+ e->cur = sgl;
+ e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
}
- e->bytes += len;
- e->pos += len;
+ e->size = ALIGN(len + 1, SZ_64K);
+ e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!e->buf) {
+ e->size = PAGE_ALIGN(len + 1);
+ e->buf = kmalloc(e->size, GFP_KERNEL);
+ }
+ if (!e->buf) {
+ e->err = -ENOMEM;
+ return false;
+ }
+
+ return true;
}
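
[Aside: the grow path above uses a two-tier allocation: first an opportunistic
64KiB-aligned kmalloc() with __GFP_NOWARN | __GFP_NORETRY, which fails fast and silently
under memory pressure, then a page-aligned fallback that is allowed to try harder. The
same idiom in isolation, as a sketch with error handling reduced to NULL:]

	static char *grow_buf(size_t len, size_t *size)
	{
		char *buf;

		/* Opportunistic: a big buffer, but bail out quickly and quietly
		 * rather than thrashing reclaim in the middle of error capture. */
		*size = ALIGN(len + 1, SZ_64K);
		buf = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
		if (buf)
			return buf;

		/* Fallback: the smallest allocation that still fits the string. */
		*size = PAGE_ALIGN(len + 1);
		return kmalloc(*size, GFP_KERNEL);
	}
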
__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
- const char *f, va_list args)
+ const char *fmt, va_list args)
{
- unsigned len;
+ va_list ap;
+ int len;
- if (!__i915_error_ok(e))
+ if (e->err)
return;
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- va_list tmp;
-
- va_copy(tmp, args);
- len = vsnprintf(NULL, 0, f, tmp);
- va_end(tmp);
-
- if (!__i915_error_seek(e, len))
- return;
+ va_copy(ap, args);
+ len = vsnprintf(NULL, 0, fmt, ap);
+ va_end(ap);
+ if (len <= 0) {
+ e->err = len;
+ return;
}
- len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
+ if (!__i915_error_grow(e, len))
+ return;
- __i915_error_advance(e, len);
+ GEM_BUG_ON(e->bytes >= e->size);
+ len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
+ if (len < 0) {
+ e->err = len;
+ return;
+ }
+ e->bytes += len;
}
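
[Aside: i915_error_vprintf() now follows the classic measure-then-format pattern: a first
vsnprintf(NULL, 0, ...) pass on a va_copy sizes the output, the buffer is grown to fit,
and only then is the string formatted for real (vscnprintf returns the bytes actually
written). A self-contained userspace analogue of the same pattern:]

	#include <stdarg.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Allocate-and-format: size with a NULL/0 pass, then format for real. */
	static char *strdup_printf(const char *fmt, ...)
	{
		va_list ap;
		char *buf;
		int len;

		va_start(ap, fmt);
		len = vsnprintf(NULL, 0, fmt, ap);	/* measure only */
		va_end(ap);
		if (len < 0)
			return NULL;

		buf = malloc(len + 1);
		if (!buf)
			return NULL;

		va_start(ap, fmt);
		vsnprintf(buf, len + 1, fmt, ap);	/* guaranteed to fit now */
		va_end(ap);
		return buf;
	}
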
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
- const char *str)
+static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
unsigned len;
- if (!__i915_error_ok(e))
+ if (e->err || !str)
return;
len = strlen(str);
+ if (!__i915_error_grow(e, len))
+ return;
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- if (!__i915_error_seek(e, len))
- return;
- }
-
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
+ GEM_BUG_ON(e->bytes + len > e->size);
memcpy(e->buf + e->bytes, str, len);
-
- __i915_error_advance(e, len);
+ e->bytes += len;
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
@@ -268,6 +269,8 @@ static int compress_page(struct compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
+
+ touch_nmi_watchdog();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
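
[Aside: compress_page() can spend a long time in zlib_deflate() while the capture runs
non-preemptibly (the capture is driven via stop_machine(), per the include above), so
petting the NMI watchdog each pass keeps the hard-lockup detector from flagging a
genuinely slow but healthy capture. The general pattern for any long non-preemptible
loop; more_work()/do_one_unit() are hypothetical placeholders:]

	while (more_work()) {
		do_one_unit();		/* e.g. one zlib_deflate() pass */
		touch_nmi_watchdog();	/* we are alive, just busy */
	}
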
@@ -512,7 +515,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " SYNC_2: 0x%08x\n",
ee->semaphore_mboxes[2]);
}
- if (USES_PPGTT(m->i915)) {
+ if (HAS_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
if (INTEL_GEN(m->i915) >= 8) {
@@ -635,22 +638,33 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
}
-int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
- const struct i915_gpu_state *error)
+static void err_free_sgl(struct scatterlist *sgl)
+{
+ while (sgl) {
+ struct scatterlist *sg;
+
+ for (sg = sgl; !sg_is_chain(sg); sg++) {
+ kfree(sg_virt(sg));
+ if (sg_is_last(sg))
+ break;
+ }
+
+ sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
+ free_page((unsigned long)sgl);
+ sgl = sg;
+ }
+}
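
[Aside: err_free_sgl() has to walk the chain by hand: each table segment lives in a page
that holds the scatterlist entries themselves, the entries point at kmalloc()ed text
buffers, and a chain page can only be freed after its entries have been visited. The
generic iterator hides exactly the detail this code needs, as the contrast below shows;
the sketch is deliberately wrong in that it would leak the chain pages:]

	/* sg_next() follows SG_CHAIN links transparently, so a plain walk
	 * frees every text buffer but never sees the chain pages: */
	struct scatterlist *sg;

	for (sg = sgl; sg; sg = sg_next(sg))
		kfree(sg_virt(sg));

	/* ...which is why err_free_sgl() tests sg_is_chain()/sg_is_last()
	 * itself and free_page()s each segment as it leaves it. */
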
+
+static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
+ struct i915_gpu_state *error)
{
- struct drm_i915_private *dev_priv = m->i915;
struct drm_i915_error_object *obj;
struct timespec64 ts;
int i, j;
- if (!error) {
- err_printf(m, "No error state collected\n");
- return 0;
- }
-
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
- err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ err_printf(m, "Kernel: %s\n", init_utsname()->release);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -680,12 +694,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
- err_print_pciid(m, error->i915);
+ err_print_pciid(m, m->i915);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_CSR(dev_priv)) {
- struct intel_csr *csr = &dev_priv->csr;
+ if (HAS_CSR(m->i915)) {
+ struct intel_csr *csr = &m->i915->csr;
err_printf(m, "DMC loaded: %s\n",
yesno(csr->dmc_payload != NULL));
@@ -705,22 +719,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
err_printf(m, "CCID: 0x%08x\n", error->ccid);
- err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
+ err_printf(m, "Missed interrupts: 0x%08lx\n",
+ m->i915->gpu_error.missed_irq_rings);
for (i = 0; i < error->nfence; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
- if (INTEL_GEN(dev_priv) >= 8)
+ if (INTEL_GEN(m->i915) >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
error->fault_data1, error->fault_data0);
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (IS_GEN7(dev_priv))
+ if (IS_GEN7(m->i915))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -742,7 +757,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
len += scnprintf(buf + len, sizeof(buf), "%s%s",
first ? "" : ", ",
- dev_priv->engine[j]->name);
+ m->i915->engine[j]->name);
first = 0;
}
scnprintf(buf + len, sizeof(buf), ")");
@@ -760,7 +775,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = ee->batchbuffer;
if (obj) {
- err_puts(m, dev_priv->engine[i]->name);
+ err_puts(m, m->i915->engine[i]->name);
if (ee->context.pid)
err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
ee->context.comm,
@@ -772,16 +787,16 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
- print_error_obj(m, dev_priv->engine[i], NULL, obj);
+ print_error_obj(m, m->i915->engine[i], NULL, obj);
}
for (j = 0; j < ee->user_bo_count; j++)
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"user", ee->user_bo[j]);
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
- dev_priv->engine[i]->name,
+ m->i915->engine[i]->name,
ee->num_requests);
for (j = 0; j < ee->num_requests; j++)
error_print_request(m, " ",
@@ -791,10 +806,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (IS_ERR(ee->waiters)) {
err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
- dev_priv->engine[i]->name);
+ m->i915->engine[i]->name);
} else if (ee->num_waiters) {
err_printf(m, "%s --- %d waiters\n",
- dev_priv->engine[i]->name,
+ m->i915->engine[i]->name,
ee->num_waiters);
for (j = 0; j < ee->num_waiters; j++) {
err_printf(m, " seqno 0x%08x for %s [%d]\n",
@@ -804,22 +819,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"ringbuffer", ee->ringbuffer);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"HW Status", ee->hws_page);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"HW context", ee->ctx);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"WA context", ee->wa_ctx);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"WA batchbuffer", ee->wa_batchbuffer);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"NULL context", ee->default_state);
}
@@ -832,43 +847,107 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_print_capabilities(m, &error->device_info, &error->driver_caps);
err_print_params(m, &error->params);
err_print_uc(m, &error->uc);
+}
+
+static int err_print_to_sgl(struct i915_gpu_state *error)
+{
+ struct drm_i915_error_state_buf m;
+
+ if (IS_ERR(error))
+ return PTR_ERR(error);
+
+ if (READ_ONCE(error->sgl))
+ return 0;
+
+ memset(&m, 0, sizeof(m));
+ m.i915 = error->i915;
+
+ __err_print_to_sgl(&m, error);
- if (m->bytes == 0 && m->err)
- return m->err;
+ if (m.buf) {
+ __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
+ m.bytes = 0;
+ m.buf = NULL;
+ }
+ if (m.cur) {
+ GEM_BUG_ON(m.end < m.cur);
+ sg_mark_end(m.cur - 1);
+ }
+ GEM_BUG_ON(m.sgl && !m.cur);
+
+ if (m.err) {
+ err_free_sgl(m.sgl);
+ return m.err;
+ }
+
+ if (cmpxchg(&error->sgl, NULL, m.sgl))
+ err_free_sgl(m.sgl);
return 0;
}
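
[Aside: the final cmpxchg() implements publish-once: multiple readers may race to render
the same capture, the first to finish installs its scatterlist into error->sgl, and any
loser frees its duplicate work. The idiom in general form; make_thing()/free_thing() are
hypothetical placeholders:]

	static struct thing *get_or_publish(struct thing **slot)
	{
		struct thing *mine, *old;

		mine = make_thing();		/* potentially expensive */
		if (!mine)
			return NULL;

		old = cmpxchg(slot, NULL, mine);
		if (old) {			/* somebody beat us to it */
			free_thing(mine);
			return old;
		}
		return mine;
	}
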
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
- struct drm_i915_private *i915,
- size_t count, loff_t pos)
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+ char *buf, loff_t off, size_t rem)
{
- memset(ebuf, 0, sizeof(*ebuf));
- ebuf->i915 = i915;
+ struct scatterlist *sg;
+ size_t count;
+ loff_t pos;
+ int err;
- /* We need to have enough room to store any i915_error_state printf
- * so that we can move it to start position.
- */
- ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
- ebuf->buf = kmalloc(ebuf->size,
- GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+ if (!error || !rem)
+ return 0;
- if (ebuf->buf == NULL) {
- ebuf->size = PAGE_SIZE;
- ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
- }
+ err = err_print_to_sgl(error);
+ if (err)
+ return err;
- if (ebuf->buf == NULL) {
- ebuf->size = 128;
- ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
- }
+ sg = READ_ONCE(error->fit);
+ if (!sg || off < sg->dma_address)
+ sg = error->sgl;
+ if (!sg)
+ return 0;
- if (ebuf->buf == NULL)
- return -ENOMEM;
+ pos = sg->dma_address;
+ count = 0;
+ do {
+ size_t len, start;
- ebuf->start = pos;
+ if (sg_is_chain(sg)) {
+ sg = sg_chain_ptr(sg);
+ GEM_BUG_ON(sg_is_chain(sg));
+ }
- return 0;
+ len = sg->length;
+ if (pos + len <= off) {
+ pos += len;
+ continue;
+ }
+
+ start = sg->offset;
+ if (pos < off) {
+ GEM_BUG_ON(off - pos > len);
+ len -= off - pos;
+ start += off - pos;
+ pos = off;
+ }
+
+ len = min(len, rem);
+ GEM_BUG_ON(!len || len > sg->length);
+
+ memcpy(buf, page_address(sg_page(sg)) + start, len);
+
+ count += len;
+ pos += len;
+
+ buf += len;
+ rem -= len;
+ if (!rem) {
+ WRITE_ONCE(error->fit, sg);
+ break;
+ }
+ } while (!sg_is_last(sg++));
+
+ return count;
}
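
[Aside: i915_gpu_state_copy_to_buffer() replaces the old fill-a-linear-buffer interface.
Note the error->fit cache: it remembers the scatterlist entry a sequential reader stopped
in, so the next read resumes there instead of rescanning from the head on every chunk. A
hypothetical read handler built on the new API; names and context are illustrative only:]

	static ssize_t error_state_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *ppos)
	{
		struct i915_gpu_state *error = file->private_data;
		char *buf;
		ssize_t ret;

		buf = kmalloc(count, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		ret = i915_gpu_state_copy_to_buffer(error, buf, *ppos, count);
		if (ret > 0) {
			if (copy_to_user(ubuf, buf, ret))
				ret = -EFAULT;
			else
				*ppos += ret;
		}

		kfree(buf);
		return ret;
	}
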
static void i915_error_object_free(struct drm_i915_error_object *obj)
@@ -941,6 +1020,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
cleanup_params(error);
cleanup_uc_state(error);
+ err_free_sgl(error->sgl);
kfree(error);
}
@@ -999,7 +1079,6 @@ i915_error_object_create(struct drm_i915_private *i915,
}
compress_fini(&compress, dst);
- ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst;
}
@@ -1268,7 +1347,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
engine);
- if (USES_PPGTT(dev_priv)) {
+ if (HAS_PPGTT(dev_priv)) {
int i;
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
@@ -1492,7 +1571,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
if (HAS_BROKEN_CS_TLB(i915))
ee->wa_batchbuffer =
i915_error_object_create(i915,
- engine->scratch);
+ i915->gt.scratch);
request_record_user_bo(request, ee);
ee->ctx =
@@ -1785,6 +1864,14 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
return epoch;
}
+static void capture_finish(struct i915_gpu_state *error)
+{
+ struct i915_ggtt *ggtt = &error->i915->ggtt;
+ const u64 slot = ggtt->error_capture.start;
+
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+}
+
static int capture(void *data)
{
struct i915_gpu_state *error = data;
@@ -1809,6 +1896,7 @@ static int capture(void *data)
error->epoch = capture_find_epoch(error);
+ capture_finish(error);
return 0;
}
@@ -1859,6 +1947,7 @@ void i915_capture_error_state(struct drm_i915_private *i915,
error = i915_capture_gpu_state(i915);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ i915_disable_error_state(i915, -ENOMEM);
return;
}
@@ -1914,5 +2003,14 @@ void i915_reset_error_state(struct drm_i915_private *i915)
i915->gpu_error.first_error = NULL;
spin_unlock_irq(&i915->gpu_error.lock);
- i915_gpu_state_put(error);
+ if (!IS_ERR(error))
+ i915_gpu_state_put(error);
+}
+
+void i915_disable_error_state(struct drm_i915_private *i915, int err)
+{
+ spin_lock_irq(&i915->gpu_error.lock);
+ if (!i915->gpu_error.first_error)
+ i915->gpu_error.first_error = ERR_PTR(err);
+ spin_unlock_irq(&i915->gpu_error.lock);
}
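
[Aside: with i915_disable_error_state(), gpu_error.first_error becomes a tri-state slot:
NULL (nothing captured yet), a real capture, or an ERR_PTR() recording why capture is
disabled; note how i915_reset_error_state() above now guards its put with IS_ERR().
Consumers follow the same shape, sketched here:]

	struct i915_gpu_state *error = i915_first_error_state(i915);

	if (!error)
		return 0;		/* nothing captured */
	if (IS_ERR(error))
		return PTR_ERR(error);	/* capture disabled, e.g. -ENOMEM */

	/* ... render via i915_gpu_state_copy_to_buffer() ... */
	i915_gpu_state_put(error);
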
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 8710fb18ed74..ff2652bbb0b0 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -192,6 +192,8 @@ struct i915_gpu_state {
} *active_bo[I915_NUM_ENGINES], *pinned_bo;
u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
struct i915_address_space *active_vm[I915_NUM_ENGINES];
+
+ struct scatterlist *sgl, *fit;
};
struct i915_gpu_error {
@@ -298,29 +300,20 @@ struct i915_gpu_error {
struct drm_i915_error_state_buf {
struct drm_i915_private *i915;
- unsigned int bytes;
- unsigned int size;
+ struct scatterlist *sgl, *cur, *end;
+
+ char *buf;
+ size_t bytes;
+ size_t size;
+ loff_t iter;
+
int err;
- u8 *buf;
- loff_t start;
- loff_t pos;
};
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
- const struct i915_gpu_state *gpu);
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
- struct drm_i915_private *i915,
- size_t count, loff_t pos);
-
-static inline void
-i915_error_state_buf_release(struct drm_i915_error_state_buf *eb)
-{
- kfree(eb->buf);
-}
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
@@ -334,6 +327,9 @@ i915_gpu_state_get(struct i915_gpu_state *gpu)
return gpu;
}
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+ char *buf, loff_t offset, size_t count);
+
void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
@@ -343,6 +339,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
+void i915_disable_error_state(struct drm_i915_private *i915, int err);
#else
@@ -355,13 +352,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
+static inline void i915_disable_error_state(struct drm_i915_private *i915,
+ int err)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
#endif /* _I915_GPU_ERROR_H_ */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2e242270e270..d447d7d508f4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2887,21 +2887,39 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
return ret;
}
+static inline u32 gen8_master_intr_disable(void __iomem * const regs)
+{
+ raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
+
+ /*
+ * Now with master disabled, get a sample of level indications
+ * for this interrupt. Indications will be cleared on related acks.
+ * New indications can and will light up during processing,
+	 * and will generate a new interrupt once the master is re-enabled.
+ */
+ return raw_reg_read(regs, GEN8_MASTER_IRQ);
+}
+
+static inline void gen8_master_intr_enable(void __iomem * const regs)
+{
+ raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+}
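
[Aside: the two helpers capture the standard top-half shape for a hierarchical interrupt
controller: kill the master enable first so source bits keep latching without
retriggering the line, sample, handle, then re-enable so anything that arrived meanwhile
fires a fresh interrupt. Put together, a condensed sketch of the handler below:]

	static irqreturn_t master_irq(void __iomem *regs)
	{
		u32 master_ctl = gen8_master_intr_disable(regs);

		if (!master_ctl) {
			/* Not ours: re-enable and let the line settle. */
			gen8_master_intr_enable(regs);
			return IRQ_NONE;
		}

		/* ... ack and dispatch each source flagged in master_ctl ... */

		gen8_master_intr_enable(regs);	/* re-arm; latched sources refire */
		return IRQ_HANDLED;
	}
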
+
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = to_i915(arg);
+ void __iomem * const regs = dev_priv->regs;
u32 master_ctl;
u32 gt_iir[4];
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
- master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
- master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
- if (!master_ctl)
+ master_ctl = gen8_master_intr_disable(regs);
+ if (!master_ctl) {
+ gen8_master_intr_enable(regs);
return IRQ_NONE;
-
- I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
+ }
/* Find, clear, then process each source of interrupt */
gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
@@ -2913,7 +2931,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(dev_priv);
}
- I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ gen8_master_intr_enable(regs);
gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
@@ -3111,6 +3129,24 @@ gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
intel_opregion_asle_intr(dev_priv);
}
+static inline u32 gen11_master_intr_disable(void __iomem * const regs)
+{
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
+
+ /*
+ * Now with master disabled, get a sample of level indications
+ * for this interrupt. Indications will be cleared on related acks.
+ * New indications can and will light up during processing,
+	 * and will generate a new interrupt once the master is re-enabled.
+ */
+ return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
+}
+
+static inline void gen11_master_intr_enable(void __iomem * const regs)
+{
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
+}
+
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
@@ -3121,13 +3157,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(i915))
return IRQ_NONE;
- master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
- master_ctl &= ~GEN11_MASTER_IRQ;
- if (!master_ctl)
+ master_ctl = gen11_master_intr_disable(regs);
+ if (!master_ctl) {
+ gen11_master_intr_enable(regs);
return IRQ_NONE;
-
- /* Disable interrupts. */
- raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
+ }
/* Find, clear, then process each source of interrupt. */
gen11_gt_irq_handler(i915, master_ctl);
@@ -3147,8 +3181,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
- /* Acknowledge and enable interrupts. */
- raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
+ gen11_master_intr_enable(regs);
gen11_gu_misc_irq_handler(i915, gu_misc_iir);
@@ -3598,8 +3631,7 @@ static void gen8_irq_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- POSTING_READ(GEN8_MASTER_IRQ);
+ gen8_master_intr_disable(dev_priv->regs);
gen8_gt_irq_reset(dev_priv);
@@ -3641,13 +3673,15 @@ static void gen11_irq_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ gen11_master_intr_disable(dev_priv->regs);
gen11_gt_irq_reset(dev_priv);
I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
+ I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+ I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
@@ -4244,8 +4278,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ(GEN8_MASTER_IRQ);
+ gen8_master_intr_enable(dev_priv->regs);
return 0;
}
@@ -4307,8 +4340,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ gen11_master_intr_enable(dev_priv->regs);
return 0;
}
@@ -4834,6 +4866,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = false;
dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ /* If we have MST support, we want to avoid doing short HPD IRQ storm
+ * detection, as short HPD storms will occur as a natural part of
+ * sideband messaging with MST.
+	 * On older platforms, however, IRQ storms can occur with both long and
+ * short pulses, as seen on some G4x systems.
+ */
+ dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c
index 4abd2e8b5083..4acdb94555b7 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h
index b812d16162ac..0e667f1a8aa1 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.h
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_BDW_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c
index cb6f304ec16a..a44195c39923 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h
index 690b963a2383..679e92cf4f1d 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.h
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_BXT_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
index 8641ae30e343..7f60d51b8761 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
index 1f3268ef2ea2..4d6025559bbe 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_CFLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index 792facdb6702..a92c38e3a0ce 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
index c13b5aac01b9..0697f4077402 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_CFLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c
index 556febb2c3c8..71ec889a0114 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/i915_oa_chv.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h
index b9622496979e..0986eae3135f 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.h
+++ b/drivers/gpu/drm/i915/i915_oa_chv.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_CHV_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index ba9140c87cc0..5c23d883d6c9 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h
index fb918b131105..e830a406aff2 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.h
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_CNL_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c
index 971db587957c..4bdda66df7d2 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/i915_oa_glk.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h
index 63bd113f4bc9..06dedf991edb 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.h
+++ b/drivers/gpu/drm/i915/i915_oa_glk.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_GLK_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
index 434a9b96d7ab..cc6526fdd2bd 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
index 74d03439c157..3d0c870cd0bd 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.h
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_HSW_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c
index a5667926e3de..baa51427a543 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.c
+++ b/drivers/gpu/drm/i915/i915_oa_icl.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h
index ae1c24aafe4f..24eaa97d61ba 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.h
+++ b/drivers/gpu/drm/i915/i915_oa_icl.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_ICL_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
index 2fa98a40bbc8..168e49ab0d4d 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
index 25b803546dc1..a55398a904de 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_KBLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
index f3cb6679a1bc..6ffa553c388e 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
index d5b5b5c1923e..3ddd3483b7cc 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_KBLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
index bf8b8cd8a50d..7ce6ee851d43 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
index fe1aa2c03958..be6256037239 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_SKLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
index ae534c7c8135..086ca2631e1c 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
index 06746b2616c8..650beb068e56 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_SKLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
index 817fba2d82df..b291a6eb8a87 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
index 944fd525c8b1..8dcf849d131e 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_SKLGT4_H__
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 295e981e4a39..2e0356561839 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -82,10 +82,6 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0644,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-i915_param_named_unsafe(enable_ppgtt, int, 0400,
- "Override PPGTT usage. "
- "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
-
i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR "
"(0=disabled, 1=enabled) "
@@ -171,8 +167,10 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400,
i915_param_named(enable_dpcd_backlight, bool, 0600,
"Enable support for DPCD backlight control (default:false)");
+#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
+#endif
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
@@ -188,7 +186,8 @@ static __always_inline void _print_param(struct drm_printer *p,
else if (!__builtin_strcmp(type, "char *"))
drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
else
- BUILD_BUG();
+ WARN_ONCE(1, "no printer defined for param type %s (i915.%s)\n",
+ type, name);
}
/**
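A note on the _print_param() hunk above: it downgrades an unhandled parameter type from a compile-time BUILD_BUG() to a runtime WARN_ONCE(), so a param type without a printer now warns at runtime instead of failing the build. A minimal standalone sketch of this dispatch-on-type-string pattern (illustrative only, not part of the patch; all names hypothetical, not i915 code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Print a value based on a stringified type name; emit a warning for
 * types that have no printer rather than breaking the build. */
static void demo_print_param(const char *name, const char *type,
			     const void *x)
{
	if (!strcmp(type, "bool"))
		printf("demo.%s=%s\n", name, *(const bool *)x ? "yes" : "no");
	else if (!strcmp(type, "int"))
		printf("demo.%s=%d\n", name, *(const int *)x);
	else if (!strcmp(type, "char *"))
		printf("demo.%s=%s\n", name, *(const char *const *)x);
	else
		fprintf(stderr,
			"no printer defined for param type %s (demo.%s)\n",
			type, name);
}

int main(void)
{
	bool hangcheck = true;
	int psr = -1;

	demo_print_param("enable_hangcheck", "bool", &hangcheck);
	demo_print_param("enable_psr", "int", &psr);
	demo_print_param("mystery", "struct foo", NULL);	/* warns */
	return 0;
}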
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 6c4d4a21474b..7e56c516c815 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -41,7 +41,6 @@ struct drm_printer;
param(int, vbt_sdvo_panel_type, -1) \
param(int, enable_dc, -1) \
param(int, enable_fbc, -1) \
- param(int, enable_ppgtt, -1) \
param(int, enable_psr, -1) \
param(int, disable_power_well, -1) \
param(int, enable_ips, 1) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index d6f7b9fe1d26..6350db5503cd 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -33,19 +33,30 @@
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
#define GEN_DEFAULT_PIPEOFFSETS \
- .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
- PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
- .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
- TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
- .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+ }
#define GEN_CHV_PIPEOFFSETS \
- .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
- CHV_PIPE_C_OFFSET }, \
- .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
- CHV_TRANSCODER_C_OFFSET, }, \
- .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
- CHV_PALETTE_C_OFFSET }
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = CHV_PIPE_C_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
+ }
#define CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
@@ -68,8 +79,9 @@
#define GEN2_FEATURES \
GEN(2), \
.num_pipes = 1, \
- .has_overlay = 1, .overlay_needs_physical = 1, \
- .has_gmch_display = 1, \
+ .display.has_overlay = 1, \
+ .display.overlay_needs_physical = 1, \
+ .display.has_gmch_display = 1, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
@@ -82,7 +94,8 @@
static const struct intel_device_info intel_i830_info = {
GEN2_FEATURES,
PLATFORM(INTEL_I830),
- .is_mobile = 1, .cursor_needs_physical = 1,
+ .is_mobile = 1,
+ .display.cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
@@ -96,8 +109,8 @@ static const struct intel_device_info intel_i85x_info = {
PLATFORM(INTEL_I85X),
.is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
- .cursor_needs_physical = 1,
- .has_fbc = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_fbc = 1,
};
static const struct intel_device_info intel_i865g_info = {
@@ -108,7 +121,7 @@ static const struct intel_device_info intel_i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
.num_pipes = 2, \
- .has_gmch_display = 1, \
+ .display.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -120,8 +133,9 @@ static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915G),
.has_coherent_ggtt = false,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -130,10 +144,11 @@ static const struct intel_device_info intel_i915gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915GM),
.is_mobile = 1,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
+ .display.supports_tv = 1,
+ .display.has_fbc = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -141,8 +156,10 @@ static const struct intel_device_info intel_i915gm_info = {
static const struct intel_device_info intel_i945g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945G),
- .has_hotplug = 1, .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
+ .display.has_hotplug = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -151,10 +168,12 @@ static const struct intel_device_info intel_i945gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945GM),
.is_mobile = 1,
- .has_hotplug = 1, .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
+ .display.has_hotplug = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
+ .display.supports_tv = 1,
+ .display.has_fbc = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -162,23 +181,23 @@ static const struct intel_device_info intel_i945gm_info = {
static const struct intel_device_info intel_g33_info = {
GEN3_FEATURES,
PLATFORM(INTEL_G33),
- .has_hotplug = 1,
- .has_overlay = 1,
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
};
static const struct intel_device_info intel_pineview_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.is_mobile = 1,
- .has_hotplug = 1,
- .has_overlay = 1,
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
};
#define GEN4_FEATURES \
GEN(4), \
.num_pipes = 2, \
- .has_hotplug = 1, \
- .has_gmch_display = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -189,7 +208,7 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965G),
- .has_overlay = 1,
+ .display.has_overlay = 1,
.hws_needs_physical = 1,
.has_snoop = false,
};
@@ -197,9 +216,10 @@ static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965gm_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965GM),
- .is_mobile = 1, .has_fbc = 1,
- .has_overlay = 1,
- .supports_tv = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
+ .display.has_overlay = 1,
+ .display.supports_tv = 1,
.hws_needs_physical = 1,
.has_snoop = false,
};
@@ -213,15 +233,16 @@ static const struct intel_device_info intel_g45_info = {
static const struct intel_device_info intel_gm45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_GM45),
- .is_mobile = 1, .has_fbc = 1,
- .supports_tv = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
+ .display.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
#define GEN5_FEATURES \
GEN(5), \
.num_pipes = 2, \
- .has_hotplug = 1, \
+ .display.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -239,20 +260,21 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
- .is_mobile = 1, .has_fbc = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
};
#define GEN6_FEATURES \
GEN(6), \
.num_pipes = 2, \
- .has_hotplug = 1, \
- .has_fbc = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .has_aliasing_ppgtt = 1, \
+ .ppgtt = INTEL_PPGTT_ALIASING, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
@@ -290,15 +312,14 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
.num_pipes = 3, \
- .has_hotplug = 1, \
- .has_fbc = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .has_aliasing_ppgtt = 1, \
- .has_full_ppgtt = 1, \
+ .ppgtt = INTEL_PPGTT_FULL, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
IVB_CURSOR_OFFSETS
@@ -349,10 +370,9 @@ static const struct intel_device_info intel_valleyview_info = {
.num_pipes = 2,
.has_runtime_pm = 1,
.has_rc6 = 1,
- .has_gmch_display = 1,
- .has_hotplug = 1,
- .has_aliasing_ppgtt = 1,
- .has_full_ppgtt = 1,
+ .display.has_gmch_display = 1,
+ .display.has_hotplug = 1,
+ .ppgtt = INTEL_PPGTT_FULL,
.has_snoop = true,
.has_coherent_ggtt = false,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
@@ -365,10 +385,10 @@ static const struct intel_device_info intel_valleyview_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
- .has_ddi = 1, \
+ .display.has_ddi = 1, \
.has_fpga_dbg = 1, \
- .has_psr = 1, \
- .has_dp_mst = 1, \
+ .display.has_psr = 1, \
+ .display.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed by HSW */, \
.has_runtime_pm = 1
@@ -399,7 +419,7 @@ static const struct intel_device_info intel_haswell_gt3_info = {
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
- .has_full_48bit_ppgtt = 1, \
+ .ppgtt = INTEL_PPGTT_FULL_4LVL, \
.has_64bit_reloc = 1, \
.has_reset_engine = 1
@@ -435,16 +455,15 @@ static const struct intel_device_info intel_cherryview_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
.num_pipes = 3,
- .has_hotplug = 1,
+ .display.has_hotplug = 1,
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_logical_ring_contexts = 1,
- .has_gmch_display = 1,
- .has_aliasing_ppgtt = 1,
- .has_full_ppgtt = 1,
+ .display.has_gmch_display = 1,
+ .ppgtt = INTEL_PPGTT_FULL,
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -465,13 +484,15 @@ static const struct intel_device_info intel_cherryview_info = {
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
.has_logical_ring_preemption = 1, \
- .has_csr = 1, \
+ .display.has_csr = 1, \
.has_guc = 1, \
- .has_ipc = 1, \
+ .display.has_ipc = 1, \
.ddb_size = 896
#define SKL_PLATFORM \
GEN9_FEATURES, \
+ /* Display WA #0477 WaDisableIPC: skl */ \
+ .display.has_ipc = 0, \
PLATFORM(INTEL_SKYLAKE)
static const struct intel_device_info intel_skylake_gt1_info = {
@@ -502,29 +523,27 @@ static const struct intel_device_info intel_skylake_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
- .has_hotplug = 1, \
+ .display.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.num_pipes = 3, \
.has_64bit_reloc = 1, \
- .has_ddi = 1, \
+ .display.has_ddi = 1, \
.has_fpga_dbg = 1, \
- .has_fbc = 1, \
- .has_psr = 1, \
+ .display.has_fbc = 1, \
+ .display.has_psr = 1, \
.has_runtime_pm = 1, \
.has_pooled_eu = 0, \
- .has_csr = 1, \
+ .display.has_csr = 1, \
.has_rc6 = 1, \
- .has_dp_mst = 1, \
+ .display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_guc = 1, \
- .has_aliasing_ppgtt = 1, \
- .has_full_ppgtt = 1, \
- .has_full_48bit_ppgtt = 1, \
+ .ppgtt = INTEL_PPGTT_FULL_4LVL, \
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
- .has_ipc = 1, \
+ .display.has_ipc = 1, \
GEN9_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \
@@ -598,6 +617,22 @@ static const struct intel_device_info intel_cannonlake_info = {
#define GEN11_FEATURES \
GEN10_FEATURES, \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+ [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
+ }, \
GEN(11), \
.ddb_size = 2048, \
.has_logical_ring_elsq = 1
@@ -663,7 +698,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
- INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
+ INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
@@ -671,6 +706,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
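The device-info rework above leans on two C99 designated-initializer rules: array elements can be set by index (so the pipe/transcoder offset tables are keyed by enum rather than by position), and when the same member is initialized twice the last initializer wins, which is what the ".num_pipes = 2, /* legal, last one wins */" override relies on. A small self-contained sketch of both rules (illustrative only, not part of the patch; all names hypothetical):

#include <stdio.h>

enum demo_transcoder { TRANS_A, TRANS_B, TRANS_C, TRANS_EDP, TRANS_MAX };

struct demo_info {
	int num_pipes;
	unsigned int trans_offsets[TRANS_MAX];
};

#define DEMO_GEN2_FEATURES \
	.num_pipes = 1, \
	.trans_offsets = { \
		[TRANS_A]   = 0x60000, \
		[TRANS_B]   = 0x61000, \
		[TRANS_EDP] = 0x6f000, \
	}

static const struct demo_info demo_i830 = {
	DEMO_GEN2_FEATURES,
	.num_pipes = 2,		/* legal, last one wins */
};

int main(void)
{
	printf("pipes=%d edp=0x%x c=0x%x\n",
	       demo_i830.num_pipes,
	       demo_i830.trans_offsets[TRANS_EDP],
	       demo_i830.trans_offsets[TRANS_C]); /* untouched slot: zeroed */
	return 0;
}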
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 664b96bb65a3..4529edfdcfc8 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -890,8 +890,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
dev_priv->perf.oa.period_exponent);
- dev_priv->perf.oa.ops.oa_disable(dev_priv);
- dev_priv->perf.oa.ops.oa_enable(dev_priv);
+ dev_priv->perf.oa.ops.oa_disable(stream);
+ dev_priv->perf.oa.ops.oa_enable(stream);
/*
* Note: .oa_enable() is expected to re-init the oabuffer and
@@ -1114,8 +1114,8 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
dev_priv->perf.oa.period_exponent);
- dev_priv->perf.oa.ops.oa_disable(dev_priv);
- dev_priv->perf.oa.ops.oa_enable(dev_priv);
+ dev_priv->perf.oa.ops.oa_disable(stream);
+ dev_priv->perf.oa.ops.oa_enable(stream);
oastatus1 = I915_READ(GEN7_OASTATUS1);
}
@@ -1528,8 +1528,6 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
goto err_unpin;
}
- dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
-
DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
dev_priv->perf.oa.oa_buffer.vaddr);
@@ -1563,9 +1561,11 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
}
}
-static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config)
+static int hsw_enable_metric_set(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ const struct i915_oa_config *oa_config = stream->oa_config;
+
/* PRM:
*
* OA unit is using “crclk” for its functionality. When trunk
@@ -1767,9 +1767,10 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
return 0;
}
-static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config)
+static int gen8_enable_metric_set(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ const struct i915_oa_config *oa_config = stream->oa_config;
int ret;
/*
@@ -1837,10 +1838,10 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
}
-static void gen7_oa_enable(struct drm_i915_private *dev_priv)
+static void gen7_oa_enable(struct i915_perf_stream *stream)
{
- struct i915_gem_context *ctx =
- dev_priv->perf.oa.exclusive_stream->ctx;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_gem_context *ctx = stream->ctx;
u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
bool periodic = dev_priv->perf.oa.periodic;
u32 period_exponent = dev_priv->perf.oa.period_exponent;
@@ -1867,8 +1868,9 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv)
GEN7_OACONTROL_ENABLE);
}
-static void gen8_oa_enable(struct drm_i915_private *dev_priv)
+static void gen8_oa_enable(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
u32 report_format = dev_priv->perf.oa.oa_buffer.format;
/*
@@ -1905,7 +1907,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- dev_priv->perf.oa.ops.oa_enable(dev_priv);
+ dev_priv->perf.oa.ops.oa_enable(stream);
if (dev_priv->perf.oa.periodic)
hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
@@ -1913,8 +1915,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
HRTIMER_MODE_REL_PINNED);
}
-static void gen7_oa_disable(struct drm_i915_private *dev_priv)
+static void gen7_oa_disable(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
I915_WRITE(GEN7_OACONTROL, 0);
if (intel_wait_for_register(dev_priv,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
@@ -1922,8 +1926,10 @@ static void gen7_oa_disable(struct drm_i915_private *dev_priv)
DRM_ERROR("wait for OA to be disabled timed out\n");
}
-static void gen8_oa_disable(struct drm_i915_private *dev_priv)
+static void gen8_oa_disable(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
I915_WRITE(GEN8_OACONTROL, 0);
if (intel_wait_for_register(dev_priv,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
@@ -1943,7 +1949,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- dev_priv->perf.oa.ops.oa_disable(dev_priv);
+ dev_priv->perf.oa.ops.oa_disable(stream);
if (dev_priv->perf.oa.periodic)
hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
@@ -1998,7 +2004,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
- if (!dev_priv->perf.oa.ops.init_oa_buffer) {
+ if (!dev_priv->perf.oa.ops.enable_metric_set) {
DRM_DEBUG("OA unit not supported\n");
return -ENODEV;
}
@@ -2092,8 +2098,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (ret)
goto err_lock;
- ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
- stream->oa_config);
+ ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
@@ -3387,7 +3392,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.is_valid_mux_reg =
hsw_is_valid_mux_addr;
dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
- dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
@@ -3406,7 +3410,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
*/
dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
- dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
dev_priv->perf.oa.ops.read = gen8_oa_read;
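The i915_perf.c hunks above change the OA ops vtable to take the perf stream rather than the bare device pointer; each callback then recovers the device via stream->dev_priv and reads per-stream state (ctx, oa_config) directly instead of reaching through a global exclusive_stream. A minimal sketch of that refactor shape (illustrative only, not part of the patch; all names hypothetical):

#include <stdio.h>

struct demo_device { const char *name; };

struct demo_stream {
	struct demo_device *dev;	/* backpointer, like stream->dev_priv */
	int period_exponent;		/* per-stream state the ops now see */
};

struct demo_oa_ops {
	/* after the refactor: callbacks take the stream, not the device */
	void (*oa_enable)(struct demo_stream *stream);
	void (*oa_disable)(struct demo_stream *stream);
};

static void demo_oa_enable(struct demo_stream *stream)
{
	struct demo_device *dev = stream->dev;	/* recover the device locally */

	printf("%s: OA on (exponent=%d)\n", dev->name, stream->period_exponent);
}

static void demo_oa_disable(struct demo_stream *stream)
{
	printf("%s: OA off\n", stream->dev->name);
}

static const struct demo_oa_ops demo_ops = {
	.oa_enable  = demo_oa_enable,
	.oa_disable = demo_oa_disable,
};

int main(void)
{
	struct demo_device dev = { .name = "demo-gpu" };
	struct demo_stream stream = { .dev = &dev, .period_exponent = 12 };

	demo_ops.oa_disable(&stream);	/* restart sequence, as in the overflow path */
	demo_ops.oa_enable(&stream);
	return 0;
}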
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 3f502eef2431..6fc4b8eeab42 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -27,8 +27,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
slice_length = sizeof(sseu->slice_mask);
subslice_length = sseu->max_slices *
- DIV_ROUND_UP(sseu->max_subslices,
- sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
+ DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
eu_length = sseu->max_slices * sseu->max_subslices *
DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
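The query fix above sizes the per-slice subslice mask as ceil(max_subslices / 8) bytes; when the mask element is a single byte, the old divisor sizeof(subslice_mask[0]) * BITS_PER_BYTE also evaluates to 8, so the new spelling states the intent directly. DIV_ROUND_UP in the kernel is the usual ceiling division, (n + d - 1) / d. A quick standalone sketch (illustrative only, not part of the patch):

#include <stdio.h>

#define BITS_PER_BYTE		8
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* bytes needed to hold one mask bit per subslice */
	for (int max_subslices = 1; max_subslices <= 16; max_subslices += 5)
		printf("max_subslices=%2d -> mask bytes=%d\n",
		       max_subslices,
		       DIV_ROUND_UP(max_subslices, BITS_PER_BYTE));
	return 0;
}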
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7c491ea3d052..0a7d60509ca7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -157,20 +157,37 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/*
* Named helper wrappers around _PICK_EVEN() and _PICK().
*/
-#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
-#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
-#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
-#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
-#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
-#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
-#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
-#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
-#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
+#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
+#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
+#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
+#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
+
+#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
+#define _MMIO_PLANE(plane, a, b) _MMIO(_PLANE(plane, a, b))
+#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
+#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
+#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
+
+#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
+
+#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+
+/*
+ * Device info offset array based helpers for groups of registers with unevenly
+ * spaced base offsets.
+ */
+#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
+ dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
+#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
+ dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
+#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
+ dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
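+/*
+ * Illustration only, not part of the patch: the *2 helpers above
+ * compute a register address from the device-info offset tables, so
+ * groups with unevenly spaced bases (e.g. the EDP pipe at 0x7f000)
+ * need no special-casing.  A standalone sketch of the arithmetic,
+ * hypothetical names only:
+ */
+#if 0	/* illustration */
+static const unsigned int demo_pipe_off[] = {
+	0x70000, 0x71000, 0x72000, 0x7f000,	/* pipes A, B, C, EDP */
+};
+#define DEMO_PIPE_REG(pipe, reg) \
+	(demo_pipe_off[(pipe)] - demo_pipe_off[0] + (reg))
+/* DEMO_PIPE_REG(3, 0x70008) == 0x7f008: the EDP copy of a register
+ * defined relative to pipe A. */
+#endif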
#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_FIELD(mask, value) ({ \
@@ -1631,35 +1648,6 @@ enum i915_power_well_id {
#define PHY_RESERVED (1 << 7)
#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
-#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
-#define CL_POWER_DOWN_ENABLE (1 << 4)
-#define SUS_CLOCK_CONFIG (3 << 0)
-
-#define _ICL_PORT_CL_DW5_A 0x162014
-#define _ICL_PORT_CL_DW5_B 0x6C014
-#define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
- _ICL_PORT_CL_DW5_B)
-
-#define _CNL_PORT_CL_DW10_A 0x162028
-#define _ICL_PORT_CL_DW10_B 0x6c028
-#define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \
- _CNL_PORT_CL_DW10_A, \
- _ICL_PORT_CL_DW10_B)
-#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
-#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
-#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
-#define PWR_UP_ALL_LANES (0x0 << 4)
-#define PWR_DOWN_LN_3_2_1 (0xe << 4)
-#define PWR_DOWN_LN_3_2 (0xc << 4)
-#define PWR_DOWN_LN_3 (0x8 << 4)
-#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
-#define PWR_DOWN_LN_1_0 (0x3 << 4)
-#define PWR_DOWN_LN_1 (0x2 << 4)
-#define PWR_DOWN_LN_3_1 (0xa << 4)
-#define PWR_DOWN_LN_3_1_0 (0xb << 4)
-#define PWR_DOWN_LN_MASK (0xf << 4)
-#define PWR_DOWN_LN_SHIFT 4
-
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
@@ -1672,13 +1660,6 @@ enum i915_power_well_id {
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
-#define _ICL_PORT_CL_DW12_A 0x162030
-#define _ICL_PORT_CL_DW12_B 0x6C030
-#define ICL_LANE_ENABLE_AUX (1 << 0)
-#define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \
- _ICL_PORT_CL_DW12_A, \
- _ICL_PORT_CL_DW12_B)
-
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
@@ -1691,6 +1672,74 @@ enum i915_power_well_id {
#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
+/*
+ * CNL/ICL Port/COMBO-PHY Registers
+ */
+#define _ICL_COMBOPHY_A 0x162000
+#define _ICL_COMBOPHY_B 0x6C000
+#define _ICL_COMBOPHY(port) _PICK(port, _ICL_COMBOPHY_A, \
+ _ICL_COMBOPHY_B)
+
+/* CNL/ICL Port CL_DW registers */
+#define _ICL_PORT_CL_DW(dw, port) (_ICL_COMBOPHY(port) + \
+ 4 * (dw))
+
+#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
+#define ICL_PORT_CL_DW5(port) _MMIO(_ICL_PORT_CL_DW(5, port))
+#define CL_POWER_DOWN_ENABLE (1 << 4)
+#define SUS_CLOCK_CONFIG (3 << 0)
+
+#define ICL_PORT_CL_DW10(port) _MMIO(_ICL_PORT_CL_DW(10, port))
+#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
+#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
+#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
+#define PWR_UP_ALL_LANES (0x0 << 4)
+#define PWR_DOWN_LN_3_2_1 (0xe << 4)
+#define PWR_DOWN_LN_3_2 (0xc << 4)
+#define PWR_DOWN_LN_3 (0x8 << 4)
+#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
+#define PWR_DOWN_LN_1_0 (0x3 << 4)
+#define PWR_DOWN_LN_1 (0x2 << 4)
+#define PWR_DOWN_LN_3_1 (0xa << 4)
+#define PWR_DOWN_LN_3_1_0 (0xb << 4)
+#define PWR_DOWN_LN_MASK (0xf << 4)
+#define PWR_DOWN_LN_SHIFT 4
+
+#define ICL_PORT_CL_DW12(port) _MMIO(_ICL_PORT_CL_DW(12, port))
+#define ICL_LANE_ENABLE_AUX (1 << 0)
+
+/* CNL/ICL Port COMP_DW registers */
+#define _ICL_PORT_COMP 0x100
+#define _ICL_PORT_COMP_DW(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_COMP + 4 * (dw))
+
+#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
+#define ICL_PORT_COMP_DW0(port) _MMIO(_ICL_PORT_COMP_DW(0, port))
+#define COMP_INIT (1 << 31)
+
+#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
+#define ICL_PORT_COMP_DW1(port) _MMIO(_ICL_PORT_COMP_DW(1, port))
+
+#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
+#define ICL_PORT_COMP_DW3(port) _MMIO(_ICL_PORT_COMP_DW(3, port))
+#define PROCESS_INFO_DOT_0 (0 << 26)
+#define PROCESS_INFO_DOT_1 (1 << 26)
+#define PROCESS_INFO_DOT_4 (2 << 26)
+#define PROCESS_INFO_MASK (7 << 26)
+#define PROCESS_INFO_SHIFT 26
+#define VOLTAGE_INFO_0_85V (0 << 24)
+#define VOLTAGE_INFO_0_95V (1 << 24)
+#define VOLTAGE_INFO_1_05V (2 << 24)
+#define VOLTAGE_INFO_MASK (3 << 24)
+#define VOLTAGE_INFO_SHIFT 24
+
+#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
+#define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port))
+
+#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
+#define ICL_PORT_COMP_DW10(port) _MMIO(_ICL_PORT_COMP_DW(10, port))
+
+/* CNL/ICL Port PCS registers */
#define _CNL_PORT_PCS_DW1_GRP_AE 0x162304
#define _CNL_PORT_PCS_DW1_GRP_B 0x162384
#define _CNL_PORT_PCS_DW1_GRP_C 0x162B04
@@ -1708,7 +1757,6 @@ enum i915_power_well_id {
_CNL_PORT_PCS_DW1_GRP_D, \
_CNL_PORT_PCS_DW1_GRP_AE, \
_CNL_PORT_PCS_DW1_GRP_F))
-
#define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_B, \
@@ -1717,24 +1765,21 @@ enum i915_power_well_id {
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_F))
-#define _ICL_PORT_PCS_DW1_GRP_A 0x162604
-#define _ICL_PORT_PCS_DW1_GRP_B 0x6C604
-#define _ICL_PORT_PCS_DW1_LN0_A 0x162804
-#define _ICL_PORT_PCS_DW1_LN0_B 0x6C804
-#define _ICL_PORT_PCS_DW1_AUX_A 0x162304
-#define _ICL_PORT_PCS_DW1_AUX_B 0x6c304
-#define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\
- _ICL_PORT_PCS_DW1_GRP_A, \
- _ICL_PORT_PCS_DW1_GRP_B)
-#define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \
- _ICL_PORT_PCS_DW1_LN0_A, \
- _ICL_PORT_PCS_DW1_LN0_B)
-#define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \
- _ICL_PORT_PCS_DW1_AUX_A, \
- _ICL_PORT_PCS_DW1_AUX_B)
+#define _ICL_PORT_PCS_AUX 0x300
+#define _ICL_PORT_PCS_GRP 0x600
+#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100)
+#define _ICL_PORT_PCS_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_PCS_AUX + 4 * (dw))
+#define _ICL_PORT_PCS_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_PCS_GRP + 4 * (dw))
+#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_PCS_LN(ln) + 4 * (dw))
+#define ICL_PORT_PCS_DW1_AUX(port) _MMIO(_ICL_PORT_PCS_DW_AUX(1, port))
+#define ICL_PORT_PCS_DW1_GRP(port) _MMIO(_ICL_PORT_PCS_DW_GRP(1, port))
+#define ICL_PORT_PCS_DW1_LN0(port) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port))
#define COMMON_KEEPER_EN (1 << 26)
-/* CNL Port TX registers */
+/* CNL/ICL Port TX registers */
#define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340
#define _CNL_PORT_TX_B_GRP_OFFSET 0x1623C0
#define _CNL_PORT_TX_C_GRP_OFFSET 0x162B40
@@ -1762,23 +1807,22 @@ enum i915_power_well_id {
_CNL_PORT_TX_F_LN0_OFFSET) + \
4 * (dw))
-#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
-#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
-#define _ICL_PORT_TX_DW2_GRP_A 0x162688
-#define _ICL_PORT_TX_DW2_GRP_B 0x6C688
-#define _ICL_PORT_TX_DW2_LN0_A 0x162888
-#define _ICL_PORT_TX_DW2_LN0_B 0x6C888
-#define _ICL_PORT_TX_DW2_AUX_A 0x162388
-#define _ICL_PORT_TX_DW2_AUX_B 0x6c388
-#define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW2_GRP_A, \
- _ICL_PORT_TX_DW2_GRP_B)
-#define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW2_LN0_A, \
- _ICL_PORT_TX_DW2_LN0_B)
-#define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW2_AUX_A, \
- _ICL_PORT_TX_DW2_AUX_B)
+#define _ICL_PORT_TX_AUX 0x380
+#define _ICL_PORT_TX_GRP 0x680
+#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100)
+
+#define _ICL_PORT_TX_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_TX_AUX + 4 * (dw))
+#define _ICL_PORT_TX_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_TX_GRP + 4 * (dw))
+#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_TX_LN(ln) + 4 * (dw))
+
+#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(2, port))
+#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(2, port))
+#define ICL_PORT_TX_DW2_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(2, port))
+#define ICL_PORT_TX_DW2_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(2, port))
+#define ICL_PORT_TX_DW2_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, port))
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
@@ -1795,24 +1839,10 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
-#define _ICL_PORT_TX_DW4_GRP_A 0x162690
-#define _ICL_PORT_TX_DW4_GRP_B 0x6C690
-#define _ICL_PORT_TX_DW4_LN0_A 0x162890
-#define _ICL_PORT_TX_DW4_LN1_A 0x162990
-#define _ICL_PORT_TX_DW4_LN0_B 0x6C890
-#define _ICL_PORT_TX_DW4_AUX_A 0x162390
-#define _ICL_PORT_TX_DW4_AUX_B 0x6c390
-#define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW4_GRP_A, \
- _ICL_PORT_TX_DW4_GRP_B)
-#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \
- _ICL_PORT_TX_DW4_LN0_A, \
- _ICL_PORT_TX_DW4_LN0_B) + \
- ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
- _ICL_PORT_TX_DW4_LN0_A)))
-#define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW4_AUX_A, \
- _ICL_PORT_TX_DW4_AUX_B)
+#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
+#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port))
+#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
+#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1821,23 +1851,11 @@ enum i915_power_well_id {
#define CURSOR_COEFF(x) ((x) << 0)
#define CURSOR_COEFF_MASK (0x3F << 0)
-#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 5))
-#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 5))
-#define _ICL_PORT_TX_DW5_GRP_A 0x162694
-#define _ICL_PORT_TX_DW5_GRP_B 0x6C694
-#define _ICL_PORT_TX_DW5_LN0_A 0x162894
-#define _ICL_PORT_TX_DW5_LN0_B 0x6C894
-#define _ICL_PORT_TX_DW5_AUX_A 0x162394
-#define _ICL_PORT_TX_DW5_AUX_B 0x6c394
-#define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW5_GRP_A, \
- _ICL_PORT_TX_DW5_GRP_B)
-#define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW5_LN0_A, \
- _ICL_PORT_TX_DW5_LN0_B)
-#define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW5_AUX_A, \
- _ICL_PORT_TX_DW5_AUX_B)
+#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(5, port))
+#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(5, port))
+#define ICL_PORT_TX_DW5_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(5, port))
+#define ICL_PORT_TX_DW5_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(5, port))
+#define ICL_PORT_TX_DW5_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, port))
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
@@ -2054,49 +2072,16 @@ enum i915_power_well_id {
#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
-#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
-#define COMP_INIT (1 << 31)
-#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
-#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
-#define PROCESS_INFO_DOT_0 (0 << 26)
-#define PROCESS_INFO_DOT_1 (1 << 26)
-#define PROCESS_INFO_DOT_4 (2 << 26)
-#define PROCESS_INFO_MASK (7 << 26)
-#define PROCESS_INFO_SHIFT 26
-#define VOLTAGE_INFO_0_85V (0 << 24)
-#define VOLTAGE_INFO_0_95V (1 << 24)
-#define VOLTAGE_INFO_1_05V (2 << 24)
-#define VOLTAGE_INFO_MASK (3 << 24)
-#define VOLTAGE_INFO_SHIFT 24
-#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
-#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
-
-#define _ICL_PORT_COMP_DW0_A 0x162100
-#define _ICL_PORT_COMP_DW0_B 0x6C100
-#define ICL_PORT_COMP_DW0(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \
- _ICL_PORT_COMP_DW0_B)
-#define _ICL_PORT_COMP_DW1_A 0x162104
-#define _ICL_PORT_COMP_DW1_B 0x6C104
-#define ICL_PORT_COMP_DW1(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \
- _ICL_PORT_COMP_DW1_B)
-#define _ICL_PORT_COMP_DW3_A 0x16210C
-#define _ICL_PORT_COMP_DW3_B 0x6C10C
-#define ICL_PORT_COMP_DW3(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \
- _ICL_PORT_COMP_DW3_B)
-#define _ICL_PORT_COMP_DW9_A 0x162124
-#define _ICL_PORT_COMP_DW9_B 0x6C124
-#define ICL_PORT_COMP_DW9(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \
- _ICL_PORT_COMP_DW9_B)
-#define _ICL_PORT_COMP_DW10_A 0x162128
-#define _ICL_PORT_COMP_DW10_B 0x6C128
-#define ICL_PORT_COMP_DW10(port) _MMIO_PORT(port, \
- _ICL_PORT_COMP_DW10_A, \
- _ICL_PORT_COMP_DW10_B)
+#define FIA1_BASE 0x163000
/* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
-#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n)))
-#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n)))
+#define PORT_TX_DFLEXDPMLE1 _MMIO(FIA1_BASE + 0x008C0)
+#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port)))
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
@@ -2413,6 +2398,7 @@ enum i915_power_well_id {
#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
@@ -2573,6 +2559,7 @@ enum i915_power_well_id {
/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+#define GEN11_ENABLE_32_PLANE_MODE (1 << 7)
/* WaClearTdlStateAckDirtyBits */
#define GEN8_STATE_ACK _MMIO(0x20F0)
@@ -3475,11 +3462,13 @@ enum i915_power_well_id {
/*
* Palette regs
*/
-#define PALETTE_A_OFFSET 0xa000
-#define PALETTE_B_OFFSET 0xa800
-#define CHV_PALETTE_C_OFFSET 0xc000
-#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] + \
- dev_priv->info.display_mmio_offset + (i) * 4)
+#define _PALETTE_A 0xa000
+#define _PALETTE_B 0xa800
+#define _CHV_PALETTE_C 0xc000
+#define PALETTE(pipe, i) _MMIO(dev_priv->info.display_mmio_offset + \
+ _PICK((pipe), _PALETTE_A, \
+ _PALETTE_B, _CHV_PALETTE_C) + \
+ (i) * 4)
/* MCH MMIO space */
@@ -4061,15 +4050,27 @@ enum {
#define _VSYNCSHIFT_B 0x61028
#define _PIPE_MULT_B 0x6102c
+/* DSI 0 timing regs */
+#define _HTOTAL_DSI0 0x6b000
+#define _HSYNC_DSI0 0x6b008
+#define _VTOTAL_DSI0 0x6b00c
+#define _VSYNC_DSI0 0x6b014
+#define _VSYNCSHIFT_DSI0 0x6b028
+
+/* DSI 1 timing regs */
+#define _HTOTAL_DSI1 0x6b800
+#define _HSYNC_DSI1 0x6b808
+#define _VTOTAL_DSI1 0x6b80c
+#define _VSYNC_DSI1 0x6b814
+#define _VSYNCSHIFT_DSI1 0x6b828
+
#define TRANSCODER_A_OFFSET 0x60000
#define TRANSCODER_B_OFFSET 0x61000
#define TRANSCODER_C_OFFSET 0x62000
#define CHV_TRANSCODER_C_OFFSET 0x63000
#define TRANSCODER_EDP_OFFSET 0x6f000
-
-#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
- dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
+#define TRANSCODER_DSI0_OFFSET 0x6b000
+#define TRANSCODER_DSI1_OFFSET 0x6b800
#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A)
#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A)
@@ -4149,9 +4150,13 @@ enum {
/* Bspec claims those aren't shifted but stay at 0x64800 */
#define EDP_PSR_IMR _MMIO(0x64834)
#define EDP_PSR_IIR _MMIO(0x64838)
-#define EDP_PSR_ERROR(trans) (1 << (((trans) * 8 + 10) & 31))
-#define EDP_PSR_POST_EXIT(trans) (1 << (((trans) * 8 + 9) & 31))
-#define EDP_PSR_PRE_ENTRY(trans) (1 << (((trans) * 8 + 8) & 31))
+#define EDP_PSR_ERROR(shift) (1 << ((shift) + 2))
+#define EDP_PSR_POST_EXIT(shift) (1 << ((shift) + 1))
+#define EDP_PSR_PRE_ENTRY(shift) (1 << (shift))
+#define EDP_PSR_TRANSCODER_C_SHIFT 24
+#define EDP_PSR_TRANSCODER_B_SHIFT 16
+#define EDP_PSR_TRANSCODER_A_SHIFT 8
+#define EDP_PSR_TRANSCODER_EDP_SHIFT 0
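+/*
+ * Illustration only, not part of the patch: assuming the usual
+ * TRANSCODER_A..TRANSCODER_EDP = 0..3 indexing, the old combined form
+ * "(trans * 8 + bit) & 31" and the new shift-based macros agree; the
+ * "& 31" existed only to wrap TRANSCODER_EDP (index 3) around to bit 0.
+ */
+#if 0	/* sketch, hypothetical name */
+#define DEMO_OLD_PRE_ENTRY(trans)	(1 << (((trans) * 8 + 8) & 31))
+/* DEMO_OLD_PRE_ENTRY(1) == (1 << 16) ==
+ * EDP_PSR_PRE_ENTRY(EDP_PSR_TRANSCODER_B_SHIFT) */
+#endif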
#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26)
@@ -4195,7 +4200,7 @@ enum {
#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
#define EDP_PSR_DEBUG_MASK_HPD (1 << 25)
-#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16)
+#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */
#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
#define EDP_PSR2_CTL _MMIO(0x6f900)
@@ -4232,7 +4237,7 @@ enum {
#define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9)
#define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8)
#define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6)
-#define PSR_EVENT_REGISTER_UPDATE (1 << 5)
+#define PSR_EVENT_REGISTER_UPDATE (1 << 5) /* Reserved in ICL+ */
#define PSR_EVENT_HDCP_ENABLE (1 << 4)
#define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3)
#define PSR_EVENT_VBI_ENABLE (1 << 2)
@@ -4565,6 +4570,7 @@ enum {
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_VSC_DATA_SIZE 36
+#define VIDEO_DIP_PPS_DATA_SIZE 132
#define VIDEO_DIP_CTL _MMIO(0x61170)
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
@@ -4584,6 +4590,15 @@ enum {
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
#define VIDEO_DIP_FREQ_MASK (3 << 16)
/* HSW and later: */
+#define DRM_DIP_ENABLE (1 << 28)
+#define PSR_VSC_BIT_7_SET (1 << 27)
+#define VSC_SELECT_MASK (0x3 << 25)
+#define VSC_SELECT_SHIFT 25
+#define VSC_DIP_HW_HEA_DATA (0 << 25)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
+#define VSC_DIP_SW_HEA_DATA (3 << 25)
+#define VDIP_ENABLE_PPS (1 << 24)
#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
@@ -4591,16 +4606,6 @@ enum {
#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
-#define DRM_DIP_ENABLE (1 << 28)
-#define PSR_VSC_BIT_7_SET (1 << 27)
-#define VSC_SELECT_MASK (0x3 << 26)
-#define VSC_SELECT_SHIFT 26
-#define VSC_DIP_HW_HEA_DATA (0 << 26)
-#define VSC_DIP_HW_HEA_SW_DATA (1 << 26)
-#define VSC_DIP_HW_DATA_SW_HEA (2 << 26)
-#define VSC_DIP_SW_HEA_DATA (3 << 26)
-#define VDIP_ENABLE_PPS (1 << 24)
-
/* Panel power sequencing */
#define PPS_BASE 0x61200
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
@@ -4613,6 +4618,17 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
#define PP_ON (1 << 31)
+
+#define _PP_CONTROL_1 0xc7204
+#define _PP_CONTROL_2 0xc7304
+#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
+ _PP_CONTROL_2)
+#define POWER_CYCLE_DELAY_MASK (0x1f << 4)
+#define POWER_CYCLE_DELAY_SHIFT 4
+#define VDD_OVERRIDE_FORCE (1 << 3)
+#define BACKLIGHT_ENABLE (1 << 2)
+#define PWR_DOWN_ON_RESET (1 << 1)
+#define PWR_STATE_TARGET (1 << 0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -5636,9 +5652,9 @@ enum {
*/
#define PIPE_EDP_OFFSET 0x7f000
-#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
- dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
+/* ICL DSI 0 and 1 */
+#define PIPE_DSI0_OFFSET 0x7b000
+#define PIPE_DSI1_OFFSET 0x7b800
#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF)
#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
@@ -6087,10 +6103,6 @@ enum {
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
-#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
- dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
-
#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
@@ -6224,6 +6236,10 @@ enum {
#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4)
#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC)
+/* ICL DSI 0 and 1 */
+#define _PIPEDSI0CONF 0x7b008
+#define _PIPEDSI1CONF 0x7b808
+
/* Sprite A control */
#define _DVSACNTR 0x72180
#define DVS_ENABLE (1 << 31)
@@ -6511,6 +6527,7 @@ enum {
#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21)
#define PLANE_CTL_ORDER_BGRX (0 << 20)
#define PLANE_CTL_ORDER_RGBX (1 << 20)
+#define PLANE_CTL_YUV420_Y_PLANE (1 << 19)
#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18)
#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
#define PLANE_CTL_YUV422_YUYV (0 << 16)
@@ -6554,17 +6571,33 @@ enum {
#define _PLANE_KEYVAL_2_A 0x70294
#define _PLANE_KEYMSK_1_A 0x70198
#define _PLANE_KEYMSK_2_A 0x70298
+#define PLANE_KEYMSK_ALPHA_ENABLE (1 << 31)
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
+#define PLANE_KEYMAX_ALPHA(a) ((a) << 24)
#define _PLANE_AUX_DIST_1_A 0x701c0
#define _PLANE_AUX_DIST_2_A 0x702c0
#define _PLANE_AUX_OFFSET_1_A 0x701c4
#define _PLANE_AUX_OFFSET_2_A 0x702c4
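+/* CUS: chroma upsampler control; pairs a UV plane with its Y plane for planar formats (ICL+, per the fields below) */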
+#define _PLANE_CUS_CTL_1_A 0x701c8
+#define _PLANE_CUS_CTL_2_A 0x702c8
+#define PLANE_CUS_ENABLE (1 << 31)
+#define PLANE_CUS_PLANE_6 (0 << 30)
+#define PLANE_CUS_PLANE_7 (1 << 30)
+#define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19)
+#define PLANE_CUS_HPHASE_0 (0 << 16)
+#define PLANE_CUS_HPHASE_0_25 (1 << 16)
+#define PLANE_CUS_HPHASE_0_5 (2 << 16)
+#define PLANE_CUS_VPHASE_SIGN_NEGATIVE (1 << 15)
+#define PLANE_CUS_VPHASE_0 (0 << 12)
+#define PLANE_CUS_VPHASE_0_25 (1 << 12)
+#define PLANE_CUS_VPHASE_0_5 (2 << 12)
#define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */
#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */
#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */
#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
+#define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */
#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */
#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17)
#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17)
@@ -6581,6 +6614,55 @@ enum {
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
#define _PLANE_NV12_BUF_CFG_2_A 0x70378
+/* Input CSC Register Definitions */
+#define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0
+#define _PLANE_INPUT_CSC_RY_GY_2_A 0x702E0
+
+#define _PLANE_INPUT_CSC_RY_GY_1_B 0x711E0
+#define _PLANE_INPUT_CSC_RY_GY_2_B 0x712E0
+
+#define _PLANE_INPUT_CSC_RY_GY_1(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_1_A, \
+ _PLANE_INPUT_CSC_RY_GY_1_B)
+#define _PLANE_INPUT_CSC_RY_GY_2(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \
+ _PLANE_INPUT_CSC_RY_GY_2_B)
+
+#define PLANE_INPUT_CSC_COEFF(pipe, plane, index) \
+ _MMIO_PLANE(plane, _PLANE_INPUT_CSC_RY_GY_1(pipe) + (index) * 4, \
+ _PLANE_INPUT_CSC_RY_GY_2(pipe) + (index) * 4)
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1_A 0x701F8
+#define _PLANE_INPUT_CSC_PREOFF_HI_2_A 0x702F8
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1_B 0x711F8
+#define _PLANE_INPUT_CSC_PREOFF_HI_2_B 0x712F8
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_1_A, \
+ _PLANE_INPUT_CSC_PREOFF_HI_1_B)
+#define _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_2_A, \
+ _PLANE_INPUT_CSC_PREOFF_HI_2_B)
+#define PLANE_INPUT_CSC_PREOFF(pipe, plane, index) \
+ _MMIO_PLANE(plane, _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) + (index) * 4, \
+ _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) + (index) * 4)
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1_A 0x70204
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2_A 0x70304
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1_B 0x71204
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2_B 0x71304
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_1_A, \
+ _PLANE_INPUT_CSC_POSTOFF_HI_1_B)
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_2_A, \
+ _PLANE_INPUT_CSC_POSTOFF_HI_2_B)
+#define PLANE_INPUT_CSC_POSTOFF(pipe, plane, index) \
+ _MMIO_PLANE(plane, _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) + (index) * 4, \
+ _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) + (index) * 4)
#define _PLANE_CTL_1_B 0x71180
#define _PLANE_CTL_2_B 0x71280
@@ -6697,6 +6779,15 @@ enum {
#define PLANE_AUX_OFFSET(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe))
+#define _PLANE_CUS_CTL_1_B 0x711c8
+#define _PLANE_CUS_CTL_2_B 0x712c8
+#define _PLANE_CUS_CTL_1(pipe) \
+ _PIPE(pipe, _PLANE_CUS_CTL_1_A, _PLANE_CUS_CTL_1_B)
+#define _PLANE_CUS_CTL_2(pipe) \
+ _PIPE(pipe, _PLANE_CUS_CTL_2_A, _PLANE_CUS_CTL_2_B)
+#define PLANE_CUS_CTL(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_CUS_CTL_1(pipe), _PLANE_CUS_CTL_2(pipe))
+
#define _PLANE_COLOR_CTL_1_B 0x711CC
#define _PLANE_COLOR_CTL_2_B 0x712CC
#define _PLANE_COLOR_CTL_3_B 0x713CC
@@ -6850,11 +6941,12 @@ enum {
#define _PS_2B_CTRL 0x68A80
#define _PS_1C_CTRL 0x69180
#define PS_SCALER_EN (1 << 31)
-#define PS_SCALER_MODE_MASK (3 << 28)
-#define PS_SCALER_MODE_DYN (0 << 28)
-#define PS_SCALER_MODE_HQ (1 << 28)
+#define SKL_PS_SCALER_MODE_MASK (3 << 28)
+#define SKL_PS_SCALER_MODE_DYN (0 << 28)
+#define SKL_PS_SCALER_MODE_HQ (1 << 28)
#define SKL_PS_SCALER_MODE_NV12 (2 << 28)
#define PS_SCALER_MODE_PLANAR (1 << 29)
+#define PS_SCALER_MODE_NORMAL (0 << 29)
#define PS_PLANE_SEL_MASK (7 << 25)
#define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
#define PS_FILTER_MASK (3 << 23)
@@ -6871,6 +6963,8 @@ enum {
#define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5)
#define PS_VADAPT_MODE_MOD_ADAPT (1 << 5)
#define PS_VADAPT_MODE_MOST_ADAPT (3 << 5)
+#define PS_PLANE_Y_SEL_MASK (7 << 5)
+#define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5)
#define _PS_PWR_GATE_1A 0x68160
#define _PS_PWR_GATE_2A 0x68260
@@ -7317,9 +7411,10 @@ enum {
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
-#define CHICKEN_TRANS_A 0x420c0
-#define CHICKEN_TRANS_B 0x420c4
-#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
+#define CHICKEN_TRANS_A _MMIO(0x420c0)
+#define CHICKEN_TRANS_B _MMIO(0x420c4)
+#define CHICKEN_TRANS_C _MMIO(0x420c8)
+#define CHICKEN_TRANS_EDP _MMIO(0x420cc)
#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
@@ -7409,6 +7504,10 @@ enum {
#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
+#define GEN7_SARCHKMD _MMIO(0xB000)
+#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31)
+#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30)
+
#define GEN7_L3SQCREG1 _MMIO(0xB010)
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -7663,6 +7762,7 @@ enum {
#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
#define ICP_DDIA_HPD_ENABLE (1 << 3)
+#define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2)
#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
@@ -7824,8 +7924,7 @@ enum {
#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
#define CNP_RAWCLK_DIV(div) ((div) << 16)
#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
-#define CNP_RAWCLK_FRAC(frac) ((frac) << 26)
-#define ICP_RAWCLK_DEN(den) ((den) << 26)
+#define CNP_RAWCLK_DEN(den) ((den) << 26)
#define ICP_RAWCLK_NUM(num) ((num) << 11)
#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
@@ -8625,8 +8724,7 @@ enum {
#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9)
#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
-#define GAMW_ECO_DEV_RW_IA_REG _MMIO(0x4080)
-#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
+#define GEN10_SAMPLER_MODE _MMIO(0xE18C)
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
@@ -8927,6 +9025,15 @@ enum skl_power_gate {
#define CNL_AUX_ANAOVRD1_ENABLE (1 << 16)
#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23)
+#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
+#define _ICL_AUX_ANAOVRD1_A 0x162398
+#define _ICL_AUX_ANAOVRD1_B 0x6C398
+#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
+ _ICL_AUX_ANAOVRD1_A, \
+ _ICL_AUX_ANAOVRD1_B))
+#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
+#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
+
/* HDCP Key Registers */
#define HDCP_KEY_CONF _MMIO(0x66c00)
#define HDCP_AKSV_SEND_TRIGGER BIT(31)
@@ -9009,11 +9116,45 @@ enum skl_power_gate {
#define HDCP_STATUS_CIPHER BIT(16)
#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
+/* HDCP2.2 Registers */
+#define _PORTA_HDCP2_BASE 0x66800
+#define _PORTB_HDCP2_BASE 0x66500
+#define _PORTC_HDCP2_BASE 0x66600
+#define _PORTD_HDCP2_BASE 0x66700
+#define _PORTE_HDCP2_BASE 0x66A00
+#define _PORTF_HDCP2_BASE 0x66900
+#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \
+ _PORTA_HDCP2_BASE, \
+ _PORTB_HDCP2_BASE, \
+ _PORTC_HDCP2_BASE, \
+ _PORTD_HDCP2_BASE, \
+ _PORTE_HDCP2_BASE, \
+ _PORTF_HDCP2_BASE) + (x))
+
+#define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98)
+#define AUTH_LINK_AUTHENTICATED BIT(31)
+#define AUTH_LINK_TYPE BIT(30)
+#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
+#define AUTH_CLR_KEYS BIT(18)
+
+#define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define CTL_LINK_ENCRYPTION_REQ BIT(31)
+
+#define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4)
+#define STREAM_ENCRYPTION_STATUS_A BIT(31)
+#define STREAM_ENCRYPTION_STATUS_B BIT(30)
+#define STREAM_ENCRYPTION_STATUS_C BIT(29)
+#define LINK_TYPE_STATUS BIT(22)
+#define LINK_AUTH_STATUS BIT(21)
+#define LINK_ENCRYPTION_STATUS BIT(20)
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
#define _TRANS_DDI_FUNC_CTL_C 0x62400
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
+#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
#define TRANS_DDI_FUNC_ENABLE (1 << 31)
@@ -9051,11 +9192,25 @@ enum skl_power_gate {
| TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
| TRANS_DDI_HDMI_SCRAMBLING)
+#define _TRANS_DDI_FUNC_CTL2_A 0x60404
+#define _TRANS_DDI_FUNC_CTL2_B 0x61404
+#define _TRANS_DDI_FUNC_CTL2_C 0x62404
+#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
+#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
+#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
+#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
+ _TRANS_DDI_FUNC_CTL2_A)
+#define PORT_SYNC_MODE_ENABLE (1 << 4)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
+#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
+#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
+
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
#define _DP_TP_CTL_B 0x64140
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
#define DP_TP_CTL_ENABLE (1 << 31)
+#define DP_TP_CTL_FEC_ENABLE (1 << 30)
#define DP_TP_CTL_MODE_SST (0 << 27)
#define DP_TP_CTL_MODE_MST (1 << 27)
#define DP_TP_CTL_FORCE_ACT (1 << 25)
@@ -9074,6 +9229,7 @@ enum skl_power_gate {
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
#define DP_TP_STATUS_IDLE_DONE (1 << 25)
#define DP_TP_STATUS_ACT_SENT (1 << 24)
#define DP_TP_STATUS_MODE_STATUS_MST (1 << 23)
@@ -9222,6 +9378,8 @@ enum skl_power_gate {
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
#define TRANS_MSA_SYNC_CLK (1 << 0)
+#define TRANS_MSA_SAMPLING_444 (2 << 1)
+#define TRANS_MSA_CLRSP_YCBCR (2 << 3)
#define TRANS_MSA_6_BPC (0 << 5)
#define TRANS_MSA_8_BPC (1 << 5)
#define TRANS_MSA_10_BPC (2 << 5)
@@ -9789,6 +9947,10 @@ enum skl_power_gate {
#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
+/* Gen11 DSI */
+#define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \
+ dsi0, dsi1)
+
#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF
#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
@@ -9952,6 +10114,39 @@ enum skl_power_gate {
_ICL_DSI_IO_MODECTL_1)
#define COMBO_PHY_MODE_DSI (1 << 0)
+/* Display Stream Splitter Control */
+#define DSS_CTL1 _MMIO(0x67400)
+#define SPLITTER_ENABLE (1 << 31)
+#define JOINER_ENABLE (1 << 30)
+#define DUAL_LINK_MODE_INTERLEAVE (1 << 24)
+#define DUAL_LINK_MODE_FRONTBACK (0 << 24)
+#define OVERLAP_PIXELS_MASK (0xf << 16)
+#define OVERLAP_PIXELS(pixels) ((pixels) << 16)
+#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
+
+#define DSS_CTL2 _MMIO(0x67404)
+#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
+#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
+#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+
+#define _ICL_PIPE_DSS_CTL1_PB 0x78200
+#define _ICL_PIPE_DSS_CTL1_PC 0x78400
+#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL1_PB, \
+ _ICL_PIPE_DSS_CTL1_PC)
+#define BIG_JOINER_ENABLE (1 << 29)
+#define MASTER_BIG_JOINER_ENABLE (1 << 28)
+#define VGA_CENTERING_ENABLE (1 << 27)
+
+#define _ICL_PIPE_DSS_CTL2_PB 0x78204
+#define _ICL_PIPE_DSS_CTL2_PC 0x78404
+#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL2_PB, \
+ _ICL_PIPE_DSS_CTL2_PC)
+
#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
#define STAP_SELECT (1 << 0)
@@ -10288,6 +10483,235 @@ enum skl_power_gate {
_ICL_DSI_T_INIT_MASTER_0,\
_ICL_DSI_T_INIT_MASTER_1)
+#define _DPHY_CLK_TIMING_PARAM_0 0x162180
+#define _DPHY_CLK_TIMING_PARAM_1 0x6c180
+#define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_CLK_TIMING_PARAM_0,\
+ _DPHY_CLK_TIMING_PARAM_1)
+#define _DSI_CLK_TIMING_PARAM_0 0x6b080
+#define _DSI_CLK_TIMING_PARAM_1 0x6b880
+#define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_CLK_TIMING_PARAM_0,\
+ _DSI_CLK_TIMING_PARAM_1)
+#define CLK_PREPARE_OVERRIDE (1 << 31)
+#define CLK_PREPARE(x) ((x) << 28)
+#define CLK_PREPARE_MASK (0x7 << 28)
+#define CLK_PREPARE_SHIFT 28
+#define CLK_ZERO_OVERRIDE (1 << 27)
+#define CLK_ZERO(x) ((x) << 20)
+#define CLK_ZERO_MASK (0xf << 20)
+#define CLK_ZERO_SHIFT 20
+#define CLK_PRE_OVERRIDE (1 << 19)
+#define CLK_PRE(x) ((x) << 16)
+#define CLK_PRE_MASK (0x3 << 16)
+#define CLK_PRE_SHIFT 16
+#define CLK_POST_OVERRIDE (1 << 15)
+#define CLK_POST(x) ((x) << 8)
+#define CLK_POST_MASK (0x7 << 8)
+#define CLK_POST_SHIFT 8
+#define CLK_TRAIL_OVERRIDE (1 << 7)
+#define CLK_TRAIL(x) ((x) << 0)
+#define CLK_TRAIL_MASK (0xf << 0)
+#define CLK_TRAIL_SHIFT 0
+
+#define _DPHY_DATA_TIMING_PARAM_0 0x162184
+#define _DPHY_DATA_TIMING_PARAM_1 0x6c184
+#define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_DATA_TIMING_PARAM_0,\
+ _DPHY_DATA_TIMING_PARAM_1)
+#define _DSI_DATA_TIMING_PARAM_0 0x6B084
+#define _DSI_DATA_TIMING_PARAM_1 0x6B884
+#define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_DATA_TIMING_PARAM_0,\
+ _DSI_DATA_TIMING_PARAM_1)
+#define HS_PREPARE_OVERRIDE (1 << 31)
+#define HS_PREPARE(x) ((x) << 24)
+#define HS_PREPARE_MASK (0x7 << 24)
+#define HS_PREPARE_SHIFT 24
+#define HS_ZERO_OVERRIDE (1 << 23)
+#define HS_ZERO(x) ((x) << 16)
+#define HS_ZERO_MASK (0xf << 16)
+#define HS_ZERO_SHIFT 16
+#define HS_TRAIL_OVERRIDE (1 << 15)
+#define HS_TRAIL(x) ((x) << 8)
+#define HS_TRAIL_MASK (0x7 << 8)
+#define HS_TRAIL_SHIFT 8
+#define HS_EXIT_OVERRIDE (1 << 7)
+#define HS_EXIT(x) ((x) << 0)
+#define HS_EXIT_MASK (0x7 << 0)
+#define HS_EXIT_SHIFT 0
+
+#define _DPHY_TA_TIMING_PARAM_0 0x162188
+#define _DPHY_TA_TIMING_PARAM_1 0x6c188
+#define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_TA_TIMING_PARAM_0,\
+ _DPHY_TA_TIMING_PARAM_1)
+#define _DSI_TA_TIMING_PARAM_0 0x6b098
+#define _DSI_TA_TIMING_PARAM_1 0x6b898
+#define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_TA_TIMING_PARAM_0,\
+ _DSI_TA_TIMING_PARAM_1)
+#define TA_SURE_OVERRIDE (1 << 31)
+#define TA_SURE(x) ((x) << 16)
+#define TA_SURE_MASK (0x1f << 16)
+#define TA_SURE_SHIFT 16
+#define TA_GO_OVERRIDE (1 << 15)
+#define TA_GO(x) ((x) << 8)
+#define TA_GO_MASK (0xf << 8)
+#define TA_GO_SHIFT 8
+#define TA_GET_OVERRIDE (1 << 7)
+#define TA_GET(x) ((x) << 0)
+#define TA_GET_MASK (0xf << 0)
+#define TA_GET_SHIFT 0
+
+/* DSI transcoder configuration */
+#define _DSI_TRANS_FUNC_CONF_0 0x6b030
+#define _DSI_TRANS_FUNC_CONF_1 0x6b830
+#define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \
+ _DSI_TRANS_FUNC_CONF_0,\
+ _DSI_TRANS_FUNC_CONF_1)
+#define OP_MODE_MASK (0x3 << 28)
+#define OP_MODE_SHIFT 28
+#define CMD_MODE_NO_GATE (0x0 << 28)
+#define CMD_MODE_TE_GATE (0x1 << 28)
+#define VIDEO_MODE_SYNC_EVENT (0x2 << 28)
+#define VIDEO_MODE_SYNC_PULSE (0x3 << 28)
+#define LINK_READY (1 << 20)
+#define PIX_FMT_MASK (0x3 << 16)
+#define PIX_FMT_SHIFT 16
+#define PIX_FMT_RGB565 (0x0 << 16)
+#define PIX_FMT_RGB666_PACKED (0x1 << 16)
+#define PIX_FMT_RGB666_LOOSE (0x2 << 16)
+#define PIX_FMT_RGB888 (0x3 << 16)
+#define PIX_FMT_RGB101010 (0x4 << 16)
+#define PIX_FMT_RGB121212 (0x5 << 16)
+#define PIX_FMT_COMPRESSED (0x6 << 16)
+#define BGR_TRANSMISSION (1 << 15)
+#define PIX_VIRT_CHAN(x) ((x) << 12)
+#define PIX_VIRT_CHAN_MASK (0x3 << 12)
+#define PIX_VIRT_CHAN_SHIFT 12
+#define PIX_BUF_THRESHOLD_MASK (0x3 << 10)
+#define PIX_BUF_THRESHOLD_SHIFT 10
+#define PIX_BUF_THRESHOLD_1_4 (0x0 << 10)
+#define PIX_BUF_THRESHOLD_1_2 (0x1 << 10)
+#define PIX_BUF_THRESHOLD_3_4 (0x2 << 10)
+#define PIX_BUF_THRESHOLD_FULL (0x3 << 10)
+#define CONTINUOUS_CLK_MASK (0x3 << 8)
+#define CONTINUOUS_CLK_SHIFT 8
+#define CLK_ENTER_LP_AFTER_DATA (0x0 << 8)
+#define CLK_HS_OR_LP (0x2 << 8)
+#define CLK_HS_CONTINUOUS (0x3 << 8)
+#define LINK_CALIBRATION_MASK (0x3 << 4)
+#define LINK_CALIBRATION_SHIFT 4
+#define CALIBRATION_DISABLED (0x0 << 4)
+#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4)
+#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4)
+#define S3D_ORIENTATION_LANDSCAPE (1 << 1)
+#define EOTP_DISABLED (1 << 0)
+
+#define _DSI_CMD_RXCTL_0 0x6b0d4
+#define _DSI_CMD_RXCTL_1 0x6b8d4
+#define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_RXCTL_0,\
+ _DSI_CMD_RXCTL_1)
+#define READ_UNLOADS_DW (1 << 16)
+#define RECEIVED_UNASSIGNED_TRIGGER (1 << 15)
+#define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14)
+#define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13)
+#define RECEIVED_RESET_TRIGGER (1 << 12)
+#define RECEIVED_PAYLOAD_WAS_LOST (1 << 11)
+#define RECEIVED_CRC_WAS_LOST (1 << 10)
+#define NUMBER_RX_PLOAD_DW_MASK (0xff << 0)
+#define NUMBER_RX_PLOAD_DW_SHIFT 0
+
+#define _DSI_CMD_TXCTL_0 0x6b0d0
+#define _DSI_CMD_TXCTL_1 0x6b8d0
+#define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXCTL_0,\
+ _DSI_CMD_TXCTL_1)
+#define KEEP_LINK_IN_HS (1 << 24)
+#define FREE_HEADER_CREDIT_MASK (0x1f << 8)
+#define FREE_HEADER_CREDIT_SHIFT 0x8
+#define FREE_PLOAD_CREDIT_MASK (0xff << 0)
+#define FREE_PLOAD_CREDIT_SHIFT 0
+#define MAX_HEADER_CREDIT 0x10
+#define MAX_PLOAD_CREDIT 0x40
+
+#define _DSI_CMD_TXHDR_0 0x6b100
+#define _DSI_CMD_TXHDR_1 0x6b900
+#define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXHDR_0,\
+ _DSI_CMD_TXHDR_1)
+#define PAYLOAD_PRESENT (1 << 31)
+#define LP_DATA_TRANSFER (1 << 30)
+#define VBLANK_FENCE (1 << 29)
+#define PARAM_WC_MASK (0xffff << 8)
+#define PARAM_WC_LOWER_SHIFT 8
+#define PARAM_WC_UPPER_SHIFT 16
+#define VC_MASK (0x3 << 6)
+#define VC_SHIFT 6
+#define DT_MASK (0x3f << 0)
+#define DT_SHIFT 0
+
+#define _DSI_CMD_TXPYLD_0 0x6b104
+#define _DSI_CMD_TXPYLD_1 0x6b904
+#define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXPYLD_0,\
+ _DSI_CMD_TXPYLD_1)
+
+#define _DSI_LP_MSG_0 0x6b0d8
+#define _DSI_LP_MSG_1 0x6b8d8
+#define DSI_LP_MSG(tc) _MMIO_DSI(tc, \
+ _DSI_LP_MSG_0,\
+ _DSI_LP_MSG_1)
+#define LPTX_IN_PROGRESS (1 << 17)
+#define LINK_IN_ULPS (1 << 16)
+#define LINK_ULPS_TYPE_LP11 (1 << 8)
+#define LINK_ENTER_ULPS (1 << 0)
+
+/* DSI timeout registers */
+#define _DSI_HSTX_TO_0 0x6b044
+#define _DSI_HSTX_TO_1 0x6b844
+#define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \
+ _DSI_HSTX_TO_0,\
+ _DSI_HSTX_TO_1)
+#define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16)
+#define HSTX_TIMEOUT_VALUE_SHIFT 16
+#define HSTX_TIMEOUT_VALUE(x) ((x) << 16)
+#define HSTX_TIMED_OUT (1 << 0)
+
+#define _DSI_LPRX_HOST_TO_0 0x6b048
+#define _DSI_LPRX_HOST_TO_1 0x6b848
+#define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \
+ _DSI_LPRX_HOST_TO_0,\
+ _DSI_LPRX_HOST_TO_1)
+#define LPRX_TIMED_OUT (1 << 16)
+#define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define LPRX_TIMEOUT_VALUE_SHIFT 0
+#define LPRX_TIMEOUT_VALUE(x) ((x) << 0)
+
+#define _DSI_PWAIT_TO_0 0x6b040
+#define _DSI_PWAIT_TO_1 0x6b840
+#define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \
+ _DSI_PWAIT_TO_0,\
+ _DSI_PWAIT_TO_1)
+#define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16)
+#define PRESET_TIMEOUT_VALUE_SHIFT 16
+#define PRESET_TIMEOUT_VALUE(x) ((x) << 16)
+#define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define PRESPONSE_TIMEOUT_VALUE_SHIFT 0
+#define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0)
+
+#define _DSI_TA_TO_0 0x6b04c
+#define _DSI_TA_TO_1 0x6b84c
+#define DSI_TA_TO(tc) _MMIO_DSI(tc, \
+ _DSI_TA_TO_0,\
+ _DSI_TA_TO_1)
+#define TA_TIMED_OUT (1 << 16)
+#define TA_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define TA_TIMEOUT_VALUE_SHIFT 0
+#define TA_TIMEOUT_VALUE(x) ((x) << 0)
+
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
@@ -10400,10 +10824,6 @@ enum skl_power_gate {
#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
-/* For UMS only (deprecated): */
-#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
-#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
-
/* MOCS (Memory Object Control State) registers */
#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
@@ -10689,6 +11109,7 @@ enum skl_power_gate {
#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
_ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20)
#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
@@ -10743,17 +11164,17 @@ enum skl_power_gate {
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
-#define PORT_TX_DFLEXDPSP _MMIO(0x1638A0)
+#define PORT_TX_DFLEXDPSP _MMIO(FIA1_BASE + 0x008A0)
#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6))
#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5))
#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8)
#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8))
#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8))
-#define PORT_TX_DFLEXDPPMS _MMIO(0x163890)
+#define PORT_TX_DFLEXDPPMS _MMIO(FIA1_BASE + 0x00890)
#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port))
-#define PORT_TX_DFLEXDPCSSS _MMIO(0x163894)
+#define PORT_TX_DFLEXDPCSSS _MMIO(FIA1_BASE + 0x00894)
#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port))
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a492385b2089..ca95ab2f4cfa 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
-{
- return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
-}
-
-static void
-i915_dependency_free(struct drm_i915_private *i915,
- struct i915_dependency *dep)
-{
- kmem_cache_free(i915->dependencies, dep);
-}
-
-static void
-__i915_sched_node_add_dependency(struct i915_sched_node *node,
- struct i915_sched_node *signal,
- struct i915_dependency *dep,
- unsigned long flags)
-{
- INIT_LIST_HEAD(&dep->dfs_link);
- list_add(&dep->wait_link, &signal->waiters_list);
- list_add(&dep->signal_link, &node->signalers_list);
- dep->signaler = signal;
- dep->flags = flags;
-}
-
-static int
-i915_sched_node_add_dependency(struct drm_i915_private *i915,
- struct i915_sched_node *node,
- struct i915_sched_node *signal)
-{
- struct i915_dependency *dep;
-
- dep = i915_dependency_alloc(i915);
- if (!dep)
- return -ENOMEM;
-
- __i915_sched_node_add_dependency(node, signal, dep,
- I915_DEPENDENCY_ALLOC);
- return 0;
-}
-
-static void
-i915_sched_node_fini(struct drm_i915_private *i915,
- struct i915_sched_node *node)
-{
- struct i915_dependency *dep, *tmp;
-
- GEM_BUG_ON(!list_empty(&node->link));
-
- /*
- * Everyone we depended upon (the fences we wait to be signaled)
- * should retire before us and remove themselves from our list.
- * However, retirement is run independently on each timeline and
- * so we may be called out-of-order.
- */
- list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
- GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
- GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
- list_del(&dep->wait_link);
- if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
- }
-
- /* Remove ourselves from everyone who depends upon us */
- list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
- GEM_BUG_ON(dep->signaler != node);
- GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
- list_del(&dep->signal_link);
- if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
- }
-}
-
-static void
-i915_sched_node_init(struct i915_sched_node *node)
-{
- INIT_LIST_HEAD(&node->signalers_list);
- INIT_LIST_HEAD(&node->waiters_list);
- INIT_LIST_HEAD(&node->link);
- node->attr.priority = I915_PRIORITY_INVALID;
-}
-
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
struct intel_engine_cs *engine;
@@ -221,6 +136,11 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
intel_engine_get_seqno(engine),
seqno);
+ if (seqno == engine->timeline.seqno)
+ continue;
+
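+ /*
+ * Park the signaler thread so it cannot observe the seqno being
+ * rewritten beneath it; it is unparked again once the new value is in
+ * place (intent inferred from this reset sequence).
+ */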
+ kthread_park(engine->breadcrumbs.signaler);
+
if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
/* Flush any waiters before we reuse the seqno */
intel_engine_disarm_breadcrumbs(engine);
@@ -235,6 +155,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
/* Finally reset hw state */
intel_engine_init_global_seqno(engine, seqno);
engine->timeline.seqno = seqno;
+
+ kthread_unpark(engine->breadcrumbs.signaler);
}
list_for_each_entry(timeline, &i915->gt.timelines, link)
@@ -740,17 +662,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
if (rq)
cond_synchronize_rcu(rq->rcustate);
- /*
- * We've forced the client to stall and catch up with whatever
- * backlog there might have been. As we are assuming that we
- * caused the mempressure, now is an opportune time to
- * recover as much memory from the request pool as is possible.
- * Having already penalized the client to stall, we spend
- * a little extra time to re-optimise page allocation.
- */
- kmem_cache_shrink(i915->requests);
- rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
-
rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
if (!rq) {
ret = -ENOMEM;
@@ -1127,8 +1038,20 @@ void i915_request_add(struct i915_request *request)
*/
local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
- if (engine->schedule)
- engine->schedule(request, &request->gem_context->sched);
+ if (engine->schedule) {
+ struct i915_sched_attr attr = request->gem_context->sched;
+
+ /*
+ * Boost priorities to new clients (new request flows).
+ *
+ * Allow interactive/synchronous clients to jump ahead of
+ * the bulk clients. (FQ_CODEL)
+ */
+ if (!prev || i915_request_completed(prev))
+ attr.priority |= I915_PRIORITY_NEWCLIENT;
+
+ engine->schedule(request, &attr);
+ }
rcu_read_unlock();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -1310,6 +1233,8 @@ long i915_request_wait(struct i915_request *rq,
add_wait_queue(errq, &reset);
intel_wait_init(&wait);
+ if (flags & I915_WAIT_PRIORITY)
+ i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
restart:
do {
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7fa94b024968..90e9d170a0cd 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -277,8 +277,9 @@ long i915_request_wait(struct i915_request *rq,
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
-#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
+#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */
+#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
u32 seqno);
@@ -332,14 +333,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
return __i915_request_completed(rq, seqno);
}
-static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
-{
- const struct i915_request *rq =
- container_of(node, const struct i915_request, sched);
-
- return i915_request_completed(rq);
-}
-
void i915_retire_requests(struct drm_i915_private *i915);
/*
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
new file mode 100644
index 000000000000..340faea6c08a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -0,0 +1,399 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/mutex.h>
+
+#include "i915_drv.h"
+#include "i915_request.h"
+#include "i915_scheduler.h"
+
+static DEFINE_SPINLOCK(schedule_lock);
+
+static const struct i915_request *
+node_to_request(const struct i915_sched_node *node)
+{
+ return container_of(node, const struct i915_request, sched);
+}
+
+static inline bool node_signaled(const struct i915_sched_node *node)
+{
+ return i915_request_completed(node_to_request(node));
+}
+
+void i915_sched_node_init(struct i915_sched_node *node)
+{
+ INIT_LIST_HEAD(&node->signalers_list);
+ INIT_LIST_HEAD(&node->waiters_list);
+ INIT_LIST_HEAD(&node->link);
+ node->attr.priority = I915_PRIORITY_INVALID;
+}
+
+static struct i915_dependency *
+i915_dependency_alloc(struct drm_i915_private *i915)
+{
+ return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+}
+
+static void
+i915_dependency_free(struct drm_i915_private *i915,
+ struct i915_dependency *dep)
+{
+ kmem_cache_free(i915->dependencies, dep);
+}
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+ struct i915_sched_node *signal,
+ struct i915_dependency *dep,
+ unsigned long flags)
+{
+ bool ret = false;
+
+ spin_lock(&schedule_lock);
+
+ if (!node_signaled(signal)) {
+ INIT_LIST_HEAD(&dep->dfs_link);
+ list_add(&dep->wait_link, &signal->waiters_list);
+ list_add(&dep->signal_link, &node->signalers_list);
+ dep->signaler = signal;
+ dep->flags = flags;
+
+ ret = true;
+ }
+
+ spin_unlock(&schedule_lock);
+
+ return ret;
+}
+
+int i915_sched_node_add_dependency(struct drm_i915_private *i915,
+ struct i915_sched_node *node,
+ struct i915_sched_node *signal)
+{
+ struct i915_dependency *dep;
+
+ dep = i915_dependency_alloc(i915);
+ if (!dep)
+ return -ENOMEM;
+
+ if (!__i915_sched_node_add_dependency(node, signal, dep,
+ I915_DEPENDENCY_ALLOC))
+ i915_dependency_free(i915, dep);
+
+ return 0;
+}
+
+void i915_sched_node_fini(struct drm_i915_private *i915,
+ struct i915_sched_node *node)
+{
+ struct i915_dependency *dep, *tmp;
+
+ GEM_BUG_ON(!list_empty(&node->link));
+
+ spin_lock(&schedule_lock);
+
+ /*
+ * Everyone we depended upon (the fences we wait to be signaled)
+ * should retire before us and remove themselves from our list.
+ * However, retirement is run independently on each timeline and
+ * so we may be called out-of-order.
+ */
+ list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
+ GEM_BUG_ON(!node_signaled(dep->signaler));
+ GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+ list_del(&dep->wait_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+
+ /* Remove ourselves from everyone who depends upon us */
+ list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
+ GEM_BUG_ON(dep->signaler != node);
+ GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+ list_del(&dep->signal_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+
+ spin_unlock(&schedule_lock);
+}
+
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+static void assert_priolists(struct intel_engine_execlists * const execlists,
+ long queue_priority)
+{
+ struct rb_node *rb;
+ long last_prio, i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
+ rb_first(&execlists->queue.rb_root));
+
+ last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+ const struct i915_priolist *p = to_priolist(rb);
+
+ GEM_BUG_ON(p->priority >= last_prio);
+ last_prio = p->priority;
+
+ GEM_BUG_ON(!p->used);
+ for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
+ if (list_empty(&p->requests[i]))
+ continue;
+
+ GEM_BUG_ON(!(p->used & BIT(i)));
+ }
+ }
+}
+
+struct list_head *
+i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_priolist *p;
+ struct rb_node **parent, *rb;
+ bool first = true;
+ int idx, i;
+
+ lockdep_assert_held(&engine->timeline.lock);
+ assert_priolists(execlists, INT_MAX);
+
+ /* buckets sorted from highest [in slot 0] to lowest priority */
+ idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
+ prio >>= I915_USER_PRIORITY_SHIFT;
+ if (unlikely(execlists->no_priolist))
+ prio = I915_PRIORITY_NORMAL;
+
+find_priolist:
+ /* most positive priority is scheduled first, equal priorities fifo */
+ rb = NULL;
+ parent = &execlists->queue.rb_root.rb_node;
+ while (*parent) {
+ rb = *parent;
+ p = to_priolist(rb);
+ if (prio > p->priority) {
+ parent = &rb->rb_left;
+ } else if (prio < p->priority) {
+ parent = &rb->rb_right;
+ first = false;
+ } else {
+ goto out;
+ }
+ }
+
+ if (prio == I915_PRIORITY_NORMAL) {
+ p = &execlists->default_priolist;
+ } else {
+ p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+ /* Convert an allocation failure to a priority bump */
+ if (unlikely(!p)) {
+ prio = I915_PRIORITY_NORMAL; /* recurses just once */
+
+ /* To maintain ordering with all rendering, after an
+ * allocation failure we have to disable all scheduling.
+ * Requests will then be executed in fifo, and schedule
+ * will ensure that dependencies are emitted in fifo.
+ * There will still be some reordering with existing
+ * requests, so if userspace lied about their
+ * dependencies, that reordering may be visible.
+ */
+ execlists->no_priolist = true;
+ goto find_priolist;
+ }
+ }
+
+ p->priority = prio;
+ for (i = 0; i < ARRAY_SIZE(p->requests); i++)
+ INIT_LIST_HEAD(&p->requests[i]);
+ rb_link_node(&p->node, rb, parent);
+ rb_insert_color_cached(&p->node, &execlists->queue, first);
+ p->used = 0;
+
+out:
+ p->used |= BIT(idx);
+ return &p->requests[idx];
+}
+
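+/*
+ * Only one engine timeline lock is held at a time: drop the currently
+ * held lock before taking the lock of the signaler's engine.
+ */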
+static struct intel_engine_cs *
+sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
+{
+ struct intel_engine_cs *engine = node_to_request(node)->engine;
+
+ GEM_BUG_ON(!locked);
+
+ if (engine != locked) {
+ spin_unlock(&locked->timeline.lock);
+ spin_lock(&engine->timeline.lock);
+ }
+
+ return engine;
+}
+
+static void __i915_schedule(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
+ struct list_head *uninitialized_var(pl);
+ struct intel_engine_cs *engine, *last;
+ struct i915_dependency *dep, *p;
+ struct i915_dependency stack;
+ const int prio = attr->priority;
+ LIST_HEAD(dfs);
+
+ /* Needed in order to use the temporary link inside i915_dependency */
+ lockdep_assert_held(&schedule_lock);
+ GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
+
+ if (i915_request_completed(rq))
+ return;
+
+ if (prio <= READ_ONCE(rq->sched.attr.priority))
+ return;
+
+ stack.signaler = &rq->sched;
+ list_add(&stack.dfs_link, &dfs);
+
+ /*
+ * Recursively bump all dependent priorities to match the new request.
+ *
+ * A naive approach would be to use recursion:
+ * static void update_priorities(struct i915_sched_node *node, prio) {
+ * list_for_each_entry(dep, &node->signalers_list, signal_link)
+ * update_priorities(dep->signal, prio)
+ * queue_request(node);
+ * }
+ * but that may have unlimited recursion depth and so runs a very
+ * real risk of overrunning the kernel stack. Instead, we build
+ * a flat list of all dependencies starting with the current request.
+ * As we walk the list of dependencies, we add all of its dependencies
+ * to the end of the list (this may include an already visited
+ * request) and continue to walk onwards onto the new dependencies. The
+ * end result is a topological list of requests in reverse order, the
+ * last element in the list is the request we must execute first.
+ */
+ list_for_each_entry(dep, &dfs, dfs_link) {
+ struct i915_sched_node *node = dep->signaler;
+
+ /*
+ * Within an engine, there can be no cycle, but we may
+ * refer to the same dependency chain multiple times
+ * (redundant dependencies are not eliminated) and across
+ * engines.
+ */
+ list_for_each_entry(p, &node->signalers_list, signal_link) {
+ GEM_BUG_ON(p == dep); /* no cycles! */
+
+ if (node_signaled(p->signaler))
+ continue;
+
+ GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
+ if (prio > READ_ONCE(p->signaler->attr.priority))
+ list_move_tail(&p->dfs_link, &dfs);
+ }
+ }
+
+ /*
+ * If we didn't need to bump any existing priorities, and we haven't
+ * yet submitted this request (i.e. there is no potential race with
+ * execlists_submit_request()), we can set our own priority and skip
+ * acquiring the engine locks.
+ */
+ if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ rq->sched.attr = *attr;
+
+ if (stack.dfs_link.next == stack.dfs_link.prev)
+ return;
+
+ __list_del_entry(&stack.dfs_link);
+ }
+
+ last = NULL;
+ engine = rq->engine;
+ spin_lock_irq(&engine->timeline.lock);
+
+ /* Fifo and depth-first replacement ensure our deps execute before us */
+ list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
+ struct i915_sched_node *node = dep->signaler;
+
+ INIT_LIST_HEAD(&dep->dfs_link);
+
+ engine = sched_lock_engine(node, engine);
+
+ /* Recheck after acquiring the engine->timeline.lock */
+ if (prio <= node->attr.priority || node_signaled(node))
+ continue;
+
+ node->attr.priority = prio;
+ if (!list_empty(&node->link)) {
+ if (last != engine) {
+ pl = i915_sched_lookup_priolist(engine, prio);
+ last = engine;
+ }
+ list_move_tail(&node->link, pl);
+ } else {
+ /*
+ * If the request is not in the priolist queue because
+ * it is not yet runnable, then it doesn't contribute
+ * to our preemption decisions. On the other hand,
+ * if the request is on the HW, it too is not in the
+ * queue; but in that case we may still need to reorder
+ * the inflight requests.
+ */
+ if (!i915_sw_fence_done(&node_to_request(node)->submit))
+ continue;
+ }
+
+ if (prio <= engine->execlists.queue_priority)
+ continue;
+
+ /*
+ * If we are already the currently executing context, don't
+ * bother evaluating if we should preempt ourselves.
+ */
+ if (node_to_request(node)->global_seqno &&
+ i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
+ node_to_request(node)->global_seqno))
+ continue;
+
+ /* Defer (tasklet) submission until after all of our updates. */
+ engine->execlists.queue_priority = prio;
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+
+ spin_unlock_irq(&engine->timeline.lock);
+}
+
+void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
+{
+ spin_lock(&schedule_lock);
+ __i915_schedule(rq, attr);
+ spin_unlock(&schedule_lock);
+}
+
+void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
+{
+ struct i915_sched_attr attr;
+
+ GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
+
+ if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
+ return;
+
+ spin_lock_bh(&schedule_lock);
+
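+ /* bump only sets low-order internal bits (see the GEM_BUG_ON above), leaving the user priority untouched */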
+ attr = rq->sched.attr;
+ attr.priority |= bump;
+ __i915_schedule(rq, &attr);
+
+ spin_unlock_bh(&schedule_lock);
+}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 70a42220358d..dbe9cb7ecd82 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -8,9 +8,14 @@
#define _I915_SCHEDULER_H_
#include <linux/bitops.h>
+#include <linux/kernel.h>
#include <uapi/drm/i915_drm.h>
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+
enum {
I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
@@ -19,6 +24,15 @@ enum {
I915_PRIORITY_INVALID = INT_MIN
};
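+/*
+ * User-visible priority levels occupy the bits above
+ * I915_USER_PRIORITY_SHIFT; the low bits are reserved for internal
+ * priority bumps such as I915_PRIORITY_WAIT and I915_PRIORITY_NEWCLIENT.
+ */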
+#define I915_USER_PRIORITY_SHIFT 2
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
+
+#define I915_PRIORITY_WAIT ((u8)BIT(0))
+#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
+
struct i915_sched_attr {
/**
* @priority: execution and service priority
@@ -69,4 +83,26 @@ struct i915_dependency {
#define I915_DEPENDENCY_ALLOC BIT(0)
};
+void i915_sched_node_init(struct i915_sched_node *node);
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+ struct i915_sched_node *signal,
+ struct i915_dependency *dep,
+ unsigned long flags);
+
+int i915_sched_node_add_dependency(struct drm_i915_private *i915,
+ struct i915_sched_node *node,
+ struct i915_sched_node *signal);
+
+void i915_sched_node_fini(struct drm_i915_private *i915,
+ struct i915_sched_node *node);
+
+void i915_schedule(struct i915_request *request,
+ const struct i915_sched_attr *attr);
+
+void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
+
+struct list_head *
+i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
+
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 6dbeed079ae5..fc2eeab823b7 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -1,10 +1,7 @@
/*
- * (C) Copyright 2016 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
+ * (C) Copyright 2016 Intel Corporation
*/
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index fe2ef4dadfc6..0e055ea0179f 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -1,10 +1,9 @@
/*
+ * SPDX-License-Identifier: MIT
+ *
* i915_sw_fence.h - library routines for N:M synchronisation points
*
* Copyright (C) 2016 Intel Corporation
- *
- * This file is released under the GPLv2.
- *
*/
#ifndef _I915_SW_FENCE_H_
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c
index 58f8d0cc125c..60404dbb2e9f 100644
--- a/drivers/gpu/drm/i915/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/i915_syncmap.c
@@ -92,7 +92,7 @@ void i915_syncmap_init(struct i915_syncmap **root)
{
BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP);
BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT);
- BUILD_BUG_ON(KSYNCMAP > BITS_PER_BYTE * sizeof((*root)->bitmap));
+ BUILD_BUG_ON(KSYNCMAP > BITS_PER_TYPE((*root)->bitmap));
*root = NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e5e6f6bb2b05..535caebd9813 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -483,7 +483,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static const struct attribute *gen6_attrs[] = {
+static const struct attribute * const gen6_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_boost_freq_mhz.attr,
@@ -495,7 +495,7 @@ static const struct attribute *gen6_attrs[] = {
NULL,
};
-static const struct attribute *vlv_attrs[] = {
+static const struct attribute * const vlv_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_boost_freq_mhz.attr,
@@ -516,26 +516,21 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_i915_error_state_buf error_str;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gpu_state *gpu;
ssize_t ret;
- ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
- if (ret)
- return ret;
-
- gpu = i915_first_error_state(dev_priv);
- ret = i915_error_state_to_str(&error_str, gpu);
- if (ret)
- goto out;
-
- ret = count < error_str.bytes ? count : error_str.bytes;
- memcpy(buf, error_str.buf, ret);
+ gpu = i915_first_error_state(i915);
+ if (gpu) {
+ ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
+ i915_gpu_state_put(gpu);
+ } else {
+ const char *str = "No error state collected\n";
+ size_t len = strlen(str);
-out:
- i915_gpu_state_put(gpu);
- i915_error_state_buf_release(&error_str);
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index a2c2c3ab5fb0..ebd71b487220 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -83,6 +83,25 @@ void i915_timeline_init(struct drm_i915_private *i915,
const char *name);
void i915_timeline_fini(struct i915_timeline *tl);
+static inline void
+i915_timeline_set_subclass(struct i915_timeline *timeline,
+ unsigned int subclass)
+{
+ lockdep_set_subclass(&timeline->lock, subclass);
+
+ /*
+ * Due to an interesting quirk in lockdep's internal debug tracking,
+ * after setting a subclass we must ensure the lock is used. Otherwise,
+ * nr_unused_locks is incremented once too often.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ local_irq_disable();
+ lock_map_acquire(&timeline->lock.dep_map);
+ lock_map_release(&timeline->lock.dep_map);
+ local_irq_enable();
+#endif
+}
+
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915, const char *name);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 395dd2511568..9726df37c4c4 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -44,16 +44,19 @@
__stringify(x), (long)(x))
#if defined(GCC_VERSION) && GCC_VERSION >= 70000
-#define add_overflows(A, B) \
- __builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
+#define add_overflows_t(T, A, B) \
+ __builtin_add_overflow_p((A), (B), (T)0)
#else
-#define add_overflows(A, B) ({ \
+#define add_overflows_t(T, A, B) ({ \
typeof(A) a = (A); \
typeof(B) b = (B); \
- a + b < a; \
+ (T)(a + b) < a; \
})
#endif
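+/* add_overflows() checks in the usual arithmetic result type of A + B */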
+#define add_overflows(A, B) \
+ add_overflows_t(typeof((A) + (B)), (A), (B))
+
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
typeof(size) size__ = (size); \
@@ -68,7 +71,7 @@
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
- (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
+ (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
#define ptr_mask_bits(ptr, n) ({ \
unsigned long __v = (unsigned long)(ptr); \
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 31efc971a3a8..5b4d78cdb4ca 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -305,12 +305,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->size > vma->node.size);
- if (GEM_WARN_ON(range_overflows(vma->node.start,
- vma->node.size,
- vma->vm->total)))
+ if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
+ vma->node.size,
+ vma->vm->total)))
return -ENODEV;
- if (GEM_WARN_ON(!flags))
+ if (GEM_DEBUG_WARN_ON(!flags))
return -EINVAL;
bind_flags = 0;
@@ -892,7 +892,7 @@ static void export_fence(struct i915_vma *vma,
reservation_object_lock(resv, NULL);
if (flags & EXEC_OBJECT_WRITE)
reservation_object_add_excl_fence(resv, &rq->fence);
- else if (reservation_object_reserve_shared(resv) == 0)
+ else if (reservation_object_reserve_shared(resv, 1) == 0)
reservation_object_add_shared_fence(resv, &rq->fence);
reservation_object_unlock(resv);
}
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 13830e43a4d1..4dd793b78996 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -25,8 +25,277 @@
* Jani Nikula <jani.nikula@intel.com>
*/
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_atomic_helper.h>
#include "intel_dsi.h"
+static inline int header_credits_available(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
+ >> FREE_HEADER_CREDIT_SHIFT;
+}
+
+static inline int payload_credits_available(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
+ >> FREE_PLOAD_CREDIT_SHIFT;
+}
+
+static void wait_for_header_credits(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
+ MAX_HEADER_CREDIT, 100))
+ DRM_ERROR("DSI header credits not released\n");
+}
+
+static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
+ MAX_PLOAD_CREDIT, 100))
+ DRM_ERROR("DSI payload credits not released\n");
+}
+
+static enum transcoder dsi_port_to_transcoder(enum port port)
+{
+ if (port == PORT_A)
+ return TRANSCODER_DSI_0;
+ else
+ return TRANSCODER_DSI_1;
+}
+
+static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi;
+ enum port port;
+ enum transcoder dsi_trans;
+ int ret;
+
+ /* wait for header/payload credits to be released */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ wait_for_header_credits(dev_priv, dsi_trans);
+ wait_for_payload_credits(dev_priv, dsi_trans);
+ }
+
+ /* send nop DCS command */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi = intel_dsi->dsi_hosts[port]->device;
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+ dsi->channel = 0;
+ ret = mipi_dsi_dcs_nop(dsi);
+ if (ret < 0)
+ DRM_ERROR("error sending DCS NOP command\n");
+ }
+
+ /* wait for header credits to be released */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ wait_for_header_credits(dev_priv, dsi_trans);
+ }
+
+ /* wait for LP TX in progress bit to be cleared */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) &
+ LPTX_IN_PROGRESS), 20))
+ DRM_ERROR("LPTX bit not cleared\n");
+ }
+}
+
+static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
+ u32 len)
+{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ int free_credits;
+ int i, j;
+
+ for (i = 0; i < len; i += 4) {
+ u32 tmp = 0;
+
+ free_credits = payload_credits_available(dev_priv, dsi_trans);
+ if (free_credits < 1) {
+ DRM_ERROR("Payload credit not available\n");
+ return false;
+ }
+
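+ /* pack up to four payload bytes, LSB first, into one FIFO dword */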
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ tmp |= *data++ << 8 * j;
+
+ I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp);
+ }
+
+ return true;
+}
+
+static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
+ struct mipi_dsi_packet pkt, bool enable_lpdt)
+{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ u32 tmp;
+ int free_credits;
+
+ /* check if header credit available */
+ free_credits = header_credits_available(dev_priv, dsi_trans);
+ if (free_credits < 1) {
+ DRM_ERROR("send pkt header failed, not enough hdr credits\n");
+ return -1;
+ }
+
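+ /* header[0] carries VC + data type, header[1]/header[2] the 16-bit word count (MIPI DSI packet layout) */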
+ tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans));
+
+ if (pkt.payload)
+ tmp |= PAYLOAD_PRESENT;
+ else
+ tmp &= ~PAYLOAD_PRESENT;
+
+ tmp &= ~VBLANK_FENCE;
+
+ if (enable_lpdt)
+ tmp |= LP_DATA_TRANSFER;
+
+ tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
+ tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT);
+ tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
+ tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
+ tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
+ I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp);
+
+ return 0;
+}
+
+static int dsi_send_pkt_payld(struct intel_dsi_host *host,
+ struct mipi_dsi_packet pkt)
+{
+ /* payload queue can accept *256 bytes*, check limit */
+ if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
+ DRM_ERROR("payload size exceeds max queue limit\n");
+ return -1;
+ }
+
+ /* load data into command payload queue */
+ if (!add_payld_to_queue(host, pkt.payload,
+ pkt.payload_length)) {
+ DRM_ERROR("adding payload to queue failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+ int lane;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /*
+ * Program voltage swing and pre-emphasis level values as per
+ * table in BSPEC under DDI buffer programming
+ */
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
+ tmp |= SCALING_MODE_SEL(0x2);
+ tmp |= TAP2_DISABLE | TAP3_DISABLE;
+ tmp |= RTERM_SELECT(0x6);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
+ tmp |= SCALING_MODE_SEL(0x2);
+ tmp |= TAP2_DISABLE | TAP3_DISABLE;
+ tmp |= RTERM_SELECT(0x6);
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+
+ tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK);
+ tmp |= SWING_SEL_UPPER(0x2);
+ tmp |= SWING_SEL_LOWER(0x2);
+ tmp |= RCOMP_SCALAR(0x98);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+
+ tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+ tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK);
+ tmp |= SWING_SEL_UPPER(0x2);
+ tmp |= SWING_SEL_LOWER(0x2);
+ tmp |= RCOMP_SCALAR(0x98);
+ I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+
+ tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+ tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK);
+ tmp |= POST_CURSOR_1(0x0);
+ tmp |= POST_CURSOR_2(0x0);
+ tmp |= CURSOR_COEFF(0x3f);
+ I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+
+ for (lane = 0; lane <= 3; lane++) {
+ /* Bspec: must not use GRP register for write */
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+ tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK);
+ tmp |= POST_CURSOR_1(0x0);
+ tmp |= POST_CURSOR_2(0x0);
+ tmp |= CURSOR_COEFF(0x3f);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+ }
+ }
+}
+
+static void configure_dual_link_mode(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 dss_ctl1;
+
+ dss_ctl1 = I915_READ(DSS_CTL1);
+ dss_ctl1 |= SPLITTER_ENABLE;
+ dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
+ dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
+
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ u32 dss_ctl2;
+ u16 hactive = adjusted_mode->crtc_hdisplay;
+ u16 dl_buffer_depth;
+
+ dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE;
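+ /* front/back mode: each link carries half the active pixels plus the overlap */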
+ dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
+
+ if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
+ DRM_ERROR("DL buffer depth exceed max value\n");
+
+ dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
+ dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+ dss_ctl2 = I915_READ(DSS_CTL2);
+ dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
+ dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+ I915_WRITE(DSS_CTL2, dss_ctl2);
+ } else {
+ /* Interleave */
+ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
+ }
+
+ I915_WRITE(DSS_CTL1, dss_ctl1);
+}
+
static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -105,23 +374,1079 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
}
}
-static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder)
+static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+ int lane;
+
+ /* Step 4b(i) set loadgen select for transmit and aux lanes */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+ tmp &= ~LOADGEN_SELECT;
+ I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+ for (lane = 0; lane <= 3; lane++) {
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+ tmp &= ~LOADGEN_SELECT;
+ if (lane != 2)
+ tmp |= LOADGEN_SELECT;
+ I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+ }
+ }
+
+ /* Step 4b(ii) set latency optimization for transmit and aux lanes */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+ tmp &= ~FRC_LATENCY_OPTIM_MASK;
+ tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+ I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ tmp &= ~FRC_LATENCY_OPTIM_MASK;
+ tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+ }
+}
+
+static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ /* clear common keeper enable bit */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+ tmp &= ~COMMON_KEEPER_EN;
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
+ tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
+ tmp &= ~COMMON_KEEPER_EN;
+ I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
+ }
+
+ /*
+ * Set SUS Clock Config bitfield to 11b
+ * Note: loadgen select programming is done
+ * as part of lane phy sequence configuration
+ */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_CL_DW5(port));
+ tmp |= SUS_CLOCK_CONFIG;
+ I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
+ }
+
+ /* Clear training enable to change swing values */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ tmp &= ~TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ tmp &= ~TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ }
+
+ /* Program swing and de-emphasis */
+ dsi_program_swing_and_deemphasis(encoder);
+
+ /* Set training enable to trigger update */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ tmp |= TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ tmp |= TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ }
+}
+
+static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp |= DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), tmp);
+
+ if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE),
+ 500))
+ DRM_ERROR("DDI port:%c buffer idle\n", port_name(port));
+ }
+}
+
+static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ /* Program T-INIT master registers */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
+ tmp &= ~MASTER_INIT_TIMER_MASK;
+ tmp |= intel_dsi->init_count;
+ I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
+ }
+
+ /* Program DPHY clock lanes timings */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+
+ /* shadow register inside display core */
+ I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+ }
+
+ /* Program DPHY data lanes timings */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
+
+ /* shadow register inside display core */
+ I915_WRITE(DSI_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
+ }
+
+ /*
+ * If the DSI link is operating at or below 800 MHz,
+ * TA_SURE should be overridden and programmed to
+ * a value of '0' inside TA_PARAM_REGISTERS; otherwise
+ * leave all fields at HW default values.
+ */
+ if (intel_dsi_bitrate(intel_dsi) <= 800000) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
+
+ /* shadow register inside display core */
+ tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+ }
+ }
+}
+
+static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ }
+
+ I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ }
+
+ I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_map_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpll_lock);
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+ }
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ POSTING_READ(DPCLKA_CFGCR0_ICL);
+
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void
+gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 tmp;
+ enum port port;
+ enum transcoder dsi_trans;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+
+ if (intel_dsi->eotp_pkt)
+ tmp &= ~EOTP_DISABLED;
+ else
+ tmp |= EOTP_DISABLED;
+
+ /* enable link calibration if freq >= 1.5 Gbps */
+ if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) {
+ tmp &= ~LINK_CALIBRATION_MASK;
+ tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
+ }
+
+ /* configure continuous clock */
+ tmp &= ~CONTINUOUS_CLK_MASK;
+ if (intel_dsi->clock_stop)
+ tmp |= CLK_ENTER_LP_AFTER_DATA;
+ else
+ tmp |= CLK_HS_CONTINUOUS;
+
+ /* configure buffer threshold limit to minimum */
+ tmp &= ~PIX_BUF_THRESHOLD_MASK;
+ tmp |= PIX_BUF_THRESHOLD_1_4;
+
+ /* set virtual channel to '0' */
+ tmp &= ~PIX_VIRT_CHAN_MASK;
+ tmp |= PIX_VIRT_CHAN(0);
+
+ /* program BGR transmission */
+ if (intel_dsi->bgr_enabled)
+ tmp |= BGR_TRANSMISSION;
+
+ /* select pixel format */
+ tmp &= ~PIX_FMT_MASK;
+ switch (intel_dsi->pixel_format) {
+ default:
+ MISSING_CASE(intel_dsi->pixel_format);
+ /* fallthrough */
+ case MIPI_DSI_FMT_RGB565:
+ tmp |= PIX_FMT_RGB565;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ tmp |= PIX_FMT_RGB666_PACKED;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ tmp |= PIX_FMT_RGB666_LOOSE;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ tmp |= PIX_FMT_RGB888;
+ break;
+ }
+
+ /* program DSI operation mode */
+ if (is_vid_mode(intel_dsi)) {
+ tmp &= ~OP_MODE_MASK;
+ switch (intel_dsi->video_mode_format) {
+ default:
+ MISSING_CASE(intel_dsi->video_mode_format);
+ /* fallthrough */
+ case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS:
+ tmp |= VIDEO_MODE_SYNC_EVENT;
+ break;
+ case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE:
+ tmp |= VIDEO_MODE_SYNC_PULSE;
+ break;
+ }
+ }
+
+ I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
+ }
+
+ /* enable port sync mode if dual link */
+ if (intel_dsi->dual_link) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp |= PORT_SYNC_MODE_ENABLE;
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ }
+
+ /* configure stream splitting */
+ configure_dual_link_mode(encoder, pipe_config);
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /* select data lane width */
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp &= ~DDI_PORT_WIDTH_MASK;
+ tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
+
+ /* select input pipe */
+ tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
+ switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ /* fallthrough */
+ case PIPE_A:
+ tmp |= TRANS_DDI_EDP_INPUT_A_ON;
+ break;
+ case PIPE_B:
+ tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+ break;
+ case PIPE_C:
+ tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+ break;
+ }
+
+ /* enable DDI buffer */
+ tmp |= TRANS_DDI_FUNC_ENABLE;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ }
+
+ /* wait for link ready */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) &
+ LINK_READY), 2500))
+ DRM_ERROR("DSI link not ready\n");
+ }
+}
+
+static void
+gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ enum port port;
+ enum transcoder dsi_trans;
+ /* horizontal timings */
+ u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
+ u16 hfront_porch, hback_porch;
+ /* vertical timings */
+ u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
+
+ hactive = adjusted_mode->crtc_hdisplay;
+ htotal = adjusted_mode->crtc_htotal;
+ hsync_start = adjusted_mode->crtc_hsync_start;
+ hsync_end = adjusted_mode->crtc_hsync_end;
+ hsync_size = hsync_end - hsync_start;
+ hfront_porch = (adjusted_mode->crtc_hsync_start -
+ adjusted_mode->crtc_hdisplay);
+ hback_porch = (adjusted_mode->crtc_htotal -
+ adjusted_mode->crtc_hsync_end);
+ vactive = adjusted_mode->crtc_vdisplay;
+ vtotal = adjusted_mode->crtc_vtotal;
+ vsync_start = adjusted_mode->crtc_vsync_start;
+ vsync_end = adjusted_mode->crtc_vsync_end;
+ vsync_shift = hsync_start - htotal / 2;
+
+ if (intel_dsi->dual_link) {
+ hactive /= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ hactive += intel_dsi->pixel_overlap;
+ htotal /= 2;
+ }
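+ /*
+ * e.g. with illustrative numbers: a 1920-pixel-wide front-back
+ * dual-link mode with pixel_overlap = 2 programs
+ * hactive = 1920 / 2 + 2 = 962 per port.
+ */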
+
+ /* minimum hactive as per bspec: 256 pixels */
+ if (adjusted_mode->crtc_hdisplay < 256)
+ DRM_ERROR("hactive is less then 256 pixels\n");
+
+ /* if RGB666 format, then hactive must be a multiple of 4 pixels */
+ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
+ DRM_ERROR("hactive pixels are not a multiple of 4\n");
+
+ /* program TRANS_HTOTAL register */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(HTOTAL(dsi_trans),
+ (hactive - 1) | ((htotal - 1) << 16));
+ }
+
+ /* TRANS_HSYNC register to be programmed only for video mode */
+ if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+ if (intel_dsi->video_mode_format ==
+ VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
+ /* BSPEC: hsync size should be at least 16 pixels */
+ if (hsync_size < 16)
+ DRM_ERROR("hsync size < 16 pixels\n");
+ }
+
+ if (hback_porch < 16)
+ DRM_ERROR("hback porch < 16 pixels\n");
+
+ if (intel_dsi->dual_link) {
+ hsync_start /= 2;
+ hsync_end /= 2;
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(HSYNC(dsi_trans),
+ (hsync_start - 1) | ((hsync_end - 1) << 16));
+ }
+ }
+
+ /* program TRANS_VTOTAL register */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ /*
+ * FIXME: Programming this by assuming progressive mode, since
+ * non-interlaced info from VBT is not saved inside
+ * struct drm_display_mode.
+ * For interlaced mode: program the required value minus 2.
+ */
+ I915_WRITE(VTOTAL(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
+ }
+
+ if (vsync_end < vsync_start || vsync_end > vtotal)
+ DRM_ERROR("Invalid vsync_end value\n");
+
+ if (vsync_start < vactive)
+ DRM_ERROR("vsync_start less than vactive\n");
+
+ /* program TRANS_VSYNC register */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(VSYNC(dsi_trans),
+ (vsync_start - 1) | ((vsync_end - 1) << 16));
+ }
+
+ /*
+ * FIXME: It has to be programmed only for interlaced
+ * modes. Add the check condition here once the interlaced
+ * info is available as described above.
+ * program TRANS_VSYNCSHIFT register
+ */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
+ }
+}
+
+static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp |= PIPECONF_ENABLE;
+ I915_WRITE(PIPECONF(dsi_trans), tmp);
+
+ /* wait for transcoder to be enabled */
+ if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+ I965_PIPECONF_ACTIVE,
+ I965_PIPECONF_ACTIVE, 10))
+ DRM_ERROR("DSI transcoder not enabled\n");
+ }
+}
+
+static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
+
+ /*
+ * escape clock count calculation:
+ * BYTE_CLK_COUNT = TIME_NS/(8 * UI)
+ * UI (nsec) = (10^6)/Bitrate
+ * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate
+ * ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS
+ */
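+ /*
+ * Combining the above, each register value below is
+ * DIV_ROUND_UP(count * 8 * 10^6, tlpx_ns * bitrate * 1000),
+ * i.e. the VBT byte-clock count rescaled to escape-clock ticks.
+ */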
+ divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000;
+ mul = 8 * 1000000;
+ hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
+ divisor);
+ lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor);
+ ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /* program hst_tx_timeout */
+ tmp = I915_READ(DSI_HSTX_TO(dsi_trans));
+ tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
+ tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
+ I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp);
+
+ /* FIXME: DSI_CALIB_TO */
+
+ /* program lp_rx_host timeout */
+ tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans));
+ tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
+ tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
+ I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp);
+
+ /* FIXME: DSI_PWAIT_TO */
+
+ /* program turn around timeout */
+ tmp = I915_READ(DSI_TA_TO(dsi_trans));
+ tmp &= ~TA_TIMEOUT_VALUE_MASK;
+ tmp |= TA_TIMEOUT_VALUE(ta_timeout);
+ I915_WRITE(DSI_TA_TO(dsi_trans), tmp);
+ }
+}
+
+static void
+gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
{
/* step 4a: power up all lanes of the DDI used by DSI */
gen11_dsi_power_up_lanes(encoder);
+
+ /* step 4b: configure lane sequencing of the Combo-PHY transmitters */
+ gen11_dsi_config_phy_lanes_sequence(encoder);
+
+ /* step 4c: configure voltage swing and skew */
+ gen11_dsi_voltage_swing_program_seq(encoder);
+
+ /* enable DDI buffer */
+ gen11_dsi_enable_ddi_buffer(encoder);
+
+ /* setup D-PHY timings */
+ gen11_dsi_setup_dphy_timings(encoder);
+
+ /* step 4h: setup DSI protocol timeouts */
+ gen11_dsi_setup_timeouts(encoder);
+
+ /* Step (4h, 4i, 4j, 4k): Configure transcoder */
+ gen11_dsi_configure_transcoder(encoder, pipe_config);
+
+ /* Step 4l: Gate DDI clocks */
+ gen11_dsi_gate_clocks(encoder);
}
-static void __attribute__((unused))
-gen11_dsi_pre_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi;
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+ int ret;
+
+ /* set maximum return packet size */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /*
+ * FIXME: This uses the number of DW's currently in the payload
+ * receive queue. This is probably not what we want here.
+ */
+ tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans));
+ tmp &= NUMBER_RX_PLOAD_DW_MASK;
+ /* multiply "Number Rx Payload DW" by 4 to get max value */
+ tmp = tmp * 4;
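+ /* e.g. a queue depth of 8 DWs allows a 32-byte return packet */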
+ dsi = intel_dsi->dsi_hosts[port]->device;
+ ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
+ if (ret < 0)
+ DRM_ERROR("error setting max return pkt size%d\n", tmp);
+ }
+
+ /* panel power on related mipi dsi vbt sequences */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+ intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
+ /* ensure all panel commands dispatched before enabling transcoder */
+ wait_for_cmds_dispatched_to_panel(encoder);
+}
+
+static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
/* step2: enable IO power */
gen11_dsi_enable_io_power(encoder);
/* step3: enable DSI PLL */
gen11_dsi_program_esc_clk_div(encoder);
+}
+
+static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ /* step3b */
+ gen11_dsi_map_pll(encoder, pipe_config);
/* step4: enable DSI port and DPHY */
- gen11_dsi_enable_port_and_phy(encoder);
+ gen11_dsi_enable_port_and_phy(encoder, pipe_config);
+
+ /* step5: program and powerup panel */
+ gen11_dsi_powerup_panel(encoder);
+
+ /* step6c: configure transcoder timings */
+ gen11_dsi_set_transcoder_timings(encoder, pipe_config);
+
+ /* step6d: enable dsi transcoder */
+ gen11_dsi_enable_transcoder(encoder);
+
+ /* step7: enable backlight */
+ intel_panel_enable_backlight(pipe_config, conn_state);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+}
+
+static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /* disable transcoder */
+ tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp &= ~PIPECONF_ENABLE;
+ I915_WRITE(PIPECONF(dsi_trans), tmp);
+
+ /* wait for transcoder to be disabled */
+ if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+ I965_PIPECONF_ACTIVE, 0, 50))
+ DRM_ERROR("DSI trancoder not disabled\n");
+ }
+}
+
+static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
+
+ /* ensure cmds dispatched to panel */
+ wait_for_cmds_dispatched_to_panel(encoder);
+}
+
+static void gen11_dsi_deconfigure_transcoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+
+ /* put dsi link in ULPS */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(DSI_LP_MSG(dsi_trans));
+ tmp |= LINK_ENTER_ULPS;
+ tmp &= ~LINK_ULPS_TYPE_LP11;
+ I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
+
+ if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) &
+ LINK_IN_ULPS),
+ 10))
+ DRM_ERROR("DSI link not in ULPS\n");
+ }
+
+ /* disable ddi function */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp &= ~TRANS_DDI_FUNC_ENABLE;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ }
+
+ /* disable port sync mode if dual link */
+ if (intel_dsi->dual_link) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp &= ~PORT_SYNC_MODE_ENABLE;
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ }
+ }
+}
+
+static void gen11_dsi_disable_port(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ gen11_dsi_ungate_clocks(encoder);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), tmp);
+
+ if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE),
+ 8))
+ DRM_ERROR("DDI port:%c buffer not idle\n",
+ port_name(port));
+ }
+ gen11_dsi_gate_clocks(encoder);
+}
+
+static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
+
+ if (intel_dsi->dual_link)
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO);
+
+ /* set mode to DDI */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+ tmp &= ~COMBO_PHY_MODE_DSI;
+ I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+ }
+}
+
+static void gen11_dsi_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ /* step1: turn off backlight */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
+ intel_panel_disable_backlight(old_conn_state);
+
+ /* step2d,e: disable transcoder and wait */
+ gen11_dsi_disable_transcoder(encoder);
+
+ /* step2f,g: powerdown panel */
+ gen11_dsi_powerdown_panel(encoder);
+
+ /* step2h,i,j: deconfigure transcoder */
+ gen11_dsi_deconfigure_transcoder(encoder);
+
+ /* step3: disable port */
+ gen11_dsi_disable_port(encoder);
+
+ /* step4: disable IO power */
+ gen11_dsi_disable_io_power(encoder);
+}
+
+static void gen11_dsi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 pll_id;
+
+ /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+}
+
+static bool gen11_dsi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+ base);
+ struct intel_connector *intel_connector = intel_dsi->attached_connector;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ const struct drm_display_mode *fixed_mode =
+ intel_connector->panel.fixed_mode;
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
+
+ adjusted_mode->flags = 0;
+
+ /* Dual link goes to transcoder DSI '0' */
+ if (intel_dsi->ports == BIT(PORT_B))
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_1;
+ else
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_0;
+
+ pipe_config->clock_set = true;
+ pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
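+ /*
+ * The combo PHY convention elsewhere in the driver is
+ * AFE clock = 5 * port_clock; the DSI AFE clock equals the
+ * link bitrate, hence the divide by 5 above.
+ */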
+
+ return true;
+}
+
+static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u64 domains = 0;
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports)
+ if (port == PORT_A)
+ domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
+ else
+ domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
+
+ return domains;
+}
+
+static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+ enum transcoder dsi_trans;
+ bool ret = false;
+
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain))
+ return false;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ default:
+ DRM_ERROR("Invalid PIPE input\n");
+ goto out;
+ }
+
+ tmp = I915_READ(PIPECONF(dsi_trans));
+ ret = tmp & PIPECONF_ENABLE;
+ }
+out:
+ intel_display_power_put(dev_priv, encoder->power_domain);
+ return ret;
+}
+
+static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = {
+ .destroy = gen11_dsi_encoder_destroy,
+};
+
+static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = {
+ .get_modes = intel_dsi_get_modes,
+ .mode_valid = intel_dsi_mode_valid,
+ .atomic_check = intel_digital_connector_atomic_check,
+};
+
+static int gen11_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static int gen11_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+ struct mipi_dsi_packet dsi_pkt;
+ ssize_t ret;
+ bool enable_lpdt = false;
+
+ ret = mipi_dsi_create_packet(&dsi_pkt, msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+ enable_lpdt = true;
+
+ /* send packet header */
+ ret = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt);
+ if (ret < 0)
+ return ret;
+
+ /* only long packet contains payload */
+ if (mipi_dsi_packet_format_is_long(msg->type)) {
+ ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* TODO: add payload receive code if needed */
+
+ ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length;
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
+ .attach = gen11_dsi_host_attach,
+ .detach = gen11_dsi_host_detach,
+ .transfer = gen11_dsi_host_transfer,
+};
+
+void icl_dsi_init(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_dsi *intel_dsi;
+ struct intel_encoder *encoder;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_display_mode *scan, *fixed_mode = NULL;
+ enum port port;
+
+ if (!intel_bios_is_dsi_present(dev_priv, &port))
+ return;
+
+ intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+ if (!intel_dsi)
+ return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(intel_dsi);
+ return;
+ }
+
+ encoder = &intel_dsi->base;
+ intel_dsi->attached_connector = intel_connector;
+ connector = &intel_connector->base;
+
+ /* register DSI encoder with DRM subsystem */
+ drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
+
+ encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
+ encoder->pre_enable = gen11_dsi_pre_enable;
+ encoder->disable = gen11_dsi_disable;
+ encoder->port = port;
+ encoder->get_config = gen11_dsi_get_config;
+ encoder->compute_config = gen11_dsi_compute_config;
+ encoder->get_hw_state = gen11_dsi_get_hw_state;
+ encoder->type = INTEL_OUTPUT_DSI;
+ encoder->cloneable = 0;
+ encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+ encoder->get_power_domains = gen11_dsi_get_power_domains;
+
+ /* register DSI connector with DRM subsystem */
+ drm_connector_init(dev, connector, &gen11_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ /* attach connector to encoder */
+ intel_connector_attach_encoder(intel_connector, encoder);
+
+ /* fill mode info from VBT */
+ mutex_lock(&dev->mode_config.mutex);
+ intel_dsi_vbt_get_modes(intel_dsi);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (!fixed_mode) {
+ DRM_ERROR("DSI fixed mode info missing\n");
+ goto err;
+ }
+
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+ intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
+
+ if (dev_priv->vbt.dsi.config->dual_link)
+ intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
+ else
+ intel_dsi->ports = BIT(port);
+
+ intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+ intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ struct intel_dsi_host *host;
+
+ host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port);
+ if (!host)
+ goto err;
+
+ intel_dsi->dsi_hosts[port] = host;
+ }
+
+ if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
+ DRM_DEBUG_KMS("no device found\n");
+ goto err;
+ }
+
+ return;
+
+err:
+ drm_encoder_cleanup(&encoder->base);
+ kfree(intel_dsi);
+ kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index b04952bacf77..8cb02f28d30c 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -184,6 +184,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->fifo_changed = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
+ crtc_state->update_planes = 0;
return &crtc_state->base;
}
@@ -203,6 +204,72 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
drm_atomic_helper_crtc_destroy_state(crtc, state);
}
+static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+ int num_scalers_need, struct intel_crtc *intel_crtc,
+ const char *name, int idx,
+ struct intel_plane_state *plane_state,
+ int *scaler_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ int j;
+ u32 mode;
+
+ if (*scaler_id < 0) {
+ /* find a free scaler */
+ for (j = 0; j < intel_crtc->num_scalers; j++) {
+ if (scaler_state->scalers[j].in_use)
+ continue;
+
+ *scaler_id = j;
+ scaler_state->scalers[*scaler_id].in_use = 1;
+ break;
+ }
+ }
+
+ if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
+ return;
+
+ /* set scaler mode */
+ if (plane_state && plane_state->base.fb &&
+ plane_state->base.fb->format->is_yuv &&
+ plane_state->base.fb->format->num_planes > 1) {
+ if (IS_GEN9(dev_priv) &&
+ !IS_GEMINILAKE(dev_priv)) {
+ mode = SKL_PS_SCALER_MODE_NV12;
+ } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
+ /*
+ * On gen11+'s HDR planes we only use the scaler for
+ * scaling. They have a dedicated chroma upsampler, so
+ * we don't need the scaler to upsample the UV plane.
+ */
+ mode = PS_SCALER_MODE_NORMAL;
+ } else {
+ mode = PS_SCALER_MODE_PLANAR;
+
+ if (plane_state->linked_plane)
+ mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
+ }
+ } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
+ mode = PS_SCALER_MODE_NORMAL;
+ } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+ /*
+ * When only 1 scaler is in use on a pipe with 2 scalers,
+ * scaler 0 operates in high quality (HQ) mode. In this
+ * case use scaler 0 to take advantage of HQ mode.
+ */
+ scaler_state->scalers[*scaler_id].in_use = 0;
+ *scaler_id = 0;
+ scaler_state->scalers[0].in_use = 1;
+ mode = SKL_PS_SCALER_MODE_HQ;
+ } else {
+ mode = SKL_PS_SCALER_MODE_DYN;
+ }
+
+ DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
+ intel_crtc->pipe, *scaler_id, name, idx);
+ scaler_state->scalers[*scaler_id].mode = mode;
+}
+
/**
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
* @dev_priv: i915 device
@@ -232,7 +299,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct drm_atomic_state *drm_state = crtc_state->base.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
int num_scalers_need;
- int i, j;
+ int i;
num_scalers_need = hweight32(scaler_state->scaler_users);
@@ -304,59 +371,17 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
idx = plane->base.id;
/* plane on different crtc cannot be a scaler user of this crtc */
- if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
+ if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
continue;
- }
plane_state = intel_atomic_get_new_plane_state(intel_state,
intel_plane);
scaler_id = &plane_state->scaler_id;
}
- if (*scaler_id < 0) {
- /* find a free scaler */
- for (j = 0; j < intel_crtc->num_scalers; j++) {
- if (!scaler_state->scalers[j].in_use) {
- scaler_state->scalers[j].in_use = 1;
- *scaler_id = j;
- DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe, *scaler_id, name, idx);
- break;
- }
- }
- }
-
- if (WARN_ON(*scaler_id < 0)) {
- DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
- continue;
- }
-
- /* set scaler mode */
- if ((INTEL_GEN(dev_priv) >= 9) &&
- plane_state && plane_state->base.fb &&
- plane_state->base.fb->format->format ==
- DRM_FORMAT_NV12) {
- if (INTEL_GEN(dev_priv) == 9 &&
- !IS_GEMINILAKE(dev_priv) &&
- !IS_SKYLAKE(dev_priv))
- scaler_state->scalers[*scaler_id].mode =
- SKL_PS_SCALER_MODE_NV12;
- else
- scaler_state->scalers[*scaler_id].mode =
- PS_SCALER_MODE_PLANAR;
- } else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
- /*
- * when only 1 scaler is in use on either pipe A or B,
- * scaler 0 operates in high quality (HQ) mode.
- * In this case use scaler 0 to take advantage of HQ mode
- */
- *scaler_id = 0;
- scaler_state->scalers[0].in_use = 1;
- scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
- scaler_state->scalers[1].in_use = 0;
- } else {
- scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
- }
+ intel_atomic_setup_scaler(scaler_state, num_scalers_need,
+ intel_crtc, name, idx,
+ plane_state, scaler_id);
}
return 0;
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index aabebe0d2e9b..0a73e6e65c20 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -36,28 +36,31 @@
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
-/**
- * intel_create_plane_state - create plane state object
- * @plane: drm plane
- *
- * Allocates a fresh plane state for the given plane and sets some of
- * the state values to sensible initial values.
- *
- * Returns: A newly allocated plane state, or NULL on failure
- */
-struct intel_plane_state *
-intel_create_plane_state(struct drm_plane *plane)
+struct intel_plane *intel_plane_alloc(void)
{
- struct intel_plane_state *state;
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
- return NULL;
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
- state->base.plane = plane;
- state->base.rotation = DRM_MODE_ROTATE_0;
+ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ if (!plane_state) {
+ kfree(plane);
+ return ERR_PTR(-ENOMEM);
+ }
- return state;
+ __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
+ plane_state->scaler_id = -1;
+
+ return plane;
+}
+
+void intel_plane_free(struct intel_plane *plane)
+{
+ intel_plane_destroy_state(&plane->base, plane->base.state);
+ kfree(plane);
}
/**
@@ -117,10 +120,14 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret;
+ crtc_state->active_planes &= ~BIT(intel_plane->id);
+ crtc_state->nv12_planes &= ~BIT(intel_plane->id);
+ intel_state->base.visible = false;
+
+ /* If the plane is not attached to a crtc in either state, no further checks are needed. */
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
- intel_state->base.visible = false;
ret = intel_plane->check_plane(crtc_state, intel_state);
if (ret)
return ret;
@@ -128,13 +135,12 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
/* FIXME pre-g4x don't work like this */
if (state->visible)
crtc_state->active_planes |= BIT(intel_plane->id);
- else
- crtc_state->active_planes &= ~BIT(intel_plane->id);
if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
crtc_state->nv12_planes |= BIT(intel_plane->id);
- else
- crtc_state->nv12_planes &= ~BIT(intel_plane->id);
+
+ if (state->visible || old_plane_state->base.visible)
+ crtc_state->update_planes |= BIT(intel_plane->id);
return intel_plane_atomic_calc_changes(old_crtc_state,
&crtc_state->base,
@@ -152,6 +158,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
const struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
+ new_plane_state->visible = false;
if (!crtc)
return 0;
@@ -164,29 +171,123 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
to_intel_plane_state(new_plane_state));
}
-static void intel_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
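+/*
+ * Commit planes one at a time: a plane becomes eligible only once its
+ * new DDB allocation no longer overlaps the allocations still in use
+ * by the other planes, so no two planes ever fetch from the same DDB
+ * region while the update is in flight.
+ */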
+static struct intel_plane *
+skl_next_plane_to_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct skl_ddb_entry entries_y[I915_MAX_PLANES],
+ struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
+ unsigned int *update_mask)
{
- struct intel_atomic_state *state = to_intel_atomic_state(old_state->state);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- const struct intel_plane_state *new_plane_state =
- intel_atomic_get_new_plane_state(state, intel_plane);
- struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc;
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ if (*update_mask == 0)
+ return NULL;
- if (new_plane_state->base.visible) {
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc));
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ enum plane_id plane_id = plane->id;
- trace_intel_update_plane(plane,
- to_intel_crtc(crtc));
+ if (crtc->pipe != plane->pipe ||
+ !(*update_mask & BIT(plane_id)))
+ continue;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
+ entries_y,
+ I915_MAX_PLANES, plane_id) ||
+ skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
+ entries_uv,
+ I915_MAX_PLANES, plane_id))
+ continue;
+
+ *update_mask &= ~BIT(plane_id);
+ entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
+ entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+ return plane;
+ }
- intel_plane->update_plane(intel_plane,
- new_crtc_state, new_plane_state);
- } else {
- trace_intel_disable_plane(plane,
- to_intel_crtc(crtc));
+ /* should never happen */
+ WARN_ON(1);
- intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
+ return NULL;
+}
+
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct skl_ddb_entry entries_y[I915_MAX_PLANES];
+ struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane *plane;
+
+ memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
+ sizeof(old_crtc_state->wm.skl.plane_ddb_y));
+ memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
+ sizeof(old_crtc_state->wm.skl.plane_ddb_uv));
+
+ while ((plane = skl_next_plane_to_commit(state, crtc,
+ entries_y, entries_uv,
+ &update_mask))) {
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+
+ if (new_plane_state->base.visible) {
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_plane(plane, new_crtc_state, new_plane_state);
+ } else if (new_plane_state->slave) {
+ struct intel_plane *master =
+ new_plane_state->linked_plane;
+
+ /*
+ * We update the slave plane from this function because
+ * programming it from the master plane's update_plane
+ * callback runs into issues when the Y plane is
+ * reassigned, disabled or used by a different plane.
+ *
+ * The slave plane is updated with the master plane's
+ * plane_state.
+ */
+ new_plane_state =
+ intel_atomic_get_new_plane_state(state, master);
+
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_slave(plane, new_crtc_state, new_plane_state);
+ } else {
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, new_crtc_state);
+ }
+ }
+}
+
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
+
+ if (new_plane_state->base.visible) {
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_plane(plane, new_crtc_state, new_plane_state);
+ } else {
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, new_crtc_state);
+ }
}
}
@@ -194,7 +295,6 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
.atomic_check = intel_plane_atomic_check,
- .atomic_update = intel_plane_atomic_update,
};
/**
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 769f3f586661..ae55a6865d5c 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -144,26 +144,43 @@ static const struct {
/* HDMI N/CTS table */
#define TMDS_297M 297000
#define TMDS_296M 296703
+#define TMDS_594M 594000
+#define TMDS_593M 593407
+
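+/*
+ * HDMI defines 128 * sample_rate = TMDS_clock * N / CTS. For example,
+ * 48 kHz audio at 594 MHz uses N = 6144, giving
+ * CTS = 594000000 * 6144 / (128 * 48000) = 594000, as in the table below.
+ */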
static const struct {
int sample_rate;
int clock;
int n;
int cts;
} hdmi_aud_ncts[] = {
- { 44100, TMDS_296M, 4459, 234375 },
- { 44100, TMDS_297M, 4704, 247500 },
- { 48000, TMDS_296M, 5824, 281250 },
- { 48000, TMDS_297M, 5120, 247500 },
{ 32000, TMDS_296M, 5824, 421875 },
{ 32000, TMDS_297M, 3072, 222750 },
+ { 32000, TMDS_593M, 5824, 843750 },
+ { 32000, TMDS_594M, 3072, 445500 },
+ { 44100, TMDS_296M, 4459, 234375 },
+ { 44100, TMDS_297M, 4704, 247500 },
+ { 44100, TMDS_593M, 8918, 937500 },
+ { 44100, TMDS_594M, 9408, 990000 },
{ 88200, TMDS_296M, 8918, 234375 },
{ 88200, TMDS_297M, 9408, 247500 },
- { 96000, TMDS_296M, 11648, 281250 },
- { 96000, TMDS_297M, 10240, 247500 },
+ { 88200, TMDS_593M, 17836, 937500 },
+ { 88200, TMDS_594M, 18816, 990000 },
{ 176400, TMDS_296M, 17836, 234375 },
{ 176400, TMDS_297M, 18816, 247500 },
+ { 176400, TMDS_593M, 35672, 937500 },
+ { 176400, TMDS_594M, 37632, 990000 },
+ { 48000, TMDS_296M, 5824, 281250 },
+ { 48000, TMDS_297M, 5120, 247500 },
+ { 48000, TMDS_593M, 5824, 562500 },
+ { 48000, TMDS_594M, 6144, 594000 },
+ { 96000, TMDS_296M, 11648, 281250 },
+ { 96000, TMDS_297M, 10240, 247500 },
+ { 96000, TMDS_593M, 11648, 562500 },
+ { 96000, TMDS_594M, 12288, 594000 },
{ 192000, TMDS_296M, 23296, 281250 },
{ 192000, TMDS_297M, 20480, 247500 },
+ { 192000, TMDS_593M, 23296, 562500 },
+ { 192000, TMDS_594M, 24576, 594000 },
};
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
@@ -912,6 +929,9 @@ static int i915_audio_component_bind(struct device *i915_kdev,
if (WARN_ON(acomp->base.ops || acomp->base.dev))
return -EEXIST;
+ if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
+ return -ENOMEM;
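+ /* the link above makes HDA a PM consumer of i915: i915 resumes first, suspends last */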
+
drm_modeset_lock_all(&dev_priv->drm);
acomp->base.ops = &i915_audio_component_ops;
acomp->base.dev = i915_kdev;
@@ -935,6 +955,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
acomp->base.dev = NULL;
dev_priv->audio_component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
+
+ device_link_remove(hda_kdev, i915_kdev);
}
static const struct component_ops i915_audio_component_bind_ops = {
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1faa494e2bc9..6d3e0260d49c 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -420,6 +420,13 @@ parse_general_features(struct drm_i915_private *dev_priv,
intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
dev_priv->vbt.display_clock_mode = general->display_clock_mode;
dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ if (bdb->version >= 181) {
+ dev_priv->vbt.orientation = general->rotate_180 ?
+ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
+ DRM_MODE_PANEL_ORIENTATION_NORMAL;
+ } else {
+ dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ }
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
dev_priv->vbt.int_tv_support,
dev_priv->vbt.int_crt_support,
@@ -852,6 +859,30 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
parse_dsi_backlight_ports(dev_priv, bdb->version, port);
+ /* FIXME is the 90 vs. 270 correct? */
+ switch (config->rotation) {
+ case ENABLE_ROTATION_0:
+ /*
+ * Most (all?) VBTs claim 0 degrees despite having
+ * an upside down panel, thus we do not trust this.
+ */
+ dev_priv->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ break;
+ case ENABLE_ROTATION_90:
+ dev_priv->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+ break;
+ case ENABLE_ROTATION_180:
+ dev_priv->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ break;
+ case ENABLE_ROTATION_270:
+ dev_priv->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
+ break;
+ }
+
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
@@ -1721,7 +1752,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
- if (INTEL_INFO(dev_priv)->num_pipes == 0) {
+ if (!HAS_DISPLAY(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
@@ -2039,17 +2070,17 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
dvo_port = child->dvo_port;
- switch (dvo_port) {
- case DVO_PORT_MIPIA:
- case DVO_PORT_MIPIC:
+ if (dvo_port == DVO_PORT_MIPIA ||
+ (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
+ (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
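+ /* ICL exposes DSI on ports A and B; older platforms use A and C */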
if (port)
*port = dvo_port - DVO_PORT_MIPIA;
return true;
- case DVO_PORT_MIPIB:
- case DVO_PORT_MIPID:
+ } else if (dvo_port == DVO_PORT_MIPIB ||
+ dvo_port == DVO_PORT_MIPIC ||
+ dvo_port == DVO_PORT_MIPID) {
DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
- break;
}
}
@@ -2159,3 +2190,49 @@ intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
return false;
}
+
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum aux_ch aux_ch;
+
+ if (!info->alternate_aux_channel) {
+ aux_ch = (enum aux_ch)port;
+
+ DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
+ aux_ch_name(aux_ch), port_name(port));
+ return aux_ch;
+ }
+
+ switch (info->alternate_aux_channel) {
+ case DP_AUX_A:
+ aux_ch = AUX_CH_A;
+ break;
+ case DP_AUX_B:
+ aux_ch = AUX_CH_B;
+ break;
+ case DP_AUX_C:
+ aux_ch = AUX_CH_C;
+ break;
+ case DP_AUX_D:
+ aux_ch = AUX_CH_D;
+ break;
+ case DP_AUX_E:
+ aux_ch = AUX_CH_E;
+ break;
+ case DP_AUX_F:
+ aux_ch = AUX_CH_F;
+ break;
+ default:
+ MISSING_CASE(info->alternate_aux_channel);
+ aux_ch = AUX_CH_A;
+ break;
+ }
+
+ DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
+ aux_ch_name(aux_ch), port_name(port));
+
+ return aux_ch;
+}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 84bf8d827136..447c5256f63a 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -27,11 +27,7 @@
#include "i915_drv.h"
-#ifdef CONFIG_SMP
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
-#else
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
-#endif
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 29075c763428..25e3aba9cded 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2138,16 +2138,8 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int pixel_rate)
{
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return DIV_ROUND_UP(pixel_rate, 2);
- else if (IS_GEMINILAKE(dev_priv))
- /*
- * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
- * as a temporary workaround. Use a higher cdclk instead. (Note that
- * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
- * cdclk.)
- */
- return DIV_ROUND_UP(pixel_rate * 100, 2 * 99);
else if (IS_GEN9(dev_priv) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return pixel_rate;
@@ -2543,14 +2535,8 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return 2 * max_cdclk_freq;
- else if (IS_GEMINILAKE(dev_priv))
- /*
- * FIXME: Limiting to 99% as a temporary workaround. See
- * intel_min_cdclk() for details.
- */
- return 2 * max_cdclk_freq * 99 / 100;
else if (IS_GEN9(dev_priv) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return max_cdclk_freq;
@@ -2674,37 +2660,18 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
fraction = 200;
}
- rawclk = CNP_RAWCLK_DIV((divider / 1000) - 1);
- if (fraction)
- rawclk |= CNP_RAWCLK_FRAC(DIV_ROUND_CLOSEST(1000,
- fraction) - 1);
-
- I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
- return divider + fraction;
-}
+ rawclk = CNP_RAWCLK_DIV(divider / 1000);
+ if (fraction) {
+ int numerator = 1;
-static int icp_rawclk(struct drm_i915_private *dev_priv)
-{
- u32 rawclk;
- int divider, numerator, denominator, frequency;
-
- if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
- frequency = 24000;
- divider = 23;
- numerator = 0;
- denominator = 0;
- } else {
- frequency = 19200;
- divider = 18;
- numerator = 1;
- denominator = 4;
+ rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
+ fraction) - 1);
+ if (HAS_PCH_ICP(dev_priv))
+ rawclk |= ICP_RAWCLK_NUM(numerator);
}
- rawclk = CNP_RAWCLK_DIV(divider) | ICP_RAWCLK_NUM(numerator) |
- ICP_RAWCLK_DEN(denominator);
-
I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
- return frequency;
+ return divider + fraction;
}
static int pch_rawclk(struct drm_i915_private *dev_priv)
@@ -2754,9 +2721,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
*/
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_ICP(dev_priv))
- dev_priv->rawclk_freq = icp_rawclk(dev_priv);
- else if (HAS_PCH_CNP(dev_priv))
+ if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = pch_rawclk(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index c6a7beabd58d..5127da286a2b 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -149,7 +149,8 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
limited_color_range = intel_crtc_state->limited_color_range;
- if (intel_crtc_state->ycbcr420) {
+ if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
ilk_load_ycbcr_conversion_matrix(intel_crtc);
return;
} else if (crtc_state->ctm) {
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
new file mode 100644
index 000000000000..3d0271cebf99
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_combo_phy.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "intel_drv.h"
+
+#define for_each_combo_port(__dev_priv, __port) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ for_each_if(intel_port_is_combophy(__dev_priv, __port))
+
+#define for_each_combo_port_reverse(__dev_priv, __port) \
+ for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
+ for_each_if(intel_port_is_combophy(__dev_priv, __port))
+
+enum {
+ PROCMON_0_85V_DOT_0,
+ PROCMON_0_95V_DOT_0,
+ PROCMON_0_95V_DOT_1,
+ PROCMON_1_05V_DOT_0,
+ PROCMON_1_05V_DOT_1,
+};
+
+static const struct cnl_procmon {
+ u32 dw1, dw9, dw10;
+} cnl_procmon_values[] = {
+ [PROCMON_0_85V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
+ [PROCMON_0_95V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
+ [PROCMON_0_95V_DOT_1] =
+ { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
+ [PROCMON_1_05V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
+ [PROCMON_1_05V_DOT_1] =
+ { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
+};
+
+/*
+ * CNL has just one set of registers, while ICL has two sets: one for port A and
+ * the other for port B. The CNL registers are equivalent to the ICL port A
+ * registers, which is why we use the ICL macros even though the function has
+ * CNL in its name.
+ */
+static const struct cnl_procmon *
+cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
+{
+ const struct cnl_procmon *procmon;
+ u32 val;
+
+ val = I915_READ(ICL_PORT_COMP_DW3(port));
+ switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
+ default:
+ MISSING_CASE(val);
+ /* fall through */
+ case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
+ break;
+ case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
+ break;
+ case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
+ procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
+ break;
+ case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
+ break;
+ case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
+ procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
+ break;
+ }
+
+ return procmon;
+}
+
+static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct cnl_procmon *procmon;
+ u32 val;
+
+ procmon = cnl_get_procmon_ref_values(dev_priv, port);
+
+ val = I915_READ(ICL_PORT_COMP_DW1(port));
+ val &= ~((0xff << 16) | 0xff);
+ val |= procmon->dw1;
+ I915_WRITE(ICL_PORT_COMP_DW1(port), val);
+
+ I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
+ I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
+}
+
+static bool check_phy_reg(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t reg, u32 mask,
+ u32 expected_val)
+{
+ u32 val = I915_READ(reg);
+
+ if ((val & mask) != expected_val) {
+ DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
+ "current %08x mask %08x expected %08x\n",
+ port_name(port),
+ reg.reg, val, mask, expected_val);
+ return false;
+ }
+
+ return true;
+}
+
+static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct cnl_procmon *procmon;
+ bool ret;
+
+ procmon = cnl_get_procmon_ref_values(dev_priv, port);
+
+ ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
+ (0xff << 16) | 0xff, procmon->dw1);
+ ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
+ -1U, procmon->dw9);
+ ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
+ -1U, procmon->dw10);
+
+ return ret;
+}
+
+static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
+{
+ return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
+ (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
+}
+
+static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
+{
+ enum port port = PORT_A;
+ bool ret;
+
+ if (!cnl_combo_phy_enabled(dev_priv))
+ return false;
+
+ ret = cnl_verify_procmon_ref_values(dev_priv, port);
+
+ ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
+ CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+ return ret;
+}
+
+void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(CHICKEN_MISC_2);
+ val &= ~CNL_COMP_PWR_DOWN;
+ I915_WRITE(CHICKEN_MISC_2, val);
+
+ /* Dummy PORT_A to get the correct CNL register from the ICL macro */
+ cnl_set_procmon_ref_values(dev_priv, PORT_A);
+
+ val = I915_READ(CNL_PORT_COMP_DW0);
+ val |= COMP_INIT;
+ I915_WRITE(CNL_PORT_COMP_DW0, val);
+
+ val = I915_READ(CNL_PORT_CL1CM_DW5);
+ val |= CL_POWER_DOWN_ENABLE;
+ I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+}
+
+void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ if (!cnl_combo_phy_verify_state(dev_priv))
+ DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
+
+ val = I915_READ(CHICKEN_MISC_2);
+ val |= CNL_COMP_PWR_DOWN;
+ I915_WRITE(CHICKEN_MISC_2, val);
+}
+
+static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ return !(I915_READ(ICL_PHY_MISC(port)) &
+ ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
+ (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
+}
+
+static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ bool ret;
+
+ if (!icl_combo_phy_enabled(dev_priv, port))
+ return false;
+
+ ret = cnl_verify_procmon_ref_values(dev_priv, port);
+
+ ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
+ CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+ return ret;
+}
+
+void icl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+ enum port port;
+
+ for_each_combo_port(dev_priv, port) {
+ u32 val;
+
+ if (icl_combo_phy_verify_state(dev_priv, port)) {
+ DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
+ port_name(port));
+ continue;
+ }
+
+ val = I915_READ(ICL_PHY_MISC(port));
+ val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+ I915_WRITE(ICL_PHY_MISC(port), val);
+
+ cnl_set_procmon_ref_values(dev_priv, port);
+
+ val = I915_READ(ICL_PORT_COMP_DW0(port));
+ val |= COMP_INIT;
+ I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+
+ val = I915_READ(ICL_PORT_CL_DW5(port));
+ val |= CL_POWER_DOWN_ENABLE;
+ I915_WRITE(ICL_PORT_CL_DW5(port), val);
+ }
+}
+
+void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+ enum port port;
+
+ for_each_combo_port_reverse(dev_priv, port) {
+ u32 val;
+
+ if (!icl_combo_phy_verify_state(dev_priv, port))
+ DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
+ port_name(port));
+
+ val = I915_READ(ICL_PHY_MISC(port));
+ val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+ I915_WRITE(ICL_PHY_MISC(port), val);
+
+ val = I915_READ(ICL_PORT_COMP_DW0(port));
+ val &= ~COMP_INIT;
+ I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_connector.c
index ca44bf368e24..18e370f607bc 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -25,11 +25,140 @@
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"
+int intel_connector_init(struct intel_connector *connector)
+{
+ struct intel_digital_connector_state *conn_state;
+
+ /*
+	 * Allocate enough memory to hold intel_digital_connector_state.
+	 * This might be a few bytes too many, but for connectors that don't
+	 * need it, we'll free the state and allocate a smaller one on the
+	 * first successful commit anyway.
+ */
+ conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
+ if (!conn_state)
+ return -ENOMEM;
+
+ __drm_atomic_helper_connector_reset(&connector->base,
+ &conn_state->base);
+
+ return 0;
+}
+
+struct intel_connector *intel_connector_alloc(void)
+{
+ struct intel_connector *connector;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ if (intel_connector_init(connector) < 0) {
+ kfree(connector);
+ return NULL;
+ }
+
+ return connector;
+}
+
+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and only until drm_connector_init has succeeded.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free (see intel_connector_destroy).
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+ kfree(to_intel_digital_connector_state(connector->base.state));
+ kfree(connector);
+}
+
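The lifetime rule documented above is easiest to see in the intended call
pattern. A hedged sketch of an encoder-init error path (dev, funcs, type and
the error codes are illustrative stand-ins, not taken from this patch):

	struct intel_connector *connector = intel_connector_alloc();

	if (!connector)
		return -ENOMEM;

	if (drm_connector_init(dev, &connector->base, &funcs, type) < 0) {
		/* drm_connector_init() failed, so the destroy hook will
		 * never run: free the connector and its state by hand. */
		intel_connector_free(connector);
		return -EINVAL;
	}
	/* From here on, cleanup goes through intel_connector_destroy(). */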
+/*
+ * Connector type independent destroy hook for drm_connector_funcs.
+ */
+void intel_connector_destroy(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ kfree(intel_connector->detect_edid);
+
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ kfree(intel_connector->edid);
+
+ intel_panel_fini(&intel_connector->panel);
+
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+int intel_connector_register(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = intel_backlight_device_register(intel_connector);
+ if (ret)
+ goto err;
+
+ if (i915_inject_load_failure()) {
+ ret = -EFAULT;
+ goto err_backlight;
+ }
+
+ return 0;
+
+err_backlight:
+ intel_backlight_device_unregister(intel_connector);
+err:
+ return ret;
+}
+
+void intel_connector_unregister(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_backlight_device_unregister(intel_connector);
+}
+
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_connector_attach_encoder(&connector->base, &encoder->base);
+}
+
+/*
+ * Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning, so the encoder state determines the state
+ * of the connector.
+ */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+ enum pipe pipe = 0;
+ struct intel_encoder *encoder = connector->encoder;
+
+ return encoder->get_hw_state(encoder, &pipe);
+}
+
+enum pipe intel_connector_get_pipe(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+ if (!connector->base.state->crtc)
+ return INVALID_PIPE;
+
+ return to_intel_crtc(connector->base.state->crtc)->pipe;
+}
+
/**
* intel_connector_update_modes - update connector from edid
* @connector: DRM connector device to use
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 0c6bf82bb059..68f2fb89ece3 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -354,6 +354,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return true;
}
@@ -368,6 +369,7 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
return false;
pipe_config->has_pch_encoder = true;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return true;
}
@@ -389,6 +391,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
return false;
pipe_config->has_pch_encoder = true;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
@@ -849,12 +852,6 @@ out:
return status;
}
-static void intel_crt_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -909,7 +906,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_crt_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index d48186e9ddad..a516697bf57d 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -34,34 +34,38 @@
* low-power state and comes back to normal.
*/
-#define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_ICL);
-#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
-#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
-MODULE_FIRMWARE(I915_CSR_GLK);
-#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
+#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin"
+#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define ICL_CSR_MAX_FW_SIZE 0x6000
+MODULE_FIRMWARE(ICL_CSR_PATH);
-#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_CNL);
+#define CNL_CSR_PATH "i915/cnl_dmc_ver1_07.bin"
#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define CNL_CSR_MAX_FW_SIZE GLK_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(CNL_CSR_PATH);
+
+#define GLK_CSR_PATH "i915/glk_dmc_ver1_04.bin"
+#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
+#define GLK_CSR_MAX_FW_SIZE 0x4000
+MODULE_FIRMWARE(GLK_CSR_PATH);
-#define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin"
-MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_PATH "i915/kbl_dmc_ver1_04.bin"
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
+#define KBL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(KBL_CSR_PATH);
-#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin"
-MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_PATH "i915/skl_dmc_ver1_27.bin"
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27)
+#define SKL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(SKL_CSR_PATH);
-#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_PATH "i915/bxt_dmc_ver1_07.bin"
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-
-
#define BXT_CSR_MAX_FW_SIZE 0x3000
-#define GLK_CSR_MAX_FW_SIZE 0x4000
-#define ICL_CSR_MAX_FW_SIZE 0x6000
+MODULE_FIRMWARE(BXT_CSR_PATH);
+
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
struct intel_css_header {
@@ -190,6 +194,12 @@ static const struct stepping_info bxt_stepping_info[] = {
{'B', '0'}, {'B', '1'}, {'B', '2'}
};
+static const struct stepping_info icl_stepping_info[] = {
+ {'A', '0'}, {'A', '1'}, {'A', '2'},
+ {'B', '0'}, {'B', '2'},
+ {'C', '0'}
+};
+
static const struct stepping_info no_stepping_info = { '*', '*' };
static const struct stepping_info *
@@ -198,7 +208,10 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
const struct stepping_info *si;
unsigned int size;
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ size = ARRAY_SIZE(icl_stepping_info);
+ si = icl_stepping_info;
+ } else if (IS_SKYLAKE(dev_priv)) {
size = ARRAY_SIZE(skl_stepping_info);
si = skl_stepping_info;
} else if (IS_BROXTON(dev_priv)) {
@@ -285,10 +298,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
struct intel_csr *csr = &dev_priv->csr;
const struct stepping_info *si = intel_get_stepping_info(dev_priv);
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
- uint32_t max_fw_size = 0;
uint32_t i;
uint32_t *dmc_payload;
- uint32_t required_version;
if (!fw)
return NULL;
@@ -303,38 +314,19 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
return NULL;
}
- csr->version = css_header->version;
-
- if (csr->fw_path == i915_modparams.dmc_firmware_path) {
- /* Bypass version check for firmware override. */
- required_version = csr->version;
- } else if (IS_ICELAKE(dev_priv)) {
- required_version = ICL_CSR_VERSION_REQUIRED;
- } else if (IS_CANNONLAKE(dev_priv)) {
- required_version = CNL_CSR_VERSION_REQUIRED;
- } else if (IS_GEMINILAKE(dev_priv)) {
- required_version = GLK_CSR_VERSION_REQUIRED;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- required_version = KBL_CSR_VERSION_REQUIRED;
- } else if (IS_SKYLAKE(dev_priv)) {
- required_version = SKL_CSR_VERSION_REQUIRED;
- } else if (IS_BROXTON(dev_priv)) {
- required_version = BXT_CSR_VERSION_REQUIRED;
- } else {
- MISSING_CASE(INTEL_REVID(dev_priv));
- required_version = 0;
- }
-
- if (csr->version != required_version) {
+ if (csr->required_version &&
+ css_header->version != csr->required_version) {
DRM_INFO("Refusing to load DMC firmware v%u.%u,"
" please use v%u.%u\n",
- CSR_VERSION_MAJOR(csr->version),
- CSR_VERSION_MINOR(csr->version),
- CSR_VERSION_MAJOR(required_version),
- CSR_VERSION_MINOR(required_version));
+ CSR_VERSION_MAJOR(css_header->version),
+ CSR_VERSION_MINOR(css_header->version),
+ CSR_VERSION_MAJOR(csr->required_version),
+ CSR_VERSION_MINOR(csr->required_version));
return NULL;
}
+ csr->version = css_header->version;
+
readcount += sizeof(struct intel_css_header);
/* Extract Package Header information */
@@ -402,15 +394,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
nbytes = dmc_header->fw_size * 4;
- if (INTEL_GEN(dev_priv) >= 11)
- max_fw_size = ICL_CSR_MAX_FW_SIZE;
- else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
- max_fw_size = GLK_CSR_MAX_FW_SIZE;
- else if (IS_GEN9(dev_priv))
- max_fw_size = BXT_CSR_MAX_FW_SIZE;
- else
- MISSING_CASE(INTEL_REVID(dev_priv));
- if (nbytes > max_fw_size) {
+ if (nbytes > csr->max_fw_size) {
DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
return NULL;
}
@@ -475,27 +459,57 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- if (i915_modparams.dmc_firmware_path)
- csr->fw_path = i915_modparams.dmc_firmware_path;
- else if (IS_ICELAKE(dev_priv))
- csr->fw_path = I915_CSR_ICL;
- else if (IS_CANNONLAKE(dev_priv))
- csr->fw_path = I915_CSR_CNL;
- else if (IS_GEMINILAKE(dev_priv))
- csr->fw_path = I915_CSR_GLK;
- else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- csr->fw_path = I915_CSR_KBL;
- else if (IS_SKYLAKE(dev_priv))
- csr->fw_path = I915_CSR_SKL;
- else if (IS_BROXTON(dev_priv))
- csr->fw_path = I915_CSR_BXT;
-
/*
- * Obtain a runtime pm reference, until CSR is loaded,
- * to avoid entering runtime-suspend.
+ * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
+ * runtime-suspend.
+ *
+ * On error, we return with the rpm wakeref held to prevent runtime
+ * suspend as runtime suspend *requires* a working CSR for whatever
+ * reason.
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ /* Allow loading the fw via module parameter, using the last known max size */
+ csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ } else if (IS_ICELAKE(dev_priv)) {
+ csr->fw_path = ICL_CSR_PATH;
+ csr->required_version = ICL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ csr->fw_path = CNL_CSR_PATH;
+ csr->required_version = CNL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ csr->fw_path = GLK_CSR_PATH;
+ csr->required_version = GLK_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
+ } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ csr->fw_path = KBL_CSR_PATH;
+ csr->required_version = KBL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ csr->fw_path = SKL_CSR_PATH;
+ csr->required_version = SKL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
+ } else if (IS_BROXTON(dev_priv)) {
+ csr->fw_path = BXT_CSR_PATH;
+ csr->required_version = BXT_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
+ }
+
+ if (i915_modparams.dmc_firmware_path) {
+ if (strlen(i915_modparams.dmc_firmware_path) == 0) {
+ csr->fw_path = NULL;
+ DRM_INFO("Disabling CSR firmware and runtime PM\n");
+ return;
+ }
+
+ csr->fw_path = i915_modparams.dmc_firmware_path;
+ /* Bypass version check for firmware override. */
+ csr->required_version = 0;
+ }
+
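The override handling reduces to three cases: an empty string disables CSR
loading entirely, a non-empty path wins over the per-platform default and
bypasses the version check, and no parameter keeps the platform default. A
standalone reduction of that decision (names other than the parameter string
itself are illustrative):

	#include <stdio.h>
	#include <string.h>

	static const char *pick_fw(const char *param, const char *platform_path,
				   int *required_version)
	{
		if (param) {
			if (strlen(param) == 0)
				return NULL;	/* "" disables CSR */
			*required_version = 0;	/* override skips version check */
			return param;
		}
		return platform_path;		/* per-platform default, if any */
	}

	int main(void)
	{
		int ver = 0x0107;

		printf("%s\n", pick_fw("i915/my_dmc.bin",
				       "i915/icl_dmc_ver1_07.bin", &ver));
		return 0;
	}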
if (csr->fw_path == NULL) {
DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5186cd7075f9..f3e1d6a0b7dd 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -28,6 +28,7 @@
#include <drm/drm_scdc_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_dsi.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -642,7 +643,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_KBL_ULX(dev_priv)) {
+ if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
return kbl_y_ddi_translations_dp;
} else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
@@ -658,7 +659,7 @@ static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
@@ -680,7 +681,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
return skl_y_ddi_translations_hdmi;
} else {
@@ -1060,10 +1061,10 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
}
static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
- const struct intel_shared_dpll *pll)
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- int clock = crtc->config->port_clock;
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ int clock = crtc_state->port_clock;
const enum intel_dpll_id id = pll->info->id;
switch (id) {
@@ -1363,8 +1364,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
return dco_freq / (p0 * p1 * p2 * 5);
}
-static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id pll_id)
{
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
@@ -1517,7 +1518,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
else
dotclock = pipe_config->port_clock;
- if (pipe_config->ycbcr420)
+ if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
dotclock *= 2;
if (pipe_config->pixel_multiplier)
@@ -1737,16 +1738,16 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (INTEL_GEN(dev_priv) <= 8)
- hsw_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_BC(dev_priv))
- skl_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_clock_get(encoder, pipe_config);
+ if (IS_ICELAKE(dev_priv))
+ icl_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
- else if (IS_ICELAKE(dev_priv))
- icl_ddi_clock_get(encoder, pipe_config);
+ else if (IS_GEN9_LP(dev_priv))
+ bxt_ddi_clock_get(encoder, pipe_config);
+ else if (IS_GEN9_BC(dev_priv))
+ skl_ddi_clock_get(encoder, pipe_config);
+ else if (INTEL_GEN(dev_priv) <= 8)
+ hsw_ddi_clock_get(encoder, pipe_config);
}
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1784,6 +1785,13 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
break;
}
+ /*
+	 * As per DP 1.2 spec section 2.3.4.3, while sending YCBCR 444 signals
+	 * we should program the MSA MISC1/0 fields with the colorspace
+	 * information. The output colorspace encoding is BT601.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
@@ -1998,24 +2006,24 @@ out:
return ret;
}
-bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
- enum pipe *pipe)
+static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
+ u8 *pipe_mask, bool *is_dp_mst)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = encoder->port;
enum pipe p;
u32 tmp;
- bool ret;
+ u8 mst_pipe_mask;
+
+ *pipe_mask = 0;
+ *is_dp_mst = false;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
- return false;
-
- ret = false;
+ return;
tmp = I915_READ(DDI_BUF_CTL(port));
-
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
@@ -2023,44 +2031,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ default:
+ MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK);
+ /* fallthrough */
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
- *pipe = PIPE_A;
+ *pipe_mask = BIT(PIPE_A);
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
- *pipe = PIPE_B;
+ *pipe_mask = BIT(PIPE_B);
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
- *pipe = PIPE_C;
+ *pipe_mask = BIT(PIPE_C);
break;
}
- ret = true;
-
goto out;
}
+ mst_pipe_mask = 0;
for_each_pipe(dev_priv, p) {
- enum transcoder cpu_transcoder = (enum transcoder) p;
+ enum transcoder cpu_transcoder = (enum transcoder)p;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
- if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
- TRANS_DDI_MODE_SELECT_DP_MST)
- goto out;
+ if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
+ continue;
- *pipe = p;
- ret = true;
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
+ TRANS_DDI_MODE_SELECT_DP_MST)
+ mst_pipe_mask |= BIT(p);
- goto out;
- }
+ *pipe_mask |= BIT(p);
+ }
+
+ if (!*pipe_mask)
+ DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
+ port_name(port));
+
+ if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
+ DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
+ port_name(port), *pipe_mask);
+ *pipe_mask = BIT(ffs(*pipe_mask) - 1);
}
- DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
+ if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
+ DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
+ port_name(port), *pipe_mask, mst_pipe_mask);
+ else
+ *is_dp_mst = mst_pipe_mask;
out:
- if (ret && IS_GEN9_LP(dev_priv)) {
+ if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
tmp = I915_READ(BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
@@ -2070,12 +2092,26 @@ out:
}
intel_display_power_put(dev_priv, encoder->power_domain);
+}
- return ret;
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ u8 pipe_mask;
+ bool is_mst;
+
+ intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+
+ if (is_mst || !pipe_mask)
+ return false;
+
+ *pipe = ffs(pipe_mask) - 1;
+
+ return true;
}
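Since the readout now returns a pipe bitmask, the single-pipe case recovers
the pipe enum with ffs(). A minimal standalone check of that arithmetic
(__builtin_ffs() mirrors the kernel's ffs(), returning the 1-based index of
the lowest set bit):

	#include <stdio.h>

	int main(void)
	{
		/* Say the readout found the port driving pipe B only. */
		unsigned int pipe_mask = 1u << 1;	/* BIT(PIPE_B) */
		int pipe = __builtin_ffs(pipe_mask) - 1;

		printf("pipe %d\n", pipe);		/* 1, i.e. PIPE_B */
		return 0;
	}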
static inline enum intel_display_power_domain
-intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
+intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
{
/* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
* DC states enabled at the same time, while for driver initiated AUX
@@ -2089,13 +2125,14 @@ intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
* Note that PSR is enabled only on Port A even though this function
* returns the correct domain for other ports too.
*/
- return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
- intel_dp->aux_power_domain;
+ return dig_port->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+ intel_aux_power_domain(dig_port);
}
static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port;
u64 domains;
@@ -2110,12 +2147,19 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
dig_port = enc_to_dig_port(&encoder->base);
domains = BIT_ULL(dig_port->ddi_io_power_domain);
- /* AUX power is only needed for (e)DP mode, not for HDMI. */
- if (intel_crtc_has_dp_encoder(crtc_state)) {
- struct intel_dp *intel_dp = &dig_port->dp;
+ /*
+ * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
+ * ports.
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) ||
+ intel_port_is_tc(dev_priv, encoder->port))
+ domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
- domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp));
- }
+ /*
+ * VDSC power is needed when DSC is enabled
+ */
+ if (crtc_state->dsc_params.compression_enable)
+ domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
return domains;
}
@@ -2748,77 +2792,130 @@ uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
return 0;
}
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+static void icl_map_plls_to_ports(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_connector_state *conn_state;
- struct drm_connector *conn;
- int i;
+ enum port port = encoder->port;
+ u32 val;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
- struct intel_encoder *encoder =
- to_intel_encoder(conn_state->best_encoder);
- enum port port;
- uint32_t val;
+ mutex_lock(&dev_priv->dpll_lock);
- if (conn_state->crtc != crtc)
- continue;
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
- port = encoder->port;
- mutex_lock(&dev_priv->dpll_lock);
+ if (intel_port_is_combophy(dev_priv, port)) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ POSTING_READ(DPCLKA_CFGCR0_ICL);
+ }
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
+ val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- if (intel_port_is_combophy(dev_priv, port)) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- POSTING_READ(DPCLKA_CFGCR0_ICL);
- }
+ mutex_unlock(&dev_priv->dpll_lock);
+}
- val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
- mutex_unlock(&dev_priv->dpll_lock);
- }
+ mutex_lock(&dev_priv->dpll_lock);
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+ mutex_unlock(&dev_priv->dpll_lock);
}
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_connector_state *old_conn_state;
- struct drm_connector *conn;
- int i;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val;
+ enum port port;
+ u32 port_mask;
+ bool ddi_clk_needed;
+
+ /*
+ * In case of DP MST, we sanitize the primary encoder only, not the
+ * virtual ones.
+ */
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ return;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
- struct intel_encoder *encoder =
- to_intel_encoder(old_conn_state->best_encoder);
- enum port port;
+ if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) {
+ u8 pipe_mask;
+ bool is_mst;
- if (old_conn_state->crtc != crtc)
+ intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+ /*
+ * In the unlikely case that BIOS enables DP in MST mode, just
+ * warn since our MST HW readout is incomplete.
+ */
+ if (WARN_ON(is_mst))
+ return;
+ }
+
+ port_mask = BIT(encoder->port);
+ ddi_clk_needed = encoder->base.crtc;
+
+ if (encoder->type == INTEL_OUTPUT_DSI) {
+ struct intel_encoder *other_encoder;
+
+ port_mask = intel_dsi_encoder_ports(encoder);
+ /*
+ * Sanity check that we haven't incorrectly registered another
+ * encoder using any of the ports of this DSI encoder.
+ */
+ for_each_intel_encoder(&dev_priv->drm, other_encoder) {
+ if (other_encoder == encoder)
+ continue;
+
+ if (WARN_ON(port_mask & BIT(other_encoder->port)))
+ return;
+ }
+ /*
+ * DSI ports should have their DDI clock ungated when disabled
+ * and gated when enabled.
+ */
+ ddi_clk_needed = !encoder->base.crtc;
+ }
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_port_masked(port, port_mask) {
+ bool ddi_clk_ungated = !(val &
+ icl_dpclka_cfgcr0_clk_off(dev_priv,
+ port));
+
+ if (ddi_clk_needed == ddi_clk_ungated)
continue;
- port = encoder->port;
- mutex_lock(&dev_priv->dpll_lock);
- I915_WRITE(DPCLKA_CFGCR0_ICL,
- I915_READ(DPCLKA_CFGCR0_ICL) |
- icl_dpclka_cfgcr0_clk_off(dev_priv, port));
- mutex_unlock(&dev_priv->dpll_lock);
+ /*
+		 * Punt for now on the case where the clock is gated but would
+		 * be needed by the port; something else is really broken then.
+ */
+ if (WARN_ON(ddi_clk_needed))
+ continue;
+
+ DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ port_name(port));
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
}
}
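The ddi_clk_needed computation inverts for DSI, as the comments above spell
out. A compact standalone reduction of the expected clock state (encoders
reduced to two booleans purely for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	static bool ddi_clk_needed(bool has_crtc, bool is_dsi)
	{
		/* DSI ports want the DDI clock ungated when *disabled* and
		 * gated when enabled; all other ports are the opposite. */
		return is_dsi ? !has_crtc : has_crtc;
	}

	int main(void)
	{
		printf("DP enabled:   %d\n", ddi_clk_needed(true, false));  /* 1 */
		printf("DP disabled:  %d\n", ddi_clk_needed(false, false)); /* 0 */
		printf("DSI enabled:  %d\n", ddi_clk_needed(true, true));   /* 0 */
		printf("DSI disabled: %d\n", ddi_clk_needed(false, true));  /* 1 */
		return 0;
	}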
static void intel_ddi_clk_select(struct intel_encoder *encoder,
- const struct intel_shared_dpll *pll)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
uint32_t val;
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (WARN_ON(!pll))
return;
@@ -2828,7 +2925,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (IS_ICELAKE(dev_priv)) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port),
- icl_pll_to_ddi_pll_sel(encoder, pll));
+ icl_pll_to_ddi_pll_sel(encoder, crtc_state));
} else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -2881,6 +2978,184 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
}
+static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+ u32 val;
+ int i;
+
+ if (tc_port == PORT_TC_NONE)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+ val = I915_READ(mg_regs[i]);
+ val |= MG_DP_MODE_CFG_TR2PWR_GATING |
+ MG_DP_MODE_CFG_TRPWR_GATING |
+ MG_DP_MODE_CFG_CLNPWR_GATING |
+ MG_DP_MODE_CFG_DIGPWR_GATING |
+ MG_DP_MODE_CFG_GAONPWR_GATING;
+ I915_WRITE(mg_regs[i], val);
+ }
+
+ val = I915_READ(MG_MISC_SUS0(tc_port));
+ val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
+ MG_MISC_SUS0_CFG_TR2PWR_GATING |
+ MG_MISC_SUS0_CFG_CL2PWR_GATING |
+ MG_MISC_SUS0_CFG_GAONPWR_GATING |
+ MG_MISC_SUS0_CFG_TRPWR_GATING |
+ MG_MISC_SUS0_CFG_CL1PWR_GATING |
+ MG_MISC_SUS0_CFG_DGPWR_GATING;
+ I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+ u32 val;
+ int i;
+
+ if (tc_port == PORT_TC_NONE)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+ val = I915_READ(mg_regs[i]);
+ val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
+ MG_DP_MODE_CFG_TRPWR_GATING |
+ MG_DP_MODE_CFG_CLNPWR_GATING |
+ MG_DP_MODE_CFG_DIGPWR_GATING |
+ MG_DP_MODE_CFG_GAONPWR_GATING);
+ I915_WRITE(mg_regs[i], val);
+ }
+
+ val = I915_READ(MG_MISC_SUS0(tc_port));
+ val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
+ MG_MISC_SUS0_CFG_TR2PWR_GATING |
+ MG_MISC_SUS0_CFG_CL2PWR_GATING |
+ MG_MISC_SUS0_CFG_GAONPWR_GATING |
+ MG_MISC_SUS0_CFG_TRPWR_GATING |
+ MG_MISC_SUS0_CFG_CL1PWR_GATING |
+ MG_MISC_SUS0_CFG_DGPWR_GATING);
+ I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ enum port port = intel_dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ u32 ln0, ln1, lane_info;
+
+ if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
+ return;
+
+ ln0 = I915_READ(MG_DP_MODE(port, 0));
+ ln1 = I915_READ(MG_DP_MODE(port, 1));
+
+ switch (intel_dig_port->tc_type) {
+ case TC_PORT_TYPEC:
+ ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+ ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+
+ lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
+ DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+ DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+
+ switch (lane_info) {
+ case 0x1:
+ case 0x4:
+ break;
+ case 0x2:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ break;
+ case 0x3:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+ case 0x8:
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ break;
+ case 0xC:
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+ case 0xF:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+ default:
+ MISSING_CASE(lane_info);
+ }
+ break;
+
+ case TC_PORT_LEGACY:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+
+ default:
+ MISSING_CASE(intel_dig_port->tc_type);
+ return;
+ }
+
+ I915_WRITE(MG_DP_MODE(port, 0), ln0);
+ I915_WRITE(MG_DP_MODE(port, 1), ln1);
+}
+
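For quick reference, the lane_info nibble handled in the TC_PORT_TYPEC case
sets the following MG_DP_MODE_CFG_DP_Xn_MODE bits; this table merely restates
the switch above, with one lane_info bit per assigned ML lane as the apparent
encoding:

	lane_info   ln0 gets     ln1 gets
	0x1, 0x4    -            -
	0x2         X1           -
	0x3         X1 | X2      -
	0x8         -            X1
	0xC         -            X1 | X2
	0xF         X1 | X2      X1 | X2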
+static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->fec_enable)
+ return;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
+ DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n");
+}
+
+static void intel_ddi_enable_fec(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val |= DP_TP_CTL_FEC_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_FEC_ENABLE_LIVE,
+ DP_TP_STATUS_FEC_ENABLE_LIVE,
+ 1))
+ DRM_ERROR("Timed out waiting for FEC Enable Status\n");
+}
+
+static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~DP_TP_CTL_FEC_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+}
+
static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -2894,19 +3169,16 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
- intel_display_power_get(dev_priv,
- intel_ddi_main_link_aux_domain(intel_dp));
-
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
intel_edp_panel_on(intel_dp);
- intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
+ intel_ddi_clk_select(encoder, crtc_state);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- icl_program_mg_dp_mode(intel_dp);
+ icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
if (IS_ICELAKE(dev_priv))
@@ -2922,14 +3194,21 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
+ true);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
+ intel_ddi_enable_fec(encoder, crtc_state);
+
icl_enable_phy_clock_gating(dig_port);
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
+
+ intel_dsc_enable(encoder, crtc_state);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2944,10 +3223,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
- intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
+ intel_ddi_clk_select(encoder, crtc_state);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+ icl_program_mg_dp_mode(dig_port);
+ icl_disable_phy_clock_gating(dig_port);
+
if (IS_ICELAKE(dev_priv))
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
@@ -2958,12 +3240,14 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
+ icl_enable_phy_clock_gating(dig_port);
+
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
intel_ddi_enable_pipe_clock(crtc_state);
- intel_dig_port->set_infoframes(&encoder->base,
+ intel_dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
crtc_state, conn_state);
}
@@ -2991,15 +3275,31 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
WARN_ON(crtc_state->has_pch_encoder);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_map_plls_to_ports(encoder, crtc_state);
+
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
- else
+ } else {
+ struct intel_lspcon *lspcon =
+ enc_to_intel_lspcon(&encoder->base);
+
intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ if (lspcon->active) {
+ struct intel_digital_port *dig_port =
+ enc_to_dig_port(&encoder->base);
+
+ dig_port->set_infoframes(encoder,
+ crtc_state->has_infoframe,
+ crtc_state, conn_state);
+ }
+ }
}
-static void intel_disable_ddi_buf(struct intel_encoder *encoder)
+static void intel_disable_ddi_buf(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -3018,6 +3318,9 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder)
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
I915_WRITE(DP_TP_CTL(port), val);
+ /* Disable FEC in DP Sink */
+ intel_ddi_disable_fec_state(encoder, crtc_state);
+
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
}
@@ -3041,7 +3344,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
}
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
@@ -3049,9 +3352,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
-
- intel_display_power_put(dev_priv,
- intel_ddi_main_link_aux_domain(intel_dp));
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3062,12 +3362,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- dig_port->set_infoframes(&encoder->base, false,
+ dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
intel_ddi_disable_pipe_clock(old_crtc_state);
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
@@ -3080,6 +3380,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
/*
* When called from DP MST code:
* - old_conn_state will be NULL
@@ -3099,6 +3401,9 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
else
intel_ddi_post_disable_dp(encoder,
old_crtc_state, old_conn_state);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_unmap_plls_to_ports(encoder);
}
void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
@@ -3118,7 +3423,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_ddi_clk_disable(encoder);
val = I915_READ(FDI_RX_MISC(PIPE_A));
@@ -3154,6 +3459,26 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
+static i915_reg_t
+gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ static const i915_reg_t regs[] = {
+ [PORT_A] = CHICKEN_TRANS_EDP,
+ [PORT_B] = CHICKEN_TRANS_A,
+ [PORT_C] = CHICKEN_TRANS_B,
+ [PORT_D] = CHICKEN_TRANS_C,
+ [PORT_E] = CHICKEN_TRANS_A,
+ };
+
+ WARN_ON(INTEL_GEN(dev_priv) < 9);
+
+ if (WARN_ON(port < PORT_A || port > PORT_E))
+ port = PORT_A;
+
+ return regs[port];
+}
+
static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -3177,17 +3502,10 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
* the bits affect a specific DDI port rather than
* a specific transcoder.
*/
- static const enum transcoder port_to_transcoder[] = {
- [PORT_A] = TRANSCODER_EDP,
- [PORT_B] = TRANSCODER_A,
- [PORT_C] = TRANSCODER_B,
- [PORT_D] = TRANSCODER_C,
- [PORT_E] = TRANSCODER_A,
- };
- enum transcoder transcoder = port_to_transcoder[port];
+ i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
u32 val;
- val = I915_READ(CHICKEN_TRANS(transcoder));
+ val = I915_READ(reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3196,8 +3514,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
- I915_WRITE(CHICKEN_TRANS(transcoder), val);
- POSTING_READ(CHICKEN_TRANS(transcoder));
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
udelay(1);
@@ -3208,7 +3526,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
- I915_WRITE(CHICKEN_TRANS(transcoder), val);
+ I915_WRITE(reg, val);
}
/* In HDMI/DVI mode, the port width, and swing/emphasis values
@@ -3252,6 +3570,9 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
intel_edp_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
+ /* Disable the decompression in DP Sink */
+ intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
+ false);
}
static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
@@ -3282,13 +3603,76 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
}
-static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ enum port port)
{
- uint8_t mask = pipe_config->lane_lat_optim_mask;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
+ bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
+ switch (pipe_config->lane_count) {
+ case 1:
+ val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
+ DFLEXDPMLE1_DPMLETC_ML0(tc_port);
+ break;
+ case 2:
+ val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
+ DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
+ break;
+ case 4:
+ val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
+ break;
+ default:
+ MISSING_CASE(pipe_config->lane_count);
+ }
+ I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
+}
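A standalone sketch of the lane selection above, reducing the
DFLEXDPMLE1_DPMLETC_* choices to readable lane ranges (the string labels are
illustrative; the mapping mirrors the switch):

	#include <stdbool.h>
	#include <stdio.h>

	static const char *fia_lanes(int lane_count, bool lane_reversal)
	{
		switch (lane_count) {
		case 1:
			return lane_reversal ? "ML3" : "ML0";
		case 2:
			return lane_reversal ? "ML3:ML2" : "ML1:ML0";
		case 4:
			return "ML3:ML0";	/* all lanes, reversal moot */
		default:
			return "invalid";
		}
	}

	int main(void)
	{
		printf("x2 reversed -> %s\n", fia_lanes(2, true));
		return 0;
	}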
- bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
+static void
+intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum port port = encoder->port;
+
+ if (intel_crtc_has_dp_encoder(crtc_state) ||
+ intel_port_is_tc(dev_priv, encoder->port))
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+
+ if (IS_GEN9_LP(dev_priv))
+ bxt_ddi_phy_set_lane_optim_mask(encoder,
+ crtc_state->lane_lat_optim_mask);
+
+ /*
+ * Program the lane count for static/dynamic connections on Type-C ports.
+ * Skip this step for TBT.
+ */
+ if (dig_port->tc_type == TC_PORT_UNKNOWN ||
+ dig_port->tc_type == TC_PORT_TBT)
+ return;
+
+ intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
+}
+
+static void
+intel_ddi_post_pll_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+
+ if (intel_crtc_has_dp_encoder(crtc_state) ||
+ intel_port_is_tc(dev_priv, encoder->port))
+ intel_display_power_put(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3353,10 +3737,10 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
- if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
- crtc_state->min_voltage_level = 2;
- else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+ if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
+ else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 2;
}
void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -3406,7 +3790,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = true;
intel_dig_port = enc_to_dig_port(&encoder->base);
- if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
+ if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
pipe_config->has_infoframe = true;
if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
@@ -3767,6 +4151,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
+ enum pipe pipe;
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
@@ -3805,8 +4190,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->compute_output_type = intel_ddi_compute_output_type;
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
- if (IS_GEN9_LP(dev_priv))
- intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
+ intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
+ intel_encoder->post_pll_disable = intel_ddi_post_pll_disable;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->post_disable = intel_ddi_post_disable;
@@ -3817,8 +4202,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->type = INTEL_OUTPUT_DDI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
+ for_each_pipe(dev_priv, pipe)
+ intel_encoder->crtc_mask |= BIT(pipe);
if (INTEL_GEN(dev_priv) >= 11)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -3828,6 +4214,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
+ intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
switch (port) {
case PORT_A:
@@ -3858,8 +4245,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
MISSING_CASE(port);
}
- intel_infoframe_init(intel_dig_port);
-
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
goto err;
@@ -3888,6 +4273,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
port_name(port));
}
+ intel_infoframe_init(intel_dig_port);
return;
err:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 0ef0c6448d53..1e56319334f3 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -77,6 +77,10 @@ void intel_device_info_dump_flags(const struct intel_device_info *info,
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
+
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
+ DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
+#undef PRINT_FLAG
}
static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
@@ -474,7 +478,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
u8 eu_disabled_mask;
u32 n_disabled;
- if (!(sseu->subslice_mask[ss] & BIT(ss)))
+ if (!(sseu->subslice_mask[s] & BIT(ss)))
/* skip disabled subslice */
continue;
@@ -744,27 +748,30 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
if (INTEL_GEN(dev_priv) >= 10) {
for_each_pipe(dev_priv, pipe)
info->num_scalers[pipe] = 2;
- } else if (INTEL_GEN(dev_priv) == 9) {
+ } else if (IS_GEN9(dev_priv)) {
info->num_scalers[PIPE_A] = 2;
info->num_scalers[PIPE_B] = 2;
info->num_scalers[PIPE_C] = 1;
}
- BUILD_BUG_ON(I915_NUM_ENGINES >
- sizeof(intel_ring_mask_t) * BITS_PER_BYTE);
+ BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
- /*
- * Skylake and Broxton currently don't expose the topmost plane as its
- * use is exclusive with the legacy cursor and we only want to expose
- * one of those, not both. Until we can safely expose the topmost plane
- * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
- * we don't expose the topmost plane at all to prevent ABI breakage
- * down the line.
- */
- if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (IS_GEN11(dev_priv))
+ for_each_pipe(dev_priv, pipe)
+ info->num_sprites[pipe] = 6;
+ else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 3;
else if (IS_BROXTON(dev_priv)) {
+ /*
+ * Skylake and Broxton currently don't expose the topmost plane as its
+ * use is exclusive with the legacy cursor and we only want to expose
+ * one of those, not both. Until we can safely expose the topmost plane
+ * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+ * we don't expose the topmost plane at all to prevent ABI breakage
+ * down the line.
+ */
+
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
@@ -779,7 +786,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
if (i915_modparams.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0;
- } else if (info->num_pipes > 0 &&
+ } else if (HAS_DISPLAY(dev_priv) &&
(IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
@@ -804,7 +811,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
DRM_INFO("PipeC fused off\n");
info->num_pipes -= 1;
}
- } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
+ } else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) {
u32 dfsm = I915_READ(SKL_DFSM);
u8 disabled_mask = 0;
bool invalid;
@@ -844,13 +851,18 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
broadwell_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) == 9)
+ else if (IS_GEN9(dev_priv))
gen9_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) == 10)
+ else if (IS_GEN10(dev_priv))
gen10_sseu_info_init(dev_priv);
else if (INTEL_GEN(dev_priv) >= 11)
gen11_sseu_info_init(dev_priv);
+ if (IS_GEN6(dev_priv) && intel_vtd_active()) {
+ DRM_INFO("Disabling ppGTT for VT-d support\n");
+ info->ppgtt = INTEL_PPGTT_NONE;
+ }
+
/* Initialize command stream timestamp frequency */
info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}
@@ -872,40 +884,37 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
- u8 vdbox_disable, vebox_disable;
u32 media_fuse;
- int i;
+ unsigned int i;
if (INTEL_GEN(dev_priv) < 11)
return;
- media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
+ media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
- vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
+ info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
- DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable);
+ DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(dev_priv, _VCS(i)))
continue;
- if (!(BIT(i) & vdbox_disable))
- continue;
-
- info->ring_mask &= ~ENGINE_MASK(_VCS(i));
- DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+ if (!(BIT(i) & info->vdbox_enable)) {
+ info->ring_mask &= ~ENGINE_MASK(_VCS(i));
+ DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+ }
}
- DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable);
+ DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(dev_priv, _VECS(i)))
continue;
- if (!(BIT(i) & vebox_disable))
- continue;
-
- info->ring_mask &= ~ENGINE_MASK(_VECS(i));
- DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+ if (!(BIT(i) & info->vebox_enable)) {
+ info->ring_mask &= ~ENGINE_MASK(_VECS(i));
+ DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+ }
}
}
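
The hunk above turns the GEN11 media fuse handling inside out: the disable register is inverted once so info->vdbox_enable/vebox_enable hold positive enable masks, and the per-engine loops then prune ring_mask from those. A standalone sketch of the mask derivation, with hypothetical mask/shift values standing in for the real GEN11_GT_VEBOX_VDBOX_DISABLE layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the GEN11_GT_* fuse layout. */
#define VDBOX_DISABLE_MASK   0x00ffu
#define VEBOX_DISABLE_MASK   0x0f00u
#define VEBOX_DISABLE_SHIFT  8
#define MAX_VCS              4
#define MAX_VECS             2

int main(void)
{
	uint32_t fuse = 0x00000202;     /* pretend: vcs1 and vecs1 fused off */
	uint32_t media_fuse = ~fuse;    /* invert once: set bit == enabled */
	uint8_t vdbox_enable = media_fuse & VDBOX_DISABLE_MASK;
	uint8_t vebox_enable = (media_fuse & VEBOX_DISABLE_MASK) >>
			       VEBOX_DISABLE_SHIFT;
	unsigned int i;

	for (i = 0; i < MAX_VCS; i++)
		if (!(vdbox_enable & (1u << i)))
			printf("vcs%u fused off\n", i);
	for (i = 0; i < MAX_VECS; i++)
		if (!(vebox_enable & (1u << i)))
			printf("vecs%u fused off\n", i);
	return 0;
}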
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 6eecd64734d5..1caf24e2cf0b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -25,6 +25,8 @@
#ifndef _INTEL_DEVICE_INFO_H_
#define _INTEL_DEVICE_INFO_H_
+#include <uapi/drm/i915_drm.h>
+
#include "intel_display.h"
struct drm_printer;
@@ -74,51 +76,58 @@ enum intel_platform {
INTEL_MAX_PLATFORMS
};
+enum intel_ppgtt {
+ INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
+ INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
+ INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
+ INTEL_PPGTT_FULL_4LVL,
+};
+
#define DEV_INFO_FOR_EACH_FLAG(func) \
func(is_mobile); \
func(is_lp); \
func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
- func(has_aliasing_ppgtt); \
- func(has_csr); \
- func(has_ddi); \
- func(has_dp_mst); \
func(has_reset_engine); \
- func(has_fbc); \
func(has_fpga_dbg); \
- func(has_full_ppgtt); \
- func(has_full_48bit_ppgtt); \
- func(has_gmch_display); \
func(has_guc); \
func(has_guc_ct); \
- func(has_hotplug); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
func(has_logical_ring_preemption); \
- func(has_overlay); \
func(has_pooled_eu); \
- func(has_psr); \
func(has_rc6); \
func(has_rc6p); \
func(has_runtime_pm); \
func(has_snoop); \
func(has_coherent_ggtt); \
func(unfenced_needs_alignment); \
+ func(hws_needs_physical);
+
+#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
+ /* Keep in alphabetical order */ \
func(cursor_needs_physical); \
- func(hws_needs_physical); \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_fbc); \
+ func(has_gmch_display); \
+ func(has_hotplug); \
+ func(has_ipc); \
+ func(has_overlay); \
+ func(has_psr); \
func(overlay_needs_physical); \
- func(supports_tv); \
- func(has_ipc);
+ func(supports_tv);
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask[GEN_MAX_SUBSLICES];
+ u8 subslice_mask[GEN_MAX_SLICES];
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
@@ -154,6 +163,7 @@ struct intel_device_info {
enum intel_platform platform;
u32 platform_mask;
+ enum intel_ppgtt ppgtt;
unsigned int page_sizes; /* page sizes supported by the HW */
u32 display_mmio_offset;
@@ -165,12 +175,18 @@ struct intel_device_info {
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
+
+ struct {
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
+ } display;
+
u16 ddb_size; /* in blocks */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
- int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
/* Slice/subslice/EU info */
@@ -178,6 +194,10 @@ struct intel_device_info {
u32 cs_timestamp_frequency_khz;
+ /* Enabled (not fused off) media engine bitmasks. */
+ u8 vdbox_enable;
+ u8 vebox_enable;
+
struct color_luts {
u16 degamma_lut_size;
u16 gamma_lut_size;
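
The flag lists above are X-macros: a single authoritative list of names expanded once into bitfield definitions (DEFINE_FLAG, as in the struct hunk) and again into dump code (PRINT_FLAG, as in intel_device_info_dump_flags). A minimal sketch of the same pattern with an invented two-flag list:

#include <stdio.h>

/* One authoritative list of flag names (invented for illustration). */
#define FOR_EACH_FLAG(func) \
	func(has_foo); \
	func(has_bar)

struct dev_flags {
#define DEFINE_FLAG(name) unsigned char name:1
	FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
};

static void dump_flags(const struct dev_flags *f)
{
#define PRINT_FLAG(name) printf("%s: %s\n", #name, f->name ? "yes" : "no")
	FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

int main(void)
{
	struct dev_flags f = { .has_foo = 1 };

	dump_flags(&f); /* prints "has_foo: yes" then "has_bar: no" */
	return 0;
}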
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9741cc419e1b..07c861884c70 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,7 +24,6 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
@@ -74,55 +73,6 @@ static const uint64_t i9xx_format_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
-static const uint32_t skl_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
-};
-
-static const uint32_t skl_pri_planar_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
-};
-
-static const uint64_t skl_format_modifiers_noccs[] = {
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const uint64_t skl_format_modifiers_ccs[] = {
- I915_FORMAT_MOD_Yf_TILED_CCS,
- I915_FORMAT_MOD_Y_TILED_CCS,
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
@@ -141,15 +91,15 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
static int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
-static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
-static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
-static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
-static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n,
- struct intel_link_m_n *m2_n2);
-static void ironlake_set_pipeconf(struct drm_crtc *crtc);
-static void haswell_set_pipeconf(struct drm_crtc *crtc);
-static void haswell_set_pipemisc(struct drm_crtc *crtc);
+static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
+static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+ const struct intel_link_m_n *m_n,
+ const struct intel_link_m_n *m2_n2);
+static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -158,9 +108,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
-static void skylake_pfit_enable(struct intel_crtc *crtc);
-static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
-static void ironlake_pfit_enable(struct intel_crtc *crtc);
+static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
@@ -506,23 +456,8 @@ static const struct intel_limit intel_limits_bxt = {
};
static void
-skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
-{
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return;
-
- if (enable)
- I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
- else
- I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
-}
-
-static void
skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return;
-
if (enable)
I915_WRITE(CLKGATE_DIS_PSL(pipe),
DUPS1_GATING_DIS | DUPS2_GATING_DIS);
@@ -1381,6 +1316,7 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
+	/* PCH SDVOB is multiplexed with HDMIB */
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
@@ -1565,14 +1501,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
}
}
-static void i9xx_disable_pll(struct intel_crtc *crtc)
+static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev_priv) &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
!intel_num_dvo_pipes(dev_priv)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1666,16 +1603,16 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
I915_READ(dpll_reg) & port_mask, expected_mask);
}
-static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
uint32_t val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
- assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
+ assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
@@ -1701,7 +1638,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
* here for both 8bpc and 12bpc.
*/
val &= ~PIPECONF_BPC_MASK;
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val |= PIPECONF_8BPC;
else
val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1710,7 +1647,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
if (HAS_PCH_IBX(dev_priv) &&
- intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
@@ -2254,6 +2191,11 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
return new_offset;
}
+static bool is_surface_linear(u64 modifier, int color_plane)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
static u32 intel_adjust_aligned_offset(int *x, int *y,
const struct drm_framebuffer *fb,
int color_plane,
@@ -2266,7 +2208,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
WARN_ON(new_offset > old_offset);
- if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ if (!is_surface_linear(fb->modifier, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
@@ -2330,14 +2272,13 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
unsigned int rotation,
u32 alignment)
{
- uint64_t fb_modifier = fb->modifier;
unsigned int cpp = fb->format->cpp[color_plane];
u32 offset, offset_aligned;
if (alignment)
alignment--;
- if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
+ if (!is_surface_linear(fb->modifier, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles, pitch_tiles;
@@ -2400,10 +2341,26 @@ static int intel_fb_offset_to_xy(int *x, int *y,
int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ unsigned int height;
if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
- fb->offsets[color_plane] % intel_tile_size(dev_priv))
+ fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+ DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
+ fb->offsets[color_plane], color_plane);
return -EINVAL;
+ }
+
+ height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
+ height = ALIGN(height, intel_tile_height(fb, color_plane));
+
+ /* Catch potential overflows early */
+ if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
+ fb->offsets[color_plane])) {
+ DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
+ fb->offsets[color_plane], fb->pitches[color_plane],
+ color_plane);
+ return -ERANGE;
+ }
*x = 0;
*y = 0;
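
The new -ERANGE path rejects framebuffers whose tile-aligned plane height times pitch, plus the plane offset, cannot fit in 32 bits. A standalone sketch of the same check using plain 64-bit widening in place of the driver's mul_u32_u32()/add_overflows_t() helpers:

#include <stdint.h>
#include <stdbool.h>

/* Would height * pitch + offset overflow a u32? (sketch) */
static bool fb_range_overflows(uint32_t height, uint32_t pitch,
			       uint32_t offset)
{
	/* Widen before multiplying so the product itself cannot wrap. */
	uint64_t end = (uint64_t)height * pitch + offset;

	return end > UINT32_MAX;
}

/*
 * Example: a 4096-line plane with a 1 MiB pitch ends exactly at 4 GiB
 * and is rejected, while typical framebuffers pass comfortably.
 */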
@@ -2574,7 +2531,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
tile_size);
offset /= tile_size;
- if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ if (!is_surface_linear(fb->modifier, i)) {
unsigned int tile_width, tile_height;
unsigned int pitch_tiles;
struct drm_rect r;
@@ -2788,10 +2745,6 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
else
crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
-
- DRM_DEBUG_KMS("%s active planes 0x%x\n",
- crtc_state->base.crtc->name,
- crtc_state->active_planes);
}
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
@@ -2819,6 +2772,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
+ DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
+ plane->base.base.id, plane->base.name,
+ crtc->base.base.id, crtc->base.name);
+
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_active_planes(crtc_state);
@@ -2826,7 +2783,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_pre_disable_primary_noatomic(&crtc->base);
trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc);
+ plane->disable_plane(plane, crtc_state);
}
static void
@@ -2890,6 +2847,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
+ intel_state->base.rotation = plane_config->rotation;
intel_fill_fb_ggtt_view(&intel_state->view, fb,
intel_state->base.rotation);
intel_state->color_plane[0].stride =
@@ -3098,28 +3056,6 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
return 0;
}
-static int
-skl_check_nv12_surface(struct intel_plane_state *plane_state)
-{
- /* Display WA #1106 */
- if (plane_state->base.rotation !=
- (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
- plane_state->base.rotation != DRM_MODE_ROTATE_270)
- return 0;
-
- /*
- * src coordinates are rotated here.
- * We check height but report it as width
- */
- if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
- DRM_DEBUG_KMS("src width must be multiple "
- "of 4 for rotated NV12\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -3198,9 +3134,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* the main surface setup depends on it.
*/
if (fb->format->format == DRM_FORMAT_NV12) {
- ret = skl_check_nv12_surface(plane_state);
- if (ret)
- return ret;
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3398,7 +3331,6 @@ static void i9xx_update_plane(struct intel_plane *plane,
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 linear_offset;
u32 dspcntr = plane_state->ctl;
- i915_reg_t reg = DSPCNTR(i9xx_plane);
int x = plane_state->color_plane[0].x;
int y = plane_state->color_plane[0].y;
unsigned long irqflags;
@@ -3413,48 +3345,51 @@ static void i9xx_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
+
if (INTEL_GEN(dev_priv) < 4) {
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
+ I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
I915_WRITE_FW(DSPSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
I915_WRITE_FW(PRIMSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
}
- I915_WRITE_FW(reg, dspcntr);
-
- I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPSURF(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) +
- dspaddr_offset);
I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
+ I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
+ I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+ }
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
I915_WRITE_FW(DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
- I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
- } else {
+ else
I915_WRITE_FW(DSPADDR(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- }
- POSTING_READ_FW(reg);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i9xx_disable_plane(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -3467,7 +3402,6 @@ static void i9xx_disable_plane(struct intel_plane *plane,
I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
else
I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
- POSTING_READ_FW(DSPCNTR(i9xx_plane));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -3527,13 +3461,13 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
/*
* This function detaches (aka. unbinds) unused scalers in hardware
*/
-static void skl_detach_scalers(struct intel_crtc *intel_crtc)
+static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc_scaler_state *scaler_state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
int i;
- scaler_state = &intel_crtc->config->scaler_state;
-
/* loop through and disable scalers that aren't in use */
for (i = 0; i < intel_crtc->num_scalers; i++) {
if (!scaler_state->scalers[i].in_use)
@@ -3541,6 +3475,21 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc)
}
}
+static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int rotation)
+{
+ /*
+	 * The stride is either expressed as a multiple of 64-byte chunks for
+	 * linear buffers or as a number of tiles for tiled buffers.
+ */
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ return 64;
+ else if (drm_rotation_90_or_270(rotation))
+ return intel_tile_height(fb, color_plane);
+ else
+ return intel_tile_width_bytes(fb, color_plane);
+}
+
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int color_plane)
{
@@ -3551,16 +3500,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state,
if (color_plane >= fb->format->num_planes)
return 0;
- /*
- * The stride is either expressed as a multiple of 64 bytes chunks for
- * linear buffers or in number of tiles for tiled buffers.
- */
- if (drm_rotation_90_or_270(rotation))
- stride /= intel_tile_height(fb, color_plane);
- else
- stride /= intel_fb_stride_alignment(fb, color_plane);
-
- return stride;
+ return stride / skl_plane_stride_mult(fb, color_plane, rotation);
}
static u32 skl_plane_ctl_format(uint32_t pixel_format)
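
With skl_plane_stride_mult() factored out above, the PLANE_STRIDE value is simply the byte stride divided by one unit: 64 bytes for linear surfaces, one tile width (or tile height when rotated 90/270) for tiled ones. A tiny worked sketch, assuming a hypothetical 512-byte-wide tile:

#include <assert.h>
#include <stdbool.h>

static unsigned int stride_units(unsigned int stride_bytes, bool tiled)
{
	/* 64-byte units for linear scanout, one unit per tile otherwise. */
	return stride_bytes / (tiled ? 512u /* assumed tile width */ : 64u);
}

int main(void)
{
	assert(stride_units(16384, false) == 256); /* linear: 16384 / 64 */
	assert(stride_units(16384, true) == 32);   /* tiled: 32 tiles/row */
	return 0;
}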
@@ -3597,29 +3537,38 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
return 0;
}
-/*
- * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
- * to be already pre-multiplied. We need to add a knob (or a different
- * DRM_FORMAT) for user-space to configure that.
- */
-static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
+static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
{
- switch (pixel_format) {
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB8888:
+ if (!plane_state->base.fb->format->has_alpha)
+ return PLANE_CTL_ALPHA_DISABLE;
+
+ switch (plane_state->base.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return PLANE_CTL_ALPHA_DISABLE;
+ case DRM_MODE_BLEND_PREMULTI:
return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ case DRM_MODE_BLEND_COVERAGE:
+ return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
default:
+ MISSING_CASE(plane_state->base.pixel_blend_mode);
return PLANE_CTL_ALPHA_DISABLE;
}
}
-static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
+static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
{
- switch (pixel_format) {
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB8888:
+ if (!plane_state->base.fb->format->has_alpha)
+ return PLANE_COLOR_ALPHA_DISABLE;
+
+ switch (plane_state->base.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return PLANE_COLOR_ALPHA_DISABLE;
+ case DRM_MODE_BLEND_PREMULTI:
return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
+ case DRM_MODE_BLEND_COVERAGE:
+ return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
default:
+ MISSING_CASE(plane_state->base.pixel_blend_mode);
return PLANE_COLOR_ALPHA_DISABLE;
}
}
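
The rewritten helpers map the KMS per-plane pixel blend mode onto the hardware alpha modes: "None" ignores alpha, "Pre-multiplied" trusts the framebuffer to already carry premultiplied color, and "Coverage" asks the hardware to do the multiply itself. A hedged userspace-side sketch of selecting coverage blending through the standard "pixel blend mode" plane property (the property and enum ids must be discovered beforehand; all ids here are assumptions):

#include <stdint.h>
#include <xf86drmMode.h>

/*
 * Sketch: request "Coverage" blending on a plane. blend_prop_id and
 * coverage_val come from enumerating the plane's "pixel blend mode"
 * property first; both are assumed inputs here.
 */
static int set_coverage_blend(int fd, uint32_t plane_id,
			      uint32_t blend_prop_id, uint64_t coverage_val)
{
	return drmModeObjectSetProperty(fd, plane_id, DRM_MODE_OBJECT_PLANE,
					blend_prop_id, coverage_val);
}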
@@ -3696,7 +3645,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE;
if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
- plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
+ plane_ctl |= skl_plane_ctl_alpha(plane_state);
plane_ctl |=
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE |
@@ -3731,6 +3680,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
u32 plane_color_ctl = 0;
if (INTEL_GEN(dev_priv) < 11) {
@@ -3738,9 +3688,9 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
}
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
- plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
+ plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
- if (fb->format->is_yuv) {
+ if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
@@ -3748,6 +3698,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+ } else if (fb->format->is_yuv) {
+ plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
}
return plane_color_ctl;
@@ -3932,15 +3884,15 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
/* on skylake this is done by detaching scalers */
if (INTEL_GEN(dev_priv) >= 9) {
- skl_detach_scalers(crtc);
+ skl_detach_scalers(new_crtc_state);
if (new_crtc_state->pch_pfit.enabled)
- skylake_pfit_enable(crtc);
+ skylake_pfit_enable(new_crtc_state);
} else if (HAS_PCH_SPLIT(dev_priv)) {
if (new_crtc_state->pch_pfit.enabled)
- ironlake_pfit_enable(crtc);
+ ironlake_pfit_enable(new_crtc_state);
else if (old_crtc_state->pch_pfit.enabled)
- ironlake_pfit_disable(crtc, true);
+ ironlake_pfit_disable(old_crtc_state);
}
}
@@ -4339,10 +4291,10 @@ train_done:
DRM_DEBUG_KMS("FDI train done.\n");
}
-static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
+static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4351,7 +4303,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
@@ -4500,10 +4452,11 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
}
/* Program iCLKIP clock to the desired frequency */
-static void lpt_program_iclkip(struct intel_crtc *crtc)
+static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int clock = crtc->config->base.adjusted_mode.crtc_clock;
+ int clock = crtc_state->base.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
@@ -4614,12 +4567,12 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
desired_divisor << auxdiv);
}
-static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
+static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
enum pipe pch_transcoder)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
I915_READ(HTOTAL(cpu_transcoder)));
@@ -4638,9 +4591,8 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
-static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
+static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t temp;
temp = I915_READ(SOUTH_CHICKEN1);
@@ -4659,22 +4611,23 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
POSTING_READ(SOUTH_CHICKEN1);
}
-static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
+static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- switch (intel_crtc->pipe) {
+ switch (crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
- if (intel_crtc->config->fdi_lanes > 2)
- cpt_set_fdi_bc_bifurcation(dev, false);
+ if (crtc_state->fdi_lanes > 2)
+ cpt_set_fdi_bc_bifurcation(dev_priv, false);
else
- cpt_set_fdi_bc_bifurcation(dev, true);
+ cpt_set_fdi_bc_bifurcation(dev_priv, true);
break;
case PIPE_C:
- cpt_set_fdi_bc_bifurcation(dev, true);
+ cpt_set_fdi_bc_bifurcation(dev_priv, true);
break;
default:
@@ -4731,7 +4684,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
assert_pch_transcoder_disabled(dev_priv, pipe);
if (IS_IVYBRIDGE(dev_priv))
- ivybridge_update_fdi_bc_bifurcation(crtc);
+ ivybridge_update_fdi_bc_bifurcation(crtc_state);
/* Write the TU size bits before fdi link training, so that error
* detection works. */
@@ -4764,11 +4717,11 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
* Note that enable_shared_dpll tries to do the right thing, but
* get_shared_dpll unconditionally resets the pll - we need that to have
* the right LVDS enable sequence. */
- intel_enable_shared_dpll(crtc);
+ intel_enable_shared_dpll(crtc_state);
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
- ironlake_pch_transcoder_set_timings(crtc, pipe);
+ ironlake_pch_transcoder_set_timings(crtc_state, pipe);
intel_fdi_normal_train(crtc);
@@ -4800,7 +4753,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
I915_WRITE(reg, temp);
}
- ironlake_enable_pch_transcoder(dev_priv, pipe);
+ ironlake_enable_pch_transcoder(crtc_state);
}
static void lpt_pch_enable(const struct intel_atomic_state *state,
@@ -4812,10 +4765,10 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
assert_pch_transcoder_disabled(dev_priv, PIPE_A);
- lpt_program_iclkip(crtc);
+ lpt_program_iclkip(crtc_state);
/* Set transcoder timing. */
- ironlake_pch_transcoder_set_timings(crtc, PIPE_A);
+ ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
@@ -4850,8 +4803,31 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
* chroma samples for both of the luma samples, and thus we don't
* actually get the expected MPEG2 chroma siting convention :(
* The same behaviour is observed on pre-SKL platforms as well.
+ *
+ * Theory behind the formula (note that we ignore sub-pixel
+ * source coordinates):
+ * s = source sample position
+ * d = destination sample position
+ *
+ * Downscaling 4:1:
+ * -0.5
+ * | 0.0
+ * | | 1.5 (initial phase)
+ * | | |
+ * v v v
+ * | s | s | s | s |
+ * | d |
+ *
+ * Upscaling 1:4:
+ * -0.5
+ * | -0.375 (initial phase)
+ * | | 0.0
+ * | | |
+ * v v v
+ * | s |
+ * | d | d | d | d |
*/
-u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
{
int phase = -0x8000;
u16 trip = 0;
@@ -4859,6 +4835,15 @@ u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
if (chroma_cosited)
phase += (sub - 1) * 0x8000 / sub;
+ phase += scale / (2 * sub);
+
+ /*
+	 * Hardware initial phase is limited to [-0.5:1.5].
+	 * Since the max hardware scale factor is 3.0, we
+	 * should never actually exceed 1.0 here.
+ */
+ WARN_ON(phase < -0x8000 || phase > 0x18000);
+
if (phase < 0)
phase = 0x10000 + phase;
else
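
Putting numbers to the diagrams above: the phase is .16 fixed point, starts at -0.5, gains a cositing correction when requested, and then adds half of the scale factor per source sample. A standalone sketch reproducing the two worked cases from the comment:

#include <stdio.h>

/* Sketch of the .16 fixed-point initial phase described above. */
static int scaler_phase(int sub, int scale, int cosited)
{
	int phase = -0x8000;			/* -0.5 */

	if (cosited)
		phase += (sub - 1) * 0x8000 / sub;
	phase += scale / (2 * sub);		/* + scale/2 per sample */

	return phase;
}

int main(void)
{
	/* Upscaling 1:4: scale = 0x10000/4 -> phase = -0x6000 = -0.375 */
	printf("%.3f\n", scaler_phase(1, 0x10000 / 4, 0) / 65536.0);
	/* Downscaling 4:1: scale = 4<<16 -> phase = 0x18000 = 1.5 */
	printf("%.3f\n", scaler_phase(1, 4 << 16, 0) / 65536.0);
	return 0;
}

Both results match the "initial phase" markers in the ASCII diagrams, and both sit inside the hardware's [-0.5:1.5] window checked by the new WARN_ON.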
@@ -4871,8 +4856,7 @@ static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
int src_w, int src_h, int dst_w, int dst_h,
- bool plane_scaler_check,
- uint32_t pixel_format)
+ const struct drm_format_info *format, bool need_scaler)
{
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
@@ -4881,21 +4865,14 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
- int need_scaling;
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- need_scaling = src_w != dst_w || src_h != dst_h;
-
- if (plane_scaler_check)
- if (pixel_format == DRM_FORMAT_NV12)
- need_scaling = true;
-
- if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
- need_scaling = true;
+ if (src_w != dst_w || src_h != dst_h)
+ need_scaler = true;
/*
* Scaling/fitting not supported in IF-ID mode in GEN9+
@@ -4904,7 +4881,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* for NV12.
*/
if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
- need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
}
@@ -4919,7 +4896,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* update to free the scaler is done in plane/panel-fit programming.
* For this purpose crtc/plane_state->scaler_id isn't reset here.
*/
- if (force_detach || !need_scaling) {
+ if (force_detach || !need_scaler) {
if (*scaler_id >= 0) {
scaler_state->scaler_users &= ~(1 << scaler_user);
scaler_state->scalers[*scaler_id].in_use = 0;
@@ -4933,7 +4910,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
+ if (format && format->format == DRM_FORMAT_NV12 &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
DRM_DEBUG_KMS("NV12: src dimensions not met\n");
return -EINVAL;
@@ -4976,12 +4953,16 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
+ bool need_scaler = false;
+
+ if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ need_scaler = true;
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_vdisplay, false, 0);
+ adjusted_mode->crtc_vdisplay, NULL, need_scaler);
}
/**
@@ -4996,13 +4977,17 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
-
struct intel_plane *intel_plane =
to_intel_plane(plane_state->base.plane);
struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
-
bool force_detach = !fb || !plane_state->base.visible;
+ bool need_scaler = false;
+
+ /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
+ if (!icl_is_hdr_plane(intel_plane) &&
+ fb && fb->format->format == DRM_FORMAT_NV12)
+ need_scaler = true;
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
@@ -5011,7 +4996,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
drm_rect_height(&plane_state->base.src) >> 16,
drm_rect_width(&plane_state->base.dst),
drm_rect_height(&plane_state->base.dst),
- fb ? true : false, fb ? fb->format->format : 0);
+ fb ? fb->format : NULL, need_scaler);
if (ret || plane_state->scaler_id < 0)
return ret;
@@ -5057,23 +5042,30 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
skl_detach_scaler(crtc, i);
}
-static void skylake_pfit_enable(struct intel_crtc *crtc)
+static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
- struct intel_crtc_scaler_state *scaler_state =
- &crtc->config->scaler_state;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
- if (crtc->config->pch_pfit.enabled) {
+ if (crtc_state->pch_pfit.enabled) {
u16 uv_rgb_hphase, uv_rgb_vphase;
+ int pfit_w, pfit_h, hscale, vscale;
int id;
- if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
+ if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
return;
- uv_rgb_hphase = skl_scaler_calc_phase(1, false);
- uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+ pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
+ pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
+
+ hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
+ vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
+
+ uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
id = scaler_state->scaler_id;
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
@@ -5082,18 +5074,18 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
- I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
+ I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
+ I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
}
}
-static void ironlake_pfit_enable(struct intel_crtc *crtc)
+static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int pipe = crtc->pipe;
- if (crtc->config->pch_pfit.enabled) {
+ if (crtc_state->pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
@@ -5103,8 +5095,8 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
- I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
+ I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
+ I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
}
}
@@ -5299,11 +5291,8 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
if (!crtc_state->nv12_planes)
return false;
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return false;
-
- if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
- IS_CANNONLAKE(dev_priv))
+ /* WA Display #0827: Gen9:all */
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
return true;
return false;
@@ -5346,7 +5335,6 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (needs_nv12_wa(dev_priv, old_crtc_state) &&
!needs_nv12_wa(dev_priv, pipe_config)) {
skl_wa_clkgate(dev_priv, crtc->pipe, false);
- skl_wa_528(dev_priv, crtc->pipe, false);
}
}
@@ -5386,7 +5374,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
needs_nv12_wa(dev_priv, pipe_config)) {
skl_wa_clkgate(dev_priv, crtc->pipe, true);
- skl_wa_528(dev_priv, crtc->pipe, true);
}
/*
@@ -5409,7 +5396,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
- if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
+ if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
+ old_crtc_state->base.active)
intel_wait_for_vblank(dev_priv, crtc->pipe);
/*
@@ -5440,24 +5428,32 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
intel_update_watermarks(crtc);
}
-static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
+static void intel_crtc_disable_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *p;
- int pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int update_mask = new_crtc_state->update_planes;
+ const struct intel_plane_state *old_plane_state;
+ struct intel_plane *plane;
+ unsigned fb_bits = 0;
+ int i;
- intel_crtc_dpms_overlay_disable(intel_crtc);
+ intel_crtc_dpms_overlay_disable(crtc);
- drm_for_each_plane_mask(p, dev, plane_mask)
- to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);
+ for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
- /*
- * FIXME: Once we grow proper nuclear flip support out of this we need
- * to compute the mask of flip planes precisely. For the time being
- * consider this a flip to a NULL plane.
- */
- intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
+ plane->disable_plane(plane, new_crtc_state);
+
+ if (old_plane_state->base.visible)
+ fb_bits |= plane->frontbuffer_bit;
+ }
+
+ intel_frontbuffer_flip(dev_priv, fb_bits);
}
static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
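
The reworked plane-disable path above only flips frontbuffer bits for planes that were actually visible, rather than the old INTEL_FRONTBUFFER_ALL_MASK blanket. A minimal sketch of the mask accumulation, with an invented two-plane array:

#include <stdio.h>

struct plane { unsigned int id, frontbuffer_bit, visible; };

int main(void)
{
	struct plane planes[] = {
		{ 0, 1u << 0, 1 },	/* primary, was visible */
		{ 1, 1u << 1, 0 },	/* sprite, already off */
	};
	unsigned int update_mask = 0x3, fb_bits = 0, i;

	for (i = 0; i < 2; i++)
		if ((update_mask & (1u << planes[i].id)) && planes[i].visible)
			fb_bits |= planes[i].frontbuffer_bit;

	printf("flip bits: 0x%x\n", fb_bits); /* 0x1: visible plane only */
	return 0;
}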
@@ -5515,7 +5511,8 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
if (conn_state->crtc != crtc)
continue;
- encoder->enable(encoder, crtc_state, conn_state);
+ if (encoder->enable)
+ encoder->enable(encoder, crtc_state, conn_state);
intel_opregion_notify_encoder(encoder, true);
}
}
@@ -5536,7 +5533,8 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
continue;
intel_opregion_notify_encoder(encoder, false);
- encoder->disable(encoder, old_crtc_state, old_conn_state);
+ if (encoder->disable)
+ encoder->disable(encoder, old_crtc_state, old_conn_state);
}
}
@@ -5607,37 +5605,37 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- if (intel_crtc->config->has_pch_encoder)
- intel_prepare_shared_dpll(intel_crtc);
+ if (pipe_config->has_pch_encoder)
+ intel_prepare_shared_dpll(pipe_config);
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ intel_dp_set_m_n(pipe_config, M1_N1);
- intel_set_pipe_timings(intel_crtc);
- intel_set_pipe_src_size(intel_crtc);
+ intel_set_pipe_timings(pipe_config);
+ intel_set_pipe_src_size(pipe_config);
- if (intel_crtc->config->has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config->fdi_m_n, NULL);
+ if (pipe_config->has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(pipe_config,
+ &pipe_config->fdi_m_n, NULL);
}
- ironlake_set_pipeconf(crtc);
+ ironlake_set_pipeconf(pipe_config);
intel_crtc->active = true;
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- if (intel_crtc->config->has_pch_encoder) {
+ if (pipe_config->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
- ironlake_fdi_pll_enable(intel_crtc);
+ ironlake_fdi_pll_enable(pipe_config);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
}
- ironlake_pfit_enable(intel_crtc);
+ ironlake_pfit_enable(pipe_config);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -5646,10 +5644,10 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
+ dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
intel_enable_pipe(pipe_config);
- if (intel_crtc->config->has_pch_encoder)
+ if (pipe_config->has_pch_encoder)
ironlake_pch_enable(old_intel_state, pipe_config);
assert_vblank_disabled(crtc);
@@ -5666,7 +5664,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
* some interlaced HDMI modes. Let's do the double wait always
* in case there are more corner cases we don't know about.
*/
- if (intel_crtc->config->has_pch_encoder) {
+ if (pipe_config->has_pch_encoder) {
intel_wait_for_vblank(dev_priv, pipe);
intel_wait_for_vblank(dev_priv, pipe);
}
@@ -5700,10 +5698,9 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
enum pipe pipe = crtc->pipe;
uint32_t val;
- val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
-
- /* Program B credit equally to all pipes */
- val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
+ val = MBUS_DBOX_A_CREDIT(2);
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
}
@@ -5715,7 +5712,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
bool psl_clkgate_wa;
@@ -5726,37 +5723,34 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
- if (intel_crtc->config->shared_dpll)
- intel_enable_shared_dpll(intel_crtc);
-
- if (INTEL_GEN(dev_priv) >= 11)
- icl_map_plls_to_ports(crtc, pipe_config, old_state);
+ if (pipe_config->shared_dpll)
+ intel_enable_shared_dpll(pipe_config);
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ intel_dp_set_m_n(pipe_config, M1_N1);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_set_pipe_timings(intel_crtc);
+ intel_set_pipe_timings(pipe_config);
- intel_set_pipe_src_size(intel_crtc);
+ intel_set_pipe_src_size(pipe_config);
if (cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(cpu_transcoder)) {
I915_WRITE(PIPE_MULT(cpu_transcoder),
- intel_crtc->config->pixel_multiplier - 1);
+ pipe_config->pixel_multiplier - 1);
}
- if (intel_crtc->config->has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config->fdi_m_n, NULL);
+ if (pipe_config->has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(pipe_config,
+ &pipe_config->fdi_m_n, NULL);
}
if (!transcoder_is_dsi(cpu_transcoder))
- haswell_set_pipeconf(crtc);
+ haswell_set_pipeconf(pipe_config);
- haswell_set_pipemisc(crtc);
+ haswell_set_pipemisc(pipe_config);
intel_color_set_csc(&pipe_config->base);
@@ -5764,14 +5758,14 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
- intel_crtc->config->pch_pfit.enabled;
+ pipe_config->pch_pfit.enabled;
if (psl_clkgate_wa)
glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
if (INTEL_GEN(dev_priv) >= 9)
- skylake_pfit_enable(intel_crtc);
+ skylake_pfit_enable(pipe_config);
else
- ironlake_pfit_enable(intel_crtc);
+ ironlake_pfit_enable(pipe_config);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -5804,10 +5798,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
intel_enable_pipe(pipe_config);
- if (intel_crtc->config->has_pch_encoder)
+ if (pipe_config->has_pch_encoder)
lpt_pch_enable(old_intel_state, pipe_config);
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(pipe_config, true);
assert_vblank_disabled(crtc);
@@ -5829,15 +5823,15 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
}
}
-static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
+static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (force || crtc->config->pch_pfit.enabled) {
+ if (old_crtc_state->pch_pfit.enabled) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_POS(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -5868,14 +5862,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_disable_pipe(old_crtc_state);
- ironlake_pfit_disable(intel_crtc, false);
+ ironlake_pfit_disable(old_crtc_state);
- if (intel_crtc->config->has_pch_encoder)
+ if (old_crtc_state->has_pch_encoder)
ironlake_fdi_disable(crtc);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
- if (intel_crtc->config->has_pch_encoder) {
+ if (old_crtc_state->has_pch_encoder) {
ironlake_disable_pch_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev_priv)) {
@@ -5926,24 +5920,24 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(old_crtc_state);
+ intel_dsc_disable(old_crtc_state);
+
if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
else
- ironlake_pfit_disable(intel_crtc, false);
+ ironlake_pfit_disable(old_crtc_state);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
+ intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
}
-static void i9xx_pfit_enable(struct intel_crtc *crtc)
+static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *pipe_config = crtc->config;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!pipe_config->gmch_pfit.control)
+ if (!crtc_state->gmch_pfit.control)
return;
/*
@@ -5953,8 +5947,8 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
assert_pipe_disabled(dev_priv, crtc->pipe);
- I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
- I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
+ I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
/* Border color in case we don't scale up to the full screen. Black by
* default, change to something else for debugging. */
@@ -6009,6 +6003,28 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
}
}
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port)
+{
+ switch (dig_port->aux_ch) {
+ case AUX_CH_A:
+ return POWER_DOMAIN_AUX_A;
+ case AUX_CH_B:
+ return POWER_DOMAIN_AUX_B;
+ case AUX_CH_C:
+ return POWER_DOMAIN_AUX_C;
+ case AUX_CH_D:
+ return POWER_DOMAIN_AUX_D;
+ case AUX_CH_E:
+ return POWER_DOMAIN_AUX_E;
+ case AUX_CH_F:
+ return POWER_DOMAIN_AUX_F;
+ default:
+ MISSING_CASE(dig_port->aux_ch);
+ return POWER_DOMAIN_AUX_A;
+ }
+}
+
static u64 get_crtc_power_domains(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
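
intel_aux_power_domain() gives DP AUX users one place to resolve which power well their channel needs. A sketch of the intended call pattern in this era of the driver (a non-standalone fragment; it only compiles in-tree, and the transfer itself is elided):

/*
 * Sketch: resolve the domain once, then bracket the AUX transfer with
 * a power get/put on exactly that well.
 */
static void example_aux_xfer(struct drm_i915_private *dev_priv,
			     struct intel_digital_port *dig_port)
{
	enum intel_display_power_domain domain =
		intel_aux_power_domain(dig_port);

	intel_display_power_get(dev_priv, domain);
	/* ... perform the AUX transfer ... */
	intel_display_power_put(dev_priv, domain);
}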
@@ -6088,20 +6104,18 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
if (WARN_ON(intel_crtc->active))
return;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ intel_dp_set_m_n(pipe_config, M1_N1);
- intel_set_pipe_timings(intel_crtc);
- intel_set_pipe_src_size(intel_crtc);
+ intel_set_pipe_timings(pipe_config);
+ intel_set_pipe_src_size(pipe_config);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
I915_WRITE(CHV_CANVAS(pipe), 0);
}
- i9xx_set_pipeconf(intel_crtc);
+ i9xx_set_pipeconf(pipe_config);
intel_color_set_csc(&pipe_config->base);
@@ -6112,16 +6126,16 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
if (IS_CHERRYVIEW(dev_priv)) {
- chv_prepare_pll(intel_crtc, intel_crtc->config);
- chv_enable_pll(intel_crtc, intel_crtc->config);
+ chv_prepare_pll(intel_crtc, pipe_config);
+ chv_enable_pll(intel_crtc, pipe_config);
} else {
- vlv_prepare_pll(intel_crtc, intel_crtc->config);
- vlv_enable_pll(intel_crtc, intel_crtc->config);
+ vlv_prepare_pll(intel_crtc, pipe_config);
+ vlv_enable_pll(intel_crtc, pipe_config);
}
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- i9xx_pfit_enable(intel_crtc);
+ i9xx_pfit_enable(pipe_config);
intel_color_load_luts(&pipe_config->base);
@@ -6135,13 +6149,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
}
-static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
+static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
- I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
+ I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
+ I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
}
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -6158,15 +6172,15 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
if (WARN_ON(intel_crtc->active))
return;
- i9xx_set_pll_dividers(intel_crtc);
+ i9xx_set_pll_dividers(pipe_config);
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ intel_dp_set_m_n(pipe_config, M1_N1);
- intel_set_pipe_timings(intel_crtc);
- intel_set_pipe_src_size(intel_crtc);
+ intel_set_pipe_timings(pipe_config);
+ intel_set_pipe_src_size(pipe_config);
- i9xx_set_pipeconf(intel_crtc);
+ i9xx_set_pipeconf(pipe_config);
intel_crtc->active = true;
@@ -6177,13 +6191,13 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
i9xx_enable_pll(intel_crtc, pipe_config);
- i9xx_pfit_enable(intel_crtc);
+ i9xx_pfit_enable(pipe_config);
intel_color_load_luts(&pipe_config->base);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state,
- intel_crtc->config);
+ pipe_config);
else
intel_update_watermarks(intel_crtc);
intel_enable_pipe(pipe_config);
@@ -6194,12 +6208,12 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
}
-static void i9xx_pfit_disable(struct intel_crtc *crtc)
+static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!crtc->config->gmch_pfit.control)
+ if (!old_crtc_state->gmch_pfit.control)
return;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -6232,17 +6246,17 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_disable_pipe(old_crtc_state);
- i9xx_pfit_disable(intel_crtc);
+ i9xx_pfit_disable(old_crtc_state);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
- if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev_priv))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev_priv))
vlv_disable_pll(dev_priv, pipe);
else
- i9xx_disable_pll(intel_crtc);
+ i9xx_disable_pll(old_crtc_state);
}
intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
@@ -6316,7 +6330,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
intel_fbc_disable(intel_crtc);
intel_update_watermarks(intel_crtc);
- intel_disable_shared_dpll(intel_crtc);
+ intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
domains = intel_crtc->enabled_power_domains;
for_each_power_domain(domain, domains)
@@ -6394,66 +6408,6 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
}
}
-int intel_connector_init(struct intel_connector *connector)
-{
- struct intel_digital_connector_state *conn_state;
-
- /*
- * Allocate enough memory to hold intel_digital_connector_state,
- * This might be a few bytes too many, but for connectors that don't
- * need it we'll free the state and allocate a smaller one on the first
- * succesful commit anyway.
- */
- conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
- if (!conn_state)
- return -ENOMEM;
-
- __drm_atomic_helper_connector_reset(&connector->base,
- &conn_state->base);
-
- return 0;
-}
-
-struct intel_connector *intel_connector_alloc(void)
-{
- struct intel_connector *connector;
-
- connector = kzalloc(sizeof *connector, GFP_KERNEL);
- if (!connector)
- return NULL;
-
- if (intel_connector_init(connector) < 0) {
- kfree(connector);
- return NULL;
- }
-
- return connector;
-}
-
-/*
- * Free the bits allocated by intel_connector_alloc.
- * This should only be used after intel_connector_alloc has returned
- * successfully, and before drm_connector_init returns successfully.
- * Otherwise the destroy callbacks for the connector and the state should
- * take care of proper cleanup/free
- */
-void intel_connector_free(struct intel_connector *connector)
-{
- kfree(to_intel_digital_connector_state(connector->base.state));
- kfree(connector);
-}
-
-/* Simple connector->get_hw_state implementation for encoders that support only
- * one connector and no cloning and hence the encoder state determines the state
- * of the connector. */
-bool intel_connector_get_hw_state(struct intel_connector *connector)
-{
- enum pipe pipe = 0;
- struct intel_encoder *encoder = connector->encoder;
-
- return encoder->get_hw_state(encoder, &pipe);
-}
-
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
if (crtc_state->base.enable && crtc_state->has_pch_encoder)
@@ -6564,6 +6518,9 @@ retry:
link_bw, &pipe_config->fdi_m_n, false);
ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
+
if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
pipe_config->pipe_bpp -= 2*3;
DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
@@ -6720,7 +6677,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
- if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
+ if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
+ pipe_config->base.ctm) {
/*
* There is only one pipe CSC unit per pipe, and we need that
* for output conversion from RGB->YCBCR. So if CTM is already
@@ -6795,7 +6754,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
}
void
-intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n)
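For reference, the data M/N pair compares stream bandwidth to link bandwidth and the link M/N pair encodes the clock ratio. A rough sketch of the arithmetic, assuming the standard DisplayPort definitions (the function itself also reduces and rounds the values):

    /* Sketch: the two M/N ratios handed to compute_m_n(). */
    data_m = pixel_clock * bits_per_pixel;  /* stream bandwidth */
    data_n = link_clock * nlanes * 8;       /* total link bandwidth */
    link_m = pixel_clock;
    link_n = link_clock;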
@@ -6886,12 +6845,12 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
-static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n)
+static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+ const struct intel_link_m_n *m_n)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
@@ -6899,25 +6858,39 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
-static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n,
- struct intel_link_m_n *m2_n2)
+static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+ enum transcoder transcoder)
{
+ if (IS_HASWELL(dev_priv))
+ return transcoder == TRANSCODER_EDP;
+
+ /*
+ * Strictly speaking some registers are available before
+ * gen7, but we only support DRRS on gen7+
+ */
+ return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
+}
+
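A minimal standalone restatement of the new availability rule, with the platform predicates stubbed out as plain booleans (illustrative only):

    /* Sketch: when the M2_N2 registers may be touched. */
    static bool has_m2_n2(bool is_hsw, bool is_gen7, bool is_chv, bool is_edp)
    {
            if (is_hsw)
                    return is_edp;       /* HSW: eDP transcoder only */
            return is_gen7 || is_chv;    /* DRRS is only supported on gen7+ */
    }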
+static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+ const struct intel_link_m_n *m_n,
+ const struct intel_link_m_n *m2_n2)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int pipe = crtc->pipe;
- enum transcoder transcoder = crtc->config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
if (INTEL_GEN(dev_priv) >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
- /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
- * for gen < 8) and if DRRS is supported (to make sure the
- * registers are not unnecessarily accessed).
+ /*
+ * M2_N2 registers are set only if DRRS is supported
+ * (to make sure the registers are not unnecessarily accessed).
*/
- if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
- INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
+ if (m2_n2 && crtc_state->has_drrs &&
+ transcoder_has_m2_n2(dev_priv, transcoder)) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -6932,29 +6905,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
}
}
-void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
- struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+ const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
if (m_n == M1_N1) {
- dp_m_n = &crtc->config->dp_m_n;
- dp_m2_n2 = &crtc->config->dp_m2_n2;
+ dp_m_n = &crtc_state->dp_m_n;
+ dp_m2_n2 = &crtc_state->dp_m2_n2;
} else if (m_n == M2_N2) {
/*
* M2_N2 registers are not supported. Hence m2_n2 divider value
* needs to be programmed into M1_N1.
*/
- dp_m_n = &crtc->config->dp_m2_n2;
+ dp_m_n = &crtc_state->dp_m2_n2;
} else {
DRM_ERROR("Unsupported divider value\n");
return;
}
- if (crtc->config->has_pch_encoder)
- intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
+ if (crtc_state->has_pch_encoder)
+ intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
else
- intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
+ intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
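With the state-based signature, DRRS code can flip between the two divider sets without going through crtc->config; hypothetical call sites:

    /* Sketch: switching dividers for DRRS (calls are illustrative). */
    intel_dp_set_m_n(crtc_state, M2_N2);  /* enter the downclocked mode */
    intel_dp_set_m_n(crtc_state, M1_N1);  /* back to the primary mode */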
static void vlv_compute_dpll(struct intel_crtc *crtc,
@@ -7053,8 +7026,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
/* Set HBR and RBR LPF coefficients */
if (pipe_config->port_clock == 162000 ||
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
+ intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
+ intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
@@ -7081,7 +7054,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
- if (intel_crtc_has_dp_encoder(crtc->config))
+ if (intel_crtc_has_dp_encoder(pipe_config))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
@@ -7360,12 +7333,13 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.dpll = dpll;
}
-static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
+static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
uint32_t crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
@@ -7379,7 +7353,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
else
vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7421,18 +7395,18 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
}
-static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
I915_WRITE(PIPESRC(pipe),
- ((intel_crtc->config->pipe_src_w - 1) << 16) |
- (intel_crtc->config->pipe_src_h - 1));
+ ((crtc_state->pipe_src_w - 1) << 16) |
+ (crtc_state->pipe_src_h - 1));
}
static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -7508,29 +7482,30 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
drm_mode_set_name(mode);
}
-static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
+static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t pipeconf;
pipeconf = 0;
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
+ pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
- if (intel_crtc->config->double_wide)
+ if (crtc_state->double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
- if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
+ if (crtc_state->dither && crtc_state->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
pipeconf |= PIPECONF_6BPC;
break;
@@ -7546,9 +7521,9 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_GEN(dev_priv) < 4 ||
- intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7556,11 +7531,11 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
pipeconf |= PIPECONF_PROGRESSIVE;
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_crtc->config->limited_color_range)
+ crtc_state->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
- I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
- POSTING_READ(PIPECONF(intel_crtc->pipe));
+ I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
+ POSTING_READ(PIPECONF(crtc->pipe));
}
static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
@@ -7843,8 +7818,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->tiling = I915_TILING_X;
fb->modifier = I915_FORMAT_MOD_X_TILED;
}
+
+ if (val & DISPPLANE_ROTATE_180)
+ plane_config->rotation = DRM_MODE_ROTATE_180;
}
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
+ val & DISPPLANE_MIRROR)
+ plane_config->rotation |= DRM_MODE_REFLECT_X;
+
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
fb->format = drm_format_info(fourcc);
@@ -7916,6 +7898,49 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
+static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
+
+ pipe_config->lspcon_downsampling = false;
+
+ if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
+ u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+ if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+ bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
+ bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
+
+ if (ycbcr420_enabled) {
+ /* We support 4:2:0 in full blend mode only */
+ if (!blend)
+ output = INTEL_OUTPUT_FORMAT_INVALID;
+ else if (!(IS_GEMINILAKE(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 10))
+ output = INTEL_OUTPUT_FORMAT_INVALID;
+ else
+ output = INTEL_OUTPUT_FORMAT_YCBCR420;
+ } else {
+ /*
+ * Currently there is no interface defined to
+ * check user preference among RGB, YCBCR444
+ * and YCBCR420. So the only possible case for
+ * YCBCR444 usage is driving YCBCR420 output
+ * with LSPCON, when pipe is configured for
+ * YCBCR444 output and LSPCON takes care of
+ * downsampling it.
+ */
+ pipe_config->lspcon_downsampling = true;
+ output = INTEL_OUTPUT_FORMAT_YCBCR444;
+ }
+ }
+ }
+
+ pipe_config->output_format = output;
+}
+
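The readout is a small decision tree over the PIPEMISC bits; restated as a self-contained sketch with abbreviated names and the platform check omitted:

    /* Sketch: PIPEMISC bits -> output format, per the logic above. */
    static int decode_output_format(u32 misc)
    {
            if (!(misc & COLORSPACE_YUV))
                    return FORMAT_RGB;
            if (!(misc & YUV420_ENABLE))
                    return FORMAT_YCBCR444;  /* LSPCON downsamples to 4:2:0 */
            if (misc & YUV420_FULL_BLEND)
                    return FORMAT_YCBCR420;  /* 4:2:0 only in full blend */
            return FORMAT_INVALID;
    }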
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -7928,6 +7953,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
@@ -8459,16 +8485,16 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
lpt_init_pch_refclk(dev_priv);
}
-static void ironlake_set_pipeconf(struct drm_crtc *crtc)
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
uint32_t val;
val = 0;
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
val |= PIPECONF_6BPC;
break;
@@ -8486,32 +8512,32 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config->dither)
+ if (crtc_state->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
- if (intel_crtc->config->limited_color_range)
+ if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
-static void haswell_set_pipeconf(struct drm_crtc *crtc)
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
- if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
+ if (IS_HASWELL(dev_priv) && crtc_state->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
@@ -8520,16 +8546,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
POSTING_READ(PIPECONF(cpu_transcoder));
}
-static void haswell_set_pipemisc(struct drm_crtc *crtc)
+static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *config = intel_crtc->config;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
u32 val = 0;
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
val |= PIPEMISC_DITHER_6_BPC;
break;
@@ -8547,14 +8572,16 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config->dither)
+ if (crtc_state->dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
- if (config->ycbcr420) {
- val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
- PIPEMISC_YUV420_ENABLE |
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ val |= PIPEMISC_YUV420_ENABLE |
PIPEMISC_YUV420_MODE_FULL_BLEND;
- }
I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
}
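The write side mirrors the readout added earlier in this diff; the two ifs are equivalent to this switch (restated for clarity, not the committed form):

    /* Sketch: output format -> PIPEMISC colorspace bits. */
    switch (crtc_state->output_format) {
    case INTEL_OUTPUT_FORMAT_YCBCR444:
            val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
            break;
    case INTEL_OUTPUT_FORMAT_YCBCR420:
            val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
                   PIPEMISC_YUV420_ENABLE |
                   PIPEMISC_YUV420_MODE_FULL_BLEND;
            break;
    default:
            break;  /* RGB: no YUV bits */
    }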
@@ -8765,12 +8792,8 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
- /* Read M2_N2 registers only for gen < 8 (M2_N2 available for
- * gen < 8) and if DRRS is supported (to make sure the
- * registers are not unnecessarily read).
- */
- if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
- crtc->config->has_drrs) {
+
+ if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
@@ -8913,6 +8936,29 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
+ /*
+ * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
+ * while i915 HW rotation is clockwise; that's why we swap them here.
+ */
+ switch (val & PLANE_CTL_ROTATE_MASK) {
+ case PLANE_CTL_ROTATE_0:
+ plane_config->rotation = DRM_MODE_ROTATE_0;
+ break;
+ case PLANE_CTL_ROTATE_90:
+ plane_config->rotation = DRM_MODE_ROTATE_270;
+ break;
+ case PLANE_CTL_ROTATE_180:
+ plane_config->rotation = DRM_MODE_ROTATE_180;
+ break;
+ case PLANE_CTL_ROTATE_270:
+ plane_config->rotation = DRM_MODE_ROTATE_90;
+ break;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ val & PLANE_CTL_FLIP_HORIZONTAL)
+ plane_config->rotation |= DRM_MODE_REFLECT_X;
+
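Because DRM rotations are counter-clockwise and the hardware's are clockwise, only the 90/270 cases swap; a tiny standalone mapping in degrees (not the register encoding):

    /* Sketch: clockwise HW rotation -> counter-clockwise DRM rotation. */
    static unsigned int hw_to_drm_rotation(unsigned int hw_deg)
    {
            switch (hw_deg) {
            case 90:  return 270;     /* 90 CW == 270 CCW */
            case 270: return 90;      /* 270 CW == 90 CCW */
            default:  return hw_deg;  /* 0 and 180 are symmetric */
            }
    }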
base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
plane_config->base = base;
@@ -8923,7 +8969,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, plane_id));
- stride_mult = intel_fb_stride_alignment(fb, 0);
+ stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
@@ -8979,6 +9025,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
@@ -9286,10 +9333,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->base.state);
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+ IS_ICELAKE(dev_priv)) {
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
@@ -9327,30 +9376,17 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
u32 temp;
/* TODO: TBT pll not implemented. */
- switch (port) {
- case PORT_A:
- case PORT_B:
+ if (intel_port_is_combophy(dev_priv, port)) {
temp = I915_READ(DPCLKA_CFGCR0_ICL) &
DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
- if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
+ if (WARN_ON(!intel_dpll_is_combophy(id)))
return;
- break;
- case PORT_C:
- id = DPLL_ID_ICL_MGPLL1;
- break;
- case PORT_D:
- id = DPLL_ID_ICL_MGPLL2;
- break;
- case PORT_E:
- id = DPLL_ID_ICL_MGPLL3;
- break;
- case PORT_F:
- id = DPLL_ID_ICL_MGPLL4;
- break;
- default:
- MISSING_CASE(port);
+ } else if (intel_port_is_tc(dev_priv, port)) {
+ id = icl_port_to_mg_pll_id(port);
+ } else {
+ WARN(1, "Invalid port %x\n", port);
return;
}
@@ -9440,11 +9476,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
+ unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
+ unsigned long enabled_panel_transcoders = 0;
+ enum transcoder panel_transcoder;
u32 tmp;
+ if (IS_ICELAKE(dev_priv))
+ panel_transcoder_mask |=
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+
/*
* The pipe->transcoder mapping is fixed with the exception of the eDP
- * transcoder handled below.
+ * and DSI transcoders handled below.
*/
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -9452,29 +9495,49 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
- if (tmp & TRANS_DDI_FUNC_ENABLE) {
- enum pipe trans_edp_pipe;
+ for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
+ enum pipe trans_pipe;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
+ if (!(tmp & TRANS_DDI_FUNC_ENABLE))
+ continue;
+
+ /*
+ * Log all enabled ones, only use the first one.
+ *
+ * FIXME: This won't work for two separate DSI displays.
+ */
+ enabled_panel_transcoders |= BIT(panel_transcoder);
+ if (enabled_panel_transcoders != BIT(panel_transcoder))
+ continue;
+
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
- WARN(1, "unknown pipe linked to edp transcoder\n");
+ WARN(1, "unknown pipe linked to transcoder %s\n",
+ transcoder_name(panel_transcoder));
/* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
case TRANS_DDI_EDP_INPUT_A_ON:
- trans_edp_pipe = PIPE_A;
+ trans_pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
- trans_edp_pipe = PIPE_B;
+ trans_pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
- trans_edp_pipe = PIPE_C;
+ trans_pipe = PIPE_C;
break;
}
- if (trans_edp_pipe == crtc->pipe)
- pipe_config->cpu_transcoder = TRANSCODER_EDP;
+ if (trans_pipe == crtc->pipe)
+ pipe_config->cpu_transcoder = panel_transcoder;
}
+ /*
+ * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
+ */
+ WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+ enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+
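for_each_set_bit() visits only the bits present in the mask, so supporting the ICL DSI transcoders is purely a mask extension; a tiny standalone illustration (mask values invented):

    /* Sketch: for_each_set_bit() walks set bits in ascending order. */
    unsigned long mask = BIT(0) | BIT(3) | BIT(4);  /* e.g. eDP, DSI0, DSI1 */
    int bit;

    for_each_set_bit(bit, &mask, 32)
            pr_info("checking transcoder bit %d\n", bit);  /* 0, 3, 4 */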
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
@@ -9607,33 +9670,18 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!active)
goto out;
- if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
+ IS_ICELAKE(dev_priv)) {
haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
intel_get_pipe_src_size(crtc, pipe_config);
+ intel_get_crtc_ycbcr_config(crtc, pipe_config);
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
- u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
- bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
-
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- bool blend_mode_420 = tmp &
- PIPEMISC_YUV420_MODE_FULL_BLEND;
-
- pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
- if (pipe_config->ycbcr420 != clrspace_yuv ||
- pipe_config->ycbcr420 != blend_mode_420)
- DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
- } else if (clrspace_yuv) {
- DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
- }
- }
-
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT_ULL(power_domain);
@@ -9679,7 +9727,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
- if (INTEL_INFO(dev_priv)->cursor_needs_physical)
+ if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
base = obj->phys_handle->busaddr;
else
base = intel_plane_ggtt_offset(plane_state);
@@ -9902,15 +9950,13 @@ static void i845_update_cursor(struct intel_plane *plane,
I915_WRITE_FW(CURPOS(PIPE_A), pos);
}
- POSTING_READ_FW(CURCNTR(PIPE_A));
-
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i845_disable_cursor(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
- i845_update_cursor(plane, NULL, NULL);
+ i845_update_cursor(plane, crtc_state, NULL);
}
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@@ -10101,8 +10147,8 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* On some platforms writing CURCNTR first will also
* cause CURPOS to be armed by the CURBASE write.
* Without the CURCNTR write the CURPOS write would
- * arm itself. Thus we always start the full update
- * with a CURCNTR write.
+ * arm itself. Thus we always update CURCNTR before
+ * CURPOS.
*
* On other platforms CURPOS always requires the
* CURBASE write to arm the update. Additionally
@@ -10112,15 +10158,20 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* cursor that doesn't appear to move, or even change
* shape. Thus we always write CURBASE.
*
- * CURCNTR and CUR_FBC_CTL are always
- * armed by the CURBASE write only.
+ * The other registers are armed by the CURBASE write
+ * except when the plane is getting enabled at which time
+ * the CURCNTR write arms the update.
*/
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_write_cursor_wm(plane, crtc_state);
+
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
- I915_WRITE_FW(CURCNTR(pipe), cntl);
if (HAS_CUR_FBC(dev_priv))
I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
+ I915_WRITE_FW(CURCNTR(pipe), cntl);
I915_WRITE_FW(CURPOS(pipe), pos);
I915_WRITE_FW(CURBASE(pipe), base);
@@ -10132,15 +10183,13 @@ static void i9xx_update_cursor(struct intel_plane *plane,
I915_WRITE_FW(CURBASE(pipe), base);
}
- POSTING_READ_FW(CURBASE(pipe));
-
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i9xx_disable_cursor(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
- i9xx_update_cursor(plane, NULL, NULL);
+ i9xx_update_cursor(plane, crtc_state, NULL);
}
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@@ -10738,14 +10787,40 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
pipe_config->fb_bits |= plane->frontbuffer_bit;
/*
+ * ILK/SNB DVSACNTR/Sprite Enable
+ * IVB SPR_CTL/Sprite Enable
+ * "When in Self Refresh Big FIFO mode, a write to enable the
+ * plane will be internally buffered and delayed while Big FIFO
+ * mode is exiting."
+ *
+ * Which means that enabling the sprite can take an extra frame
+ * when we start in big FIFO mode (LP1+). Thus we need to drop
+ * down to LP0 and wait for vblank in order to make sure the
+ * sprite gets enabled on the next vblank after the register write.
+ * Doing otherwise would risk enabling the sprite one frame after
+ * we've already signalled flip completion. We can resume LP1+
+ * once the sprite has been enabled.
+ *
+ *
* WaCxSRDisabledForSpriteScaling:ivb
+ * IVB SPR_SCALE/Scaling Enable
+ * "Low Power watermarks must be disabled for at least one
+ * frame before enabling sprite scaling, and kept disabled
+ * until sprite scaling is disabled."
+ *
+ * ILK/SNB DVSASCALE/Scaling Enable
+ * "When in Self Refresh Big FIFO mode, scaling enable will be
+ * masked off while Big FIFO mode is exiting."
*
- * cstate->update_wm was already set above, so this flag will
- * take effect when we commit and program watermarks.
+ * Despite the w/a only being listed for IVB, we assume that
+ * the ILK/SNB note has similar ramifications, hence we apply
+ * the w/a on all three platforms.
*/
- if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
- needs_scaling(to_intel_plane_state(plane_state)) &&
- !needs_scaling(old_plane_state))
+ if (plane->id == PLANE_SPRITE0 &&
+ (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)) &&
+ (turn_on || (!needs_scaling(old_plane_state) &&
+ needs_scaling(to_intel_plane_state(plane_state)))))
pipe_config->disable_lp_wm = true;
return 0;
@@ -10781,6 +10856,101 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
return true;
}
+static int icl_add_linked_planes(struct intel_atomic_state *state)
+{
+ struct intel_plane *plane, *linked;
+ struct intel_plane_state *plane_state, *linked_plane_state;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ linked = plane_state->linked_plane;
+
+ if (!linked)
+ continue;
+
+ linked_plane_state = intel_atomic_get_plane_state(state, linked);
+ if (IS_ERR(linked_plane_state))
+ return PTR_ERR(linked_plane_state);
+
+ WARN_ON(linked_plane_state->linked_plane != plane);
+ WARN_ON(linked_plane_state->slave == plane_state->slave);
+ }
+
+ return 0;
+}
+
+static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+ struct intel_plane *plane, *linked;
+ struct intel_plane_state *plane_state;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ /*
+ * Destroy all old plane links and make the slave plane invisible
+ * in the crtc_state->active_planes mask.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
+ continue;
+
+ plane_state->linked_plane = NULL;
+ if (plane_state->slave && !plane_state->base.visible) {
+ crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->update_planes |= BIT(plane->id);
+ }
+
+ plane_state->slave = false;
+ }
+
+ if (!crtc_state->nv12_planes)
+ return 0;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_plane_state *linked_state = NULL;
+
+ if (plane->pipe != crtc->pipe ||
+ !(crtc_state->nv12_planes & BIT(plane->id)))
+ continue;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
+ if (!icl_is_nv12_y_plane(linked->id))
+ continue;
+
+ if (crtc_state->active_planes & BIT(linked->id))
+ continue;
+
+ linked_state = intel_atomic_get_plane_state(state, linked);
+ if (IS_ERR(linked_state))
+ return PTR_ERR(linked_state);
+
+ break;
+ }
+
+ if (!linked_state) {
+ DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
+ hweight8(crtc_state->nv12_planes));
+
+ return -EINVAL;
+ }
+
+ plane_state->linked_plane = linked;
+
+ linked_state->slave = true;
+ linked_state->linked_plane = plane;
+ crtc_state->active_planes |= BIT(linked->id);
+ crtc_state->update_planes |= BIT(linked->id);
+ DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+ }
+
+ return 0;
+}
+
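The pairing is a greedy match of each NV12 master plane to the first idle Y-capable plane; sketched below with invented helper names (the committed loop above is the authoritative form):

    /* Sketch: pair NV12 masters with free Y planes, failing if none left. */
    for_each_nv12_master(state, plane) {             /* hypothetical iterator */
            struct plane *y = find_idle_y_plane(state, plane->pipe);
            if (!y)
                    return -EINVAL;                  /* not enough Y planes */
            plane->linked = y;                       /* master scans out UV */
            y->linked = plane;
            y->slave = true;                         /* slave scans out Y */
    }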
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
@@ -10789,7 +10959,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
- struct drm_atomic_state *state = crtc_state->state;
int ret;
bool mode_changed = needs_modeset(crtc_state);
@@ -10826,8 +10995,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
}
- if (dev_priv->display.compute_intermediate_wm &&
- !to_intel_atomic_state(state)->skip_intermediate_wm) {
+ if (dev_priv->display.compute_intermediate_wm) {
if (WARN_ON(!dev_priv->display.compute_pipe_wm))
return 0;
@@ -10843,9 +11011,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
return ret;
}
- } else if (dev_priv->display.compute_intermediate_wm) {
- if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
- pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
}
if (INTEL_GEN(dev_priv) >= 9) {
@@ -10853,6 +11018,8 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
ret = skl_update_scaler_crtc(pipe_config);
if (!ret)
+ ret = icl_check_nv12_planes(pipe_config);
+ if (!ret)
ret = skl_check_pipe_max_pixel_rate(intel_crtc,
pipe_config);
if (!ret)
@@ -10867,8 +11034,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
- .atomic_begin = intel_begin_crtc_commit,
- .atomic_flush = intel_finish_crtc_commit,
.atomic_check = intel_crtc_atomic_check,
};
@@ -10897,30 +11062,42 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
drm_connector_list_iter_end(&conn_iter);
}
-static void
-connected_sink_compute_bpp(struct intel_connector *connector,
- struct intel_crtc_state *pipe_config)
+static int
+compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
+ struct intel_crtc_state *pipe_config)
{
- const struct drm_display_info *info = &connector->base.display_info;
- int bpp = pipe_config->pipe_bpp;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
- connector->base.base.id,
- connector->base.name);
+ struct drm_connector *connector = conn_state->connector;
+ const struct drm_display_info *info = &connector->display_info;
+ int bpp;
- /* Don't use an invalid EDID bpc value */
- if (info->bpc != 0 && info->bpc * 3 < bpp) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
- bpp, info->bpc * 3);
- pipe_config->pipe_bpp = info->bpc * 3;
+ switch (conn_state->max_bpc) {
+ case 6 ... 7:
+ bpp = 6 * 3;
+ break;
+ case 8 ... 9:
+ bpp = 8 * 3;
+ break;
+ case 10 ... 11:
+ bpp = 10 * 3;
+ break;
+ case 12:
+ bpp = 12 * 3;
+ break;
+ default:
+ return -EINVAL;
}
- /* Clamp bpp to 8 on screens without EDID 1.4 */
- if (info->bpc == 0 && bpp > 24) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
- bpp);
- pipe_config->pipe_bpp = 24;
+ if (bpp < pipe_config->pipe_bpp) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
+ "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+ connector->base.id, connector->name,
+ bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
+ pipe_config->pipe_bpp);
+
+ pipe_config->pipe_bpp = bpp;
}
+
+ return 0;
}
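The 'case 6 ... 7:' arms use GCC's case-range extension, which the kernel builds with; the mapping as a self-contained function (illustrative, not the driver's):

    /* Sketch: round a connector's max_bpc down to a supported pipe bpp. */
    static int max_bpc_to_pipe_bpp(int max_bpc)
    {
            switch (max_bpc) {
            case 6 ... 7:   return 6 * 3;   /* 18 bpp */
            case 8 ... 9:   return 8 * 3;   /* 24 bpp */
            case 10 ... 11: return 10 * 3;  /* 30 bpp */
            case 12:        return 12 * 3;  /* 36 bpp */
            default:        return -1;      /* out of range */
            }
    }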
static int
@@ -10928,7 +11105,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_atomic_state *state;
+ struct drm_atomic_state *state = pipe_config->base.state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
@@ -10941,21 +11118,21 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
else
bpp = 8*3;
-
pipe_config->pipe_bpp = bpp;
- state = pipe_config->base.state;
-
- /* Clamp display bpp to EDID value */
+ /* Clamp display bpp to connector max bpp */
for_each_new_connector_in_state(state, connector, connector_state, i) {
+ int ret;
+
if (connector_state->crtc != &crtc->base)
continue;
- connected_sink_compute_bpp(to_intel_connector(connector),
- pipe_config);
+ ret = compute_sink_pipe_bpp(connector_state, pipe_config);
+ if (ret)
+ return ret;
}
- return bpp;
+ return 0;
}
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
@@ -11025,6 +11202,20 @@ static void snprintf_output_types(char *buf, size_t len,
WARN_ON_ONCE(output_types != 0);
}
+static const char * const output_format_str[] = {
+ [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
+ [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
+ [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
+ [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
+};
+
+static const char *output_formats(enum intel_output_format format)
+{
+ if (format >= ARRAY_SIZE(output_format_str))
+ format = INTEL_OUTPUT_FORMAT_INVALID;
+ return output_format_str[format];
+}
+
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
const char *context)
@@ -11044,6 +11235,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
buf, pipe_config->output_types);
+ DRM_DEBUG_KMS("output format: %s\n",
+ output_formats(pipe_config->output_format));
+
DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
@@ -11053,9 +11247,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes,
&pipe_config->fdi_m_n);
- if (pipe_config->ycbcr420)
- DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");
-
if (intel_crtc_has_dp_encoder(pipe_config)) {
intel_dump_m_n_config(pipe_config, "dp m_n",
pipe_config->lane_count, &pipe_config->dp_m_n);
@@ -11244,7 +11435,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int base_bpp, ret = -EINVAL;
+ int base_bpp, ret;
int i;
bool retry = true;
@@ -11266,10 +11457,12 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
- base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
- pipe_config);
- if (base_bpp < 0)
- goto fail;
+ ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+ pipe_config);
+ if (ret)
+ return ret;
+
+ base_bpp = pipe_config->pipe_bpp;
/*
* Determine the real pipe dimensions. Note that stereo modes can
@@ -11291,7 +11484,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
- goto fail;
+ return -EINVAL;
}
/*
@@ -11327,7 +11520,7 @@ encoder_retry:
if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
DRM_DEBUG_KMS("Encoder config failure\n");
- goto fail;
+ return -EINVAL;
}
}
@@ -11338,16 +11531,16 @@ encoder_retry:
* pipe_config->pixel_multiplier;
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
if (ret < 0) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
- goto fail;
+ return ret;
}
if (ret == RETRY) {
- if (WARN(!retry, "loop in pipe configuration computation\n")) {
- ret = -EINVAL;
- goto fail;
- }
+ if (WARN(!retry, "loop in pipe configuration computation\n"))
+ return -EINVAL;
DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
retry = false;
@@ -11363,8 +11556,7 @@ encoder_retry:
DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
-fail:
- return ret;
+ return 0;
}
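-EDEADLK from the atomic machinery signals a locking back-off rather than a real failure and must reach the caller untouched so the whole transaction can be retried; the pattern this diff repeats is (step name hypothetical):

    /* Sketch: propagate -EDEADLK before any other error handling. */
    ret = some_atomic_check_step(state);
    if (ret == -EDEADLK)
            return ret;  /* drop locks up the stack and retry */
    if (ret < 0) {
            DRM_DEBUG_KMS("check step failed\n");
            return ret;
    }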
static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@ -11633,6 +11825,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
+ PIPE_CONF_CHECK_I(output_format);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -11641,7 +11834,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
- PIPE_CONF_CHECK_BOOL(ycbcr420);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
@@ -11763,6 +11955,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct skl_pipe_wm hw_wm, *sw_wm;
struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -11773,6 +11967,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
skl_pipe_wm_get_hw_state(crtc, &hw_wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+ skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
+
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
@@ -11815,8 +12011,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb.plane[pipe][plane];
- sw_ddb_entry = &sw_ddb->plane[pipe][plane];
+ hw_ddb_entry = &hw_ddb_y[plane];
+ sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
@@ -11865,8 +12061,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+ hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
+ sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
@@ -12150,8 +12346,9 @@ intel_modeset_verify_disabled(struct drm_device *dev,
verify_disabled_dpll_state(dev);
}
-static void update_scanline_offset(struct intel_crtc *crtc)
+static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
@@ -12182,7 +12379,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* answer that's slightly in the future.
*/
if (IS_GEN2(dev_priv)) {
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
int vtotal;
vtotal = adjusted_mode->crtc_vtotal;
@@ -12191,7 +12388,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = vtotal - 1;
} else if (HAS_DDI(dev_priv) &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
crtc->scanline_offset = 1;
@@ -12474,6 +12671,8 @@ static int intel_atomic_check(struct drm_device *dev,
}
ret = intel_modeset_pipe_config(crtc, pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
if (ret) {
intel_dump_pipe_config(to_intel_crtc(crtc),
pipe_config, "[failed]");
@@ -12505,6 +12704,10 @@ static int intel_atomic_check(struct drm_device *dev,
intel_state->cdclk.logical = dev_priv->cdclk.logical;
}
+ ret = icl_add_linked_planes(intel_state);
+ if (ret)
+ return ret;
+
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;
@@ -12544,7 +12747,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
to_intel_plane(crtc->primary));
if (modeset) {
- update_scanline_offset(intel_crtc);
+ update_scanline_offset(pipe_config);
dev_priv->display.crtc_enable(pipe_config, state);
/* vblanks work again, re-enable pipe CRC. */
@@ -12557,7 +12760,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
if (new_plane_state)
intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
- drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
+ intel_begin_crtc_commit(crtc, old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+ else
+ i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+
+ intel_finish_crtc_commit(crtc, old_crtc_state);
}
static void intel_update_crtcs(struct drm_atomic_state *state)
@@ -12589,13 +12799,12 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
int i;
u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
-
- const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
/* ignore allocations for crtc's that have been turned off. */
if (new_crtc_state->active)
- entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+ entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
/* If 2nd DBuf slice required, enable it here */
if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -12621,14 +12830,13 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
if (updated & cmask || !cstate->base.active)
continue;
- if (skl_ddb_allocation_overlaps(dev_priv,
+ if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
entries,
- &cstate->wm.skl.ddb,
- i))
+ INTEL_INFO(dev_priv)->num_pipes, i))
continue;
updated |= cmask;
- entries[i] = &cstate->wm.skl.ddb;
+ entries[i] = cstate->wm.skl.ddb;
/*
* If this is an already active pipe, its DDB changed,
@@ -12718,8 +12926,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
struct drm_crtc *crtc;
- struct intel_crtc_state *intel_cstate;
+ struct intel_crtc *intel_crtc;
u64 put_domains[I915_MAX_PIPES] = {};
int i;
@@ -12731,24 +12940,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
+ new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+ intel_crtc = to_intel_crtc(crtc);
if (needs_modeset(new_crtc_state) ||
to_intel_crtc_state(new_crtc_state)->update_pipe) {
- put_domains[to_intel_crtc(crtc)->pipe] =
+ put_domains[intel_crtc->pipe] =
modeset_get_crtc_power_domains(crtc,
- to_intel_crtc_state(new_crtc_state));
+ new_intel_crtc_state);
}
if (!needs_modeset(new_crtc_state))
continue;
- intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
- to_intel_crtc_state(new_crtc_state));
+ intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
if (old_crtc_state->active) {
- intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
+ intel_crtc_disable_planes(intel_state, intel_crtc);
/*
* We need to disable pipe CRC before disabling the pipe,
@@ -12756,10 +12966,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
*/
intel_crtc_disable_pipe_crc(intel_crtc);
- dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
+ dev_priv->display.crtc_disable(old_intel_crtc_state, state);
intel_crtc->active = false;
intel_fbc_disable(intel_crtc);
- intel_disable_shared_dpll(intel_crtc);
+ intel_disable_shared_dpll(old_intel_crtc_state);
/*
* Underruns don't always raise
@@ -12768,17 +12978,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
- if (!new_crtc_state->active) {
- /*
- * Make sure we don't call initial_watermarks
- * for ILK-style watermark updates.
- *
- * No clue what this is supposed to achieve.
- */
- if (INTEL_GEN(dev_priv) >= 9)
- dev_priv->display.initial_watermarks(intel_state,
- to_intel_crtc_state(new_crtc_state));
- }
+ /* FIXME unify this for all platforms */
+ if (!new_crtc_state->active &&
+ !HAS_GMCH_DISPLAY(dev_priv) &&
+ dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(intel_state,
+ new_intel_crtc_state);
}
}
@@ -12837,11 +13042,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* TODO: Move this (and other cleanup) to an async worker eventually.
*/
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- intel_cstate = to_intel_crtc_state(new_crtc_state);
+ new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
if (dev_priv->display.optimize_watermarks)
dev_priv->display.optimize_watermarks(intel_state,
- intel_cstate);
+ new_intel_crtc_state);
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -13108,7 +13313,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
struct i915_vma *vma;
if (plane->id == PLANE_CURSOR &&
- INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int align = intel_cursor_alignment(dev_priv);
int err;
@@ -13224,13 +13429,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
- fb_obj_bump_render_priority(obj);
-
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
if (ret)
return ret;
+ fb_obj_bump_render_priority(obj);
intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
if (!new_state->fence) { /* implicit fencing */
@@ -13361,7 +13565,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
if (intel_cstate->update_pipe)
intel_update_pipe_config(old_intel_cstate, intel_cstate);
else if (INTEL_GEN(dev_priv) >= 9)
- skl_detach_scalers(intel_crtc);
+ skl_detach_scalers(intel_cstate);
out:
if (dev_priv->display.atomic_update_watermarks)
@@ -13463,56 +13667,6 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
}
}
-static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- struct intel_plane *plane = to_intel_plane(_plane);
-
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- break;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (!plane->has_ccs)
- return false;
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- if (is_ccs_modifier(modifier))
- return true;
- /* fall through */
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- if (modifier == I915_FORMAT_MOD_Yf_TILED)
- return true;
- /* fall through */
- case DRM_FORMAT_C8:
- if (modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED ||
- modifier == I915_FORMAT_MOD_Y_TILED)
- return true;
- /* fall through */
- default:
- return false;
- }
-}
-
static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
@@ -13520,18 +13674,7 @@ static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
format == DRM_FORMAT_ARGB8888;
}
-static struct drm_plane_funcs skl_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = skl_plane_format_mod_supported,
-};
-
-static struct drm_plane_funcs i965_plane_funcs = {
+static const struct drm_plane_funcs i965_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
@@ -13542,7 +13685,7 @@ static struct drm_plane_funcs i965_plane_funcs = {
.format_mod_supported = i965_plane_format_mod_supported,
};
-static struct drm_plane_funcs i8xx_plane_funcs = {
+static const struct drm_plane_funcs i8xx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
@@ -13568,14 +13711,16 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state, *new_plane_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *old_fb;
- struct drm_crtc_state *crtc_state = crtc->state;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->state);
+ struct intel_crtc_state *new_crtc_state;
/*
* When crtc is inactive or there is a modeset pending,
* wait for it to complete in the slowpath
*/
- if (!crtc_state->active || needs_modeset(crtc_state) ||
- to_intel_crtc_state(crtc_state)->update_pipe)
+ if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
+ crtc_state->update_pipe)
goto slow;
old_plane_state = plane->state;
@@ -13605,6 +13750,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
if (!new_plane_state)
return -ENOMEM;
+ new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
+ if (!new_crtc_state) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
drm_atomic_set_fb_for_plane(new_plane_state, fb);
new_plane_state->src_x = src_x;
@@ -13616,9 +13767,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
new_plane_state->crtc_w = crtc_w;
new_plane_state->crtc_h = crtc_h;
- ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
- to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
- to_intel_plane_state(plane->state),
+ ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+ to_intel_plane_state(old_plane_state),
to_intel_plane_state(new_plane_state));
if (ret)
goto out_free;
@@ -13640,14 +13790,25 @@ intel_legacy_cursor_update(struct drm_plane *plane,
/* Swap plane state */
plane->state = new_plane_state;
+ /*
+ * We cannot swap crtc_state as it may be in use by an atomic commit or
+ * page flip that's running simultaneously. If we swap crtc_state and
+ * destroy the old state, we will cause a use-after-free there.
+ *
+ * Only update active_planes, which is needed for our internal
+ * bookkeeping. Either value will do the right thing when updating
+ * planes atomically. If the cursor was part of the atomic update then
+ * we would have taken the slowpath.
+ */
+ crtc_state->active_planes = new_crtc_state->active_planes;
+
if (plane->state->visible) {
trace_intel_update_plane(plane, to_intel_crtc(crtc));
- intel_plane->update_plane(intel_plane,
- to_intel_crtc_state(crtc->state),
+ intel_plane->update_plane(intel_plane, crtc_state,
to_intel_plane_state(plane->state));
} else {
trace_intel_disable_plane(plane, to_intel_crtc(crtc));
- intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
+ intel_plane->disable_plane(intel_plane, crtc_state);
}
intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
@@ -13655,6 +13816,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
+ if (new_crtc_state)
+ intel_crtc_destroy_state(crtc, &new_crtc_state->base);
if (ret)
intel_plane_destroy_state(plane, new_plane_state);
else
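
The comment in the hunk above spells out why the fastpath must not swap the live crtc_state. As a minimal sketch of that pattern, assuming simplified stand-ins for the i915 helpers (check_plane_with_state() is a hypothetical placeholder):

static int cursor_fastpath_check(struct intel_crtc *crtc)
{
	struct intel_crtc_state *old = to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *tmp;
	int ret;

	/* Validate against a duplicated state, never against the live one. */
	tmp = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!tmp)
		return -ENOMEM;

	ret = check_plane_with_state(old, tmp);	/* hypothetical helper */
	if (ret)
		goto out;

	/*
	 * No swap: a concurrent atomic commit may still dereference the
	 * live state. Copy back only the bookkeeping field.
	 */
	old->active_planes = tmp->active_planes;
out:
	/* The duplicate is freed on both success and failure. */
	intel_crtc_destroy_state(&crtc->base, &tmp->base);
	return ret;
}
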
@@ -13695,176 +13858,90 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
return i9xx_plane == PLANE_A;
}
-static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- if (!HAS_FBC(dev_priv))
- return false;
-
- return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
-}
-
-bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- /*
- * FIXME: ICL requires two hardware planes for scanning out NV12
- * framebuffers. Do not advertize support until this is implemented.
- */
- if (INTEL_GEN(dev_priv) >= 11)
- return false;
-
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return false;
-
- if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
- return false;
-
- if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
- return false;
-
- return true;
-}
-
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct intel_plane *primary = NULL;
- struct intel_plane_state *state = NULL;
+ struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
- const uint32_t *intel_primary_formats;
unsigned int supported_rotations;
- unsigned int num_formats;
- const uint64_t *modifiers;
+ unsigned int possible_crtcs;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
int ret;
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary) {
- ret = -ENOMEM;
- goto fail;
- }
-
- state = intel_create_plane_state(&primary->base);
- if (!state) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_universal_plane_create(dev_priv, pipe,
+ PLANE_PRIMARY);
- primary->base.state = &state->base;
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
- if (INTEL_GEN(dev_priv) >= 9)
- state->scaler_id = -1;
- primary->pipe = pipe;
+ plane->pipe = pipe;
/*
* On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
* port are hooked to pipe B. Hence we want plane A feeding pipe B.
*/
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
- primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
+ plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
else
- primary->i9xx_plane = (enum i9xx_plane_id) pipe;
- primary->id = PLANE_PRIMARY;
- primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);
+ plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+ plane->id = PLANE_PRIMARY;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- if (INTEL_GEN(dev_priv) >= 9)
- primary->has_fbc = skl_plane_has_fbc(dev_priv,
- primary->pipe,
- primary->id);
- else
- primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
- primary->i9xx_plane);
-
- if (primary->has_fbc) {
+ plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+ if (plane->has_fbc) {
struct intel_fbc *fbc = &dev_priv->fbc;
- fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}
- if (INTEL_GEN(dev_priv) >= 9) {
- primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
- PLANE_PRIMARY);
-
- if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
- intel_primary_formats = skl_pri_planar_formats;
- num_formats = ARRAY_SIZE(skl_pri_planar_formats);
- } else {
- intel_primary_formats = skl_primary_formats;
- num_formats = ARRAY_SIZE(skl_primary_formats);
- }
-
- if (primary->has_ccs)
- modifiers = skl_format_modifiers_ccs;
- else
- modifiers = skl_format_modifiers_noccs;
-
- primary->max_stride = skl_plane_max_stride;
- primary->update_plane = skl_update_plane;
- primary->disable_plane = skl_disable_plane;
- primary->get_hw_state = skl_plane_get_hw_state;
- primary->check_plane = skl_plane_check;
-
- plane_funcs = &skl_plane_funcs;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- intel_primary_formats = i965_primary_formats;
+ if (INTEL_GEN(dev_priv) >= 4) {
+ formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
modifiers = i9xx_format_modifiers;
- primary->max_stride = i9xx_plane_max_stride;
- primary->update_plane = i9xx_update_plane;
- primary->disable_plane = i9xx_disable_plane;
- primary->get_hw_state = i9xx_plane_get_hw_state;
- primary->check_plane = i9xx_plane_check;
+ plane->max_stride = i9xx_plane_max_stride;
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
plane_funcs = &i965_plane_funcs;
} else {
- intel_primary_formats = i8xx_primary_formats;
+ formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
modifiers = i9xx_format_modifiers;
- primary->max_stride = i9xx_plane_max_stride;
- primary->update_plane = i9xx_update_plane;
- primary->disable_plane = i9xx_disable_plane;
- primary->get_hw_state = i9xx_plane_get_hw_state;
- primary->check_plane = i9xx_plane_check;
+ plane->max_stride = i9xx_plane_max_stride;
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
plane_funcs = &i8xx_plane_funcs;
}
- if (INTEL_GEN(dev_priv) >= 9)
- ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, plane_funcs,
- intel_primary_formats, num_formats,
- modifiers,
- DRM_PLANE_TYPE_PRIMARY,
- "plane 1%c", pipe_name(pipe));
- else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, plane_funcs,
- intel_primary_formats, num_formats,
- modifiers,
+ possible_crtcs = BIT(pipe);
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ possible_crtcs, plane_funcs,
+ formats, num_formats, modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
- ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, plane_funcs,
- intel_primary_formats, num_formats,
- modifiers,
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ possible_crtcs, plane_funcs,
+ formats, num_formats, modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane %c",
- plane_name(primary->i9xx_plane));
+ plane_name(plane->i9xx_plane));
if (ret)
goto fail;
- if (INTEL_GEN(dev_priv) >= 10) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
- DRM_MODE_REFLECT_X;
- } else if (INTEL_GEN(dev_priv) >= 9) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
- } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X;
@@ -13876,26 +13953,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
}
if (INTEL_GEN(dev_priv) >= 4)
- drm_plane_create_rotation_property(&primary->base,
+ drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
supported_rotations);
- if (INTEL_GEN(dev_priv) >= 9)
- drm_plane_create_color_properties(&primary->base,
- BIT(DRM_COLOR_YCBCR_BT601) |
- BIT(DRM_COLOR_YCBCR_BT709),
- BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
- BIT(DRM_COLOR_YCBCR_FULL_RANGE),
- DRM_COLOR_YCBCR_BT709,
- DRM_COLOR_YCBCR_LIMITED_RANGE);
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
- drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
-
- return primary;
+ return plane;
fail:
- kfree(state);
- kfree(primary);
+ intel_plane_free(plane);
return ERR_PTR(ret);
}
@@ -13904,23 +13971,13 @@ static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct intel_plane *cursor = NULL;
- struct intel_plane_state *state = NULL;
+ unsigned int possible_crtcs;
+ struct intel_plane *cursor;
int ret;
- cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
- if (!cursor) {
- ret = -ENOMEM;
- goto fail;
- }
-
- state = intel_create_plane_state(&cursor->base);
- if (!state) {
- ret = -ENOMEM;
- goto fail;
- }
-
- cursor->base.state = &state->base;
+ cursor = intel_plane_alloc();
+ if (IS_ERR(cursor))
+ return cursor;
cursor->pipe = pipe;
cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
@@ -13947,8 +14004,10 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
cursor->cursor.size = ~0;
+ possible_crtcs = BIT(pipe);
+
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- 0, &intel_cursor_plane_funcs,
+ possible_crtcs, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
cursor_format_modifiers,
@@ -13963,16 +14022,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180);
- if (INTEL_GEN(dev_priv) >= 9)
- state->scaler_id = -1;
-
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
return cursor;
fail:
- kfree(state);
- kfree(cursor);
+ intel_plane_free(cursor);
return ERR_PTR(ret);
}
@@ -13993,7 +14048,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_scaler *scaler = &scaler_state->scalers[i];
scaler->in_use = 0;
- scaler->mode = PS_SCALER_MODE_DYN;
+ scaler->mode = 0;
}
scaler_state->scaler_id = -1;
@@ -14088,18 +14143,6 @@ fail:
return ret;
}
-enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
-{
- struct drm_device *dev = connector->base.dev;
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
- if (!connector->base.state->crtc)
- return INVALID_PIPE;
-
- return to_intel_crtc(connector->base.state->crtc)->pipe;
-}
-
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -14216,7 +14259,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_pps_init(dev_priv);
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return;
/*
@@ -14236,6 +14279,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_D);
intel_ddi_init(dev_priv, PORT_E);
intel_ddi_init(dev_priv, PORT_F);
+ icl_dsi_init(dev_priv);
} else if (IS_GEN9_LP(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
@@ -14458,7 +14502,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
static
u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
- uint64_t fb_modifier, uint32_t pixel_format)
+ u32 pixel_format, u64 fb_modifier)
{
struct intel_crtc *crtc;
struct intel_plane *plane;
@@ -14480,7 +14524,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_framebuffer *fb = &intel_fb->base;
- struct drm_format_name_buf format_name;
u32 pitch_limit;
unsigned int tiling, stride;
int ret = -EINVAL;
@@ -14511,33 +14554,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
}
- /* Passed in modifier sanity checking. */
- switch (mode_cmd->modifier[0]) {
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- break;
- default:
- DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
- goto err;
- }
- /* fall through */
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- if (INTEL_GEN(dev_priv) < 9) {
- DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
- mode_cmd->modifier[0]);
- goto err;
- }
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
+ if (!drm_any_plane_has_format(&dev_priv->drm,
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
mode_cmd->modifier[0]);
goto err;
}
@@ -14552,8 +14576,8 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
- mode_cmd->pixel_format);
+ pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
@@ -14572,69 +14596,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- /* Reject formats not supported by any plane early. */
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- break;
- case DRM_FORMAT_XRGB1555:
- if (INTEL_GEN(dev_priv) > 3) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_ABGR8888:
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- INTEL_GEN(dev_priv) < 9) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- if (INTEL_GEN(dev_priv) < 4) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_ABGR2101010:
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_VYUY:
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_NV12:
- if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
- IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name));
- goto err;
- }
- break;
- default:
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
-
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
if (mode_cmd->offsets[0] != 0)
goto err;
@@ -14646,7 +14607,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
fb->height < SKL_MIN_YUV_420_SRC_H ||
(fb->width % 4) != 0 || (fb->height % 4) != 0)) {
DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
- return -EINVAL;
+ goto err;
}
for (i = 0; i < fb->format->num_planes; i++) {
@@ -14906,174 +14867,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.update_crtcs = intel_update_crtcs;
}
-/*
- * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
- */
-static void quirk_ssc_force_disable(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
- DRM_INFO("applying lvds SSC disable quirk\n");
-}
-
-/*
- * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
- * brightness value
- */
-static void quirk_invert_brightness(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
- DRM_INFO("applying inverted panel brightness quirk\n");
-}
-
-/* Some VBT's incorrectly indicate no backlight is present */
-static void quirk_backlight_present(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
- DRM_INFO("applying backlight present quirk\n");
-}
-
-/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
- * which is 300 ms greater than eDP spec T12 min.
- */
-static void quirk_increase_t12_delay(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
- DRM_INFO("Applying T12 delay quirk\n");
-}
-
-/*
- * GeminiLake NUC HDMI outputs require additional off time
- * this allows the onboard retimer to correctly sync to signal
- */
-static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
- DRM_INFO("Applying Increase DDI Disabled quirk\n");
-}
-
-struct intel_quirk {
- int device;
- int subsystem_vendor;
- int subsystem_device;
- void (*hook)(struct drm_device *dev);
-};
-
-/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
-struct intel_dmi_quirk {
- void (*hook)(struct drm_device *dev);
- const struct dmi_system_id (*dmi_id_list)[];
-};
-
-static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
-{
- DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
- return 1;
-}
-
-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
- {
- .dmi_id_list = &(const struct dmi_system_id[]) {
- {
- .callback = intel_dmi_reverse_brightness,
- .ident = "NCR Corporation",
- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, ""),
- },
- },
- { } /* terminating entry */
- },
- .hook = quirk_invert_brightness,
- },
-};
-
-static struct intel_quirk intel_quirks[] = {
- /* Lenovo U160 cannot use SSC on LVDS */
- { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
-
- /* Sony Vaio Y cannot use SSC on LVDS */
- { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
-
- /* Acer Aspire 5734Z must invert backlight brightness */
- { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
-
- /* Acer/eMachines G725 */
- { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
-
- /* Acer/eMachines e725 */
- { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
-
- /* Acer/Packard Bell NCL20 */
- { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
-
- /* Acer Aspire 4736Z */
- { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
-
- /* Acer Aspire 5336 */
- { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
-
- /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
- { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
-
- /* Acer C720 Chromebook (Core i3 4005U) */
- { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
-
- /* Apple Macbook 2,1 (Core 2 T7400) */
- { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
-
- /* Apple Macbook 4,1 */
- { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
-
- /* Toshiba CB35 Chromebook (Celeron 2955U) */
- { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
-
- /* HP Chromebook 14 (Celeron 2955U) */
- { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
-
- /* Dell Chromebook 11 */
- { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
-
- /* Dell Chromebook 11 (2015 version) */
- { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
-
- /* Toshiba Satellite P50-C-18C */
- { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
-
- /* GeminiLake NUC */
- { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
- { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
- /* ASRock ITX*/
- { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
- { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
-};
-
-static void intel_init_quirks(struct drm_device *dev)
-{
- struct pci_dev *d = dev->pdev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
- struct intel_quirk *q = &intel_quirks[i];
-
- if (d->device == q->device &&
- (d->subsystem_vendor == q->subsystem_vendor ||
- q->subsystem_vendor == PCI_ANY_ID) &&
- (d->subsystem_device == q->subsystem_device ||
- q->subsystem_device == PCI_ANY_ID))
- q->hook(dev);
- }
- for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
- if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
- intel_dmi_quirks[i].hook(dev);
- }
-}
-
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
@@ -15233,6 +15026,14 @@ retry:
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
goto out;
+
+ /*
+ * FIXME hack to force a LUT update to avoid the
+ * plane update forcing the pipe gamma on without
+ * having a proper LUT loaded. Remove once we
+ * have readout for pipe gamma enable.
+ */
+ crtc_state->color_mgmt_changed = true;
}
}
@@ -15279,7 +15080,9 @@ int intel_modeset_init(struct drm_device *dev)
INIT_WORK(&dev_priv->atomic_helper.free_work,
intel_atomic_helper_free_state_worker);
- intel_init_quirks(dev);
+ intel_init_quirks(dev_priv);
+
+ intel_fbc_init(dev_priv);
intel_init_pm(dev_priv);
@@ -15511,8 +15314,8 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
if (pipe == crtc->pipe)
continue;
- DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
- plane->base.name);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+ plane->base.base.id, plane->base.name);
plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
intel_plane_disable_noatomic(plane_crtc, plane);
@@ -15553,7 +15356,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/* Clear any frame start delays used for debugging left by the BIOS */
if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
@@ -15563,7 +15367,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
}
- if (crtc->active) {
+ if (crtc_state->base.active) {
struct intel_plane *plane;
/* Disable everything but the primary plane */
@@ -15579,10 +15383,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
- if (crtc->active && !intel_crtc_has_encoders(crtc))
+ if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base, ctx);
- if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
+ if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
@@ -15613,6 +15417,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_connector *connector;
/* We need to check both for a crtc link (meaning that the
@@ -15636,7 +15441,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
encoder->base.name);
- encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+ if (encoder->disable)
+ encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
if (encoder->post_disable)
encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
}
@@ -15653,6 +15459,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
/* notify opregion of the sanitized encoder state */
intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_sanitize_encoder_pll_mapping(encoder);
}
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15701,6 +15510,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
crtc_state = to_intel_crtc_state(crtc->base.state);
intel_set_plane_visible(crtc_state, plane_state, visible);
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+ plane->base.base.id, plane->base.name,
+ enableddisabled(visible), pipe_name(pipe));
}
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -15853,7 +15666,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_calc_timestamping_constants(&crtc->base,
&crtc_state->base.adjusted_mode);
- update_scanline_offset(crtc);
+ update_scanline_offset(crtc_state);
}
dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
@@ -15908,6 +15721,65 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
}
}
+static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t hdmi_reg)
+{
+ u32 val = I915_READ(hdmi_reg);
+
+ if (val & SDVO_ENABLE ||
+ (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
+ return;
+
+ DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
+ port_name(port));
+
+ val &= ~SDVO_PIPE_SEL_MASK;
+ val |= SDVO_PIPE_SEL(PIPE_A);
+
+ I915_WRITE(hdmi_reg, val);
+}
+
+static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t dp_reg)
+{
+ u32 val = I915_READ(dp_reg);
+
+ if (val & DP_PORT_EN ||
+ (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
+ return;
+
+ DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
+ port_name(port));
+
+ val &= ~DP_PIPE_SEL_MASK;
+ val |= DP_PIPE_SEL(PIPE_A);
+
+ I915_WRITE(dp_reg, val);
+}
+
+static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+{
+ /*
+ * The BIOS may select transcoder B on some of the PCH
+ * ports even if it doesn't enable the port. This would trip
+ * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
+ * Sanitize the transcoder select bits to prevent that. We
+ * assume that the BIOS never actually enabled the port,
+ * because if it did we'd actually have to toggle the port
+ * on and back off to make the transcoder A select stick
+ * (see intel_dp_link_down(), intel_disable_hdmi(),
+ * intel_disable_sdvo()).
+ */
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+
+ /* PCH SDVOB multiplex with HDMIB */
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+}
+
/* Scan out the current hw modeset state,
* and sanitizes it to the current state
*/
@@ -15917,6 +15789,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
struct intel_encoder *encoder;
int i;
@@ -15928,6 +15801,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
/* HW state is read out, now we need to sanitize this mess. */
get_encoder_power_domains(dev_priv);
+ if (HAS_PCH_IBX(dev_priv))
+ ibx_sanitize_pch_ports(dev_priv);
+
/*
* intel_sanitize_plane_mapping() may need to do vblank
* waits, so we need vblank interrupts restored beforehand.
@@ -15935,7 +15811,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_intel_crtc(&dev_priv->drm, crtc) {
drm_crtc_vblank_reset(&crtc->base);
- if (crtc->active)
+ if (crtc->base.state->active)
drm_crtc_vblank_on(&crtc->base);
}
@@ -15945,8 +15821,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_sanitize_encoder(encoder);
for_each_intel_crtc(&dev_priv->drm, crtc) {
+ crtc_state = to_intel_crtc_state(crtc->base.state);
intel_sanitize_crtc(crtc, ctx);
- intel_dump_pipe_config(crtc, crtc->config,
+ intel_dump_pipe_config(crtc, crtc_state,
"[setup_hw_state]");
}
@@ -15980,7 +15857,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_intel_crtc(dev, crtc) {
u64 put_domains;
- put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+ put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
@@ -16024,29 +15902,6 @@ void intel_display_resume(struct drm_device *dev)
drm_atomic_state_put(state);
}
-int intel_connector_register(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- int ret;
-
- ret = intel_backlight_device_register(intel_connector);
- if (ret)
- goto err;
-
- return 0;
-
-err:
- return ret;
-}
-
-void intel_connector_unregister(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- intel_backlight_device_unregister(intel_connector);
- intel_panel_destroy_backlight(connector);
-}
-
static void intel_hpd_poll_fini(struct drm_device *dev)
{
struct intel_connector *connector;
@@ -16057,9 +15912,9 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->modeset_retry_work.func)
cancel_work_sync(&connector->modeset_retry_work);
- if (connector->hdcp_shim) {
- cancel_delayed_work_sync(&connector->hdcp_check_work);
- cancel_work_sync(&connector->hdcp_prop_work);
+ if (connector->hdcp.shim) {
+ cancel_delayed_work_sync(&connector->hdcp.check_work);
+ cancel_work_sync(&connector->hdcp.prop_work);
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -16099,18 +15954,13 @@ void intel_modeset_cleanup(struct drm_device *dev)
drm_mode_config_cleanup(dev);
- intel_cleanup_overlay(dev_priv);
+ intel_overlay_cleanup(dev_priv);
intel_teardown_gmbus(dev_priv);
destroy_workqueue(dev_priv->modeset_wq);
-}
-void intel_connector_attach_encoder(struct intel_connector *connector,
- struct intel_encoder *encoder)
-{
- connector->encoder = encoder;
- drm_connector_attach_encoder(&connector->base, &encoder->base);
+ intel_fbc_cleanup_cfb(dev_priv);
}
/*
@@ -16200,7 +16050,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
};
int i;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 9fac67e31205..4262452963b3 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -43,6 +43,11 @@ enum i915_gpio {
GPIOM,
};
+/*
+ * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
+ * rest have consecutive values and match the enum values of transcoders
+ * with a 1:1 transcoder -> pipe mapping.
+ */
enum pipe {
INVALID_PIPE = -1,
@@ -57,12 +62,25 @@ enum pipe {
#define pipe_name(p) ((p) + 'A')
enum transcoder {
- TRANSCODER_A = 0,
- TRANSCODER_B,
- TRANSCODER_C,
+ /*
+ * The following transcoders have a 1:1 transcoder -> pipe mapping,
+ * keep their values fixed: the code assumes that TRANSCODER_A=0, the
+ * rest have consecutive values and match the enum values of the pipes
+ * they map to.
+ */
+ TRANSCODER_A = PIPE_A,
+ TRANSCODER_B = PIPE_B,
+ TRANSCODER_C = PIPE_C,
+
+ /*
+ * The following transcoders can map to any pipe, their enum value
+ * doesn't need to stay fixed.
+ */
TRANSCODER_EDP,
- TRANSCODER_DSI_A,
- TRANSCODER_DSI_C,
+ TRANSCODER_DSI_0,
+ TRANSCODER_DSI_1,
+ TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
+ TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
I915_MAX_TRANSCODERS
};
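
Pinning these values lets code convert between the two enums for the pipe-backed transcoders. A hedged illustration of what the fixed values permit (not taken from the driver):

static inline enum transcoder pipe_to_transcoder(enum pipe pipe)
{
	/* Valid only because TRANSCODER_A..TRANSCODER_C alias PIPE_A..PIPE_C. */
	return (enum transcoder)pipe;
}

static inline bool transcoder_is_pipe_backed(enum transcoder cpu_transcoder)
{
	return cpu_transcoder >= TRANSCODER_A && cpu_transcoder <= TRANSCODER_C;
}
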
@@ -120,6 +138,9 @@ enum plane_id {
PLANE_SPRITE0,
PLANE_SPRITE1,
PLANE_SPRITE2,
+ PLANE_SPRITE3,
+ PLANE_SPRITE4,
+ PLANE_SPRITE5,
PLANE_CURSOR,
I915_MAX_PLANES,
@@ -221,6 +242,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_EDP_VDSC,
POWER_DOMAIN_TRANSCODER_DSI_A,
POWER_DOMAIN_TRANSCODER_DSI_C,
POWER_DOMAIN_PORT_DDI_A_LANES,
@@ -363,7 +385,7 @@ struct intel_link_m_n {
(__dev_priv)->power_domains.power_well_count; \
(__power_well)++)
-#define for_each_power_well_rev(__dev_priv, __power_well) \
+#define for_each_power_well_reverse(__dev_priv, __power_well) \
for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
(__dev_priv)->power_domains.power_well_count - 1; \
(__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
@@ -373,10 +395,18 @@ struct intel_link_m_n {
for_each_power_well(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
-#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well_rev(__dev_priv, __power_well) \
+#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
+#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -402,10 +432,18 @@ struct intel_link_m_n {
(__i)++) \
for_each_if(plane)
-void intel_link_compute_m_n(int bpp, int nlanes,
+#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
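
For reference, a usage sketch of the new old/new crtc iterator; the atomic state pointer and note_active_toggle() are illustrative assumptions, not driver code:

struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
int i;

for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
				    new_crtc_state, i) {
	if (old_crtc_state->base.active != new_crtc_state->base.active)
		note_active_toggle(crtc);	/* hypothetical */
}
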
+void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n);
-
bool is_ccs_modifier(u64 modifier);
#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 13f9b56a9ce7..fdd2cbc56fa3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -45,6 +45,19 @@
#define DP_DPRX_ESI_LEN 14
+/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
+#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
+#define DP_DSC_MIN_SUPPORTED_BPC 8
+#define DP_DSC_MAX_SUPPORTED_BPC 10
+
+/* DP DSC throughput values used for slice count calculations, in KPixels/s */
+#define DP_DSC_PEAK_PIXEL_RATE 2720000
+#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
+#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
+
+/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
+#define DP_DSC_FEC_OVERHEAD_FACTOR 976
+
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
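
The 976 above encodes the FEC overhead as a fraction scaled by 1000: (100 - 2.4)/100 = 0.976. A hedged sketch of applying it (the helper name is illustrative and not the driver's):

#include <linux/math64.h>

static int fec_adjusted_data_rate(int link_clock, int lane_count)
{
	/* e.g. HBR2 x4: 540000 * 4 = 2160000 -> 2108160 after FEC */
	return div_u64((u64)link_clock * lane_count *
		       DP_DSC_FEC_OVERHEAD_FACTOR, 1000);
}
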
@@ -93,6 +106,14 @@ static const struct dp_link_dpll chv_dpll[] = {
{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
+/* Constants for DP DSC configurations */
+static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
+
+/* With a single pipe configuration, the HW is capable of supporting a
+ * maximum of 4 slices per line.
+ */
+static const u8 valid_dsc_slicecount[] = {1, 2, 4};
+
/**
* intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -222,138 +243,6 @@ intel_dp_link_required(int pixel_clock, int bpp)
return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
-void icl_program_mg_dp_mode(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum port port = intel_dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 ln0, ln1, lane_info;
-
- if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
- return;
-
- ln0 = I915_READ(MG_DP_MODE(port, 0));
- ln1 = I915_READ(MG_DP_MODE(port, 1));
-
- switch (intel_dig_port->tc_type) {
- case TC_PORT_TYPEC:
- ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
-
- lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
- DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
-
- switch (lane_info) {
- case 0x1:
- case 0x4:
- break;
- case 0x2:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0x3:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0x8:
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0xC:
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0xF:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- default:
- MISSING_CASE(lane_info);
- }
- break;
-
- case TC_PORT_LEGACY:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
- break;
-
- default:
- MISSING_CASE(intel_dig_port->tc_type);
- return;
- }
-
- I915_WRITE(MG_DP_MODE(port, 0), ln0);
- I915_WRITE(MG_DP_MODE(port, 1), ln1);
-}
-
-void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
- u32 val;
- int i;
-
- if (tc_port == PORT_TC_NONE)
- return;
-
- for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
- val = I915_READ(mg_regs[i]);
- val |= MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING;
- I915_WRITE(mg_regs[i], val);
- }
-
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING;
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
-void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
- u32 val;
- int i;
-
- if (tc_port == PORT_TC_NONE)
- return;
-
- for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
- val = I915_READ(mg_regs[i]);
- val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING);
- I915_WRITE(mg_regs[i], val);
- }
-
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING);
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
@@ -455,7 +344,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 10) {
source_rates = cnl_rates;
size = ARRAY_SIZE(cnl_rates);
- if (INTEL_GEN(dev_priv) == 10)
+ if (IS_GEN10(dev_priv))
max_rate = cnl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
@@ -616,9 +505,12 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk;
+ u16 dsc_max_output_bpp = 0;
+ u8 dsc_slice_count = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -641,7 +533,33 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);
- if (mode_rate > max_rate || target_clock > max_dotclk)
+ /*
+ * Output bpp is stored in 6.4 format, so right shift by 4 to get the
+ * integer value, since we support only integer values of bpp.
+ */
+ if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
+ drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
+ if (intel_dp_is_edp(intel_dp)) {
+ dsc_max_output_bpp =
+ drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
+ dsc_slice_count =
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ true);
+ } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(max_link_clock,
+ max_lanes,
+ target_clock,
+ mode->hdisplay) >> 4;
+ dsc_slice_count =
+ intel_dp_dsc_get_slice_count(intel_dp,
+ target_clock,
+ mode->hdisplay);
+ }
+ }
+
+ if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
+ target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
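
The 6.4 fixed-point handling above, isolated as a hedged sketch: sinks report output bpp in 1/16 bpp units, and only the integer part is consumed here.

static inline u16 dsc_bpp_6_4_to_int(u16 bpp_6_4)
{
	/* e.g. 0x18C (24.75 bpp in 6.4 format) -> 24 bpp */
	return bpp_6_4 >> 4;
}
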
@@ -690,7 +608,8 @@ static void pps_lock(struct intel_dp *intel_dp)
* See intel_power_sequencer_reset() why we need
* a power domain reference here.
*/
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dp_to_dig_port(intel_dp)));
mutex_lock(&dev_priv->pps_mutex);
}
@@ -701,7 +620,8 @@ static void pps_unlock(struct intel_dp *intel_dp)
mutex_unlock(&dev_priv->pps_mutex);
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dp_to_dig_port(intel_dp)));
}
static void
@@ -1156,6 +1076,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
if (index)
return 0;
@@ -1165,7 +1086,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* like to run at 2MHz. So, take the cdclk or PCH rawclk value and
* divide by 2000 and use that
*/
- if (intel_dp->aux_ch == AUX_CH_A)
+ if (dig_port->aux_ch == AUX_CH_A)
return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
else
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
@@ -1174,8 +1095,9 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -1503,80 +1425,12 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
-static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
- enum aux_ch aux_ch;
-
- if (!info->alternate_aux_channel) {
- aux_ch = (enum aux_ch) port;
-
- DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
- aux_ch_name(aux_ch), port_name(port));
- return aux_ch;
- }
-
- switch (info->alternate_aux_channel) {
- case DP_AUX_A:
- aux_ch = AUX_CH_A;
- break;
- case DP_AUX_B:
- aux_ch = AUX_CH_B;
- break;
- case DP_AUX_C:
- aux_ch = AUX_CH_C;
- break;
- case DP_AUX_D:
- aux_ch = AUX_CH_D;
- break;
- case DP_AUX_E:
- aux_ch = AUX_CH_E;
- break;
- case DP_AUX_F:
- aux_ch = AUX_CH_F;
- break;
- default:
- MISSING_CASE(info->alternate_aux_channel);
- aux_ch = AUX_CH_A;
- break;
- }
-
- DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
- aux_ch_name(aux_ch), port_name(port));
-
- return aux_ch;
-}
-
-static enum intel_display_power_domain
-intel_aux_power_domain(struct intel_dp *intel_dp)
-{
- switch (intel_dp->aux_ch) {
- case AUX_CH_A:
- return POWER_DOMAIN_AUX_A;
- case AUX_CH_B:
- return POWER_DOMAIN_AUX_B;
- case AUX_CH_C:
- return POWER_DOMAIN_AUX_C;
- case AUX_CH_D:
- return POWER_DOMAIN_AUX_D;
- case AUX_CH_E:
- return POWER_DOMAIN_AUX_E;
- case AUX_CH_F:
- return POWER_DOMAIN_AUX_F;
- default:
- MISSING_CASE(intel_dp->aux_ch);
- return POWER_DOMAIN_AUX_A;
- }
-}
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_B:
@@ -1592,7 +1446,8 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_B:
@@ -1608,7 +1463,8 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_A:
@@ -1626,7 +1482,8 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_A:
@@ -1644,7 +1501,8 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_A:
@@ -1663,7 +1521,8 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_A:
@@ -1689,10 +1548,8 @@ static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
-
- intel_dp->aux_ch = intel_aux_ch(intel_dp);
- intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
if (INTEL_GEN(dev_priv) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
@@ -1853,6 +1710,41 @@ struct link_config_limits {
int min_bpp, max_bpp;
};
+static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return INTEL_GEN(dev_priv) >= 11 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
+ drm_dp_sink_supports_fec(intel_dp->fec_capable);
+}
+
+static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return INTEL_GEN(dev_priv) >= 10 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
+ return false;
+
+ return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
+ drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
+}
+
static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
@@ -1951,14 +1843,158 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return false;
}
+/* Optimize link config in order: max bpp, min lanes, min clock */
+static bool
+intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ const struct link_config_limits *limits)
+{
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int bpp, clock, lane_count;
+ int mode_rate, link_clock, link_avail;
+
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+ bpp);
+
+ for (lane_count = limits->min_lane_count;
+ lane_count <= limits->max_lane_count;
+ lane_count <<= 1) {
+ for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+ link_clock = intel_dp->common_rates[clock];
+ link_avail = intel_dp_max_data_rate(link_clock,
+ lane_count);
+
+ if (mode_rate <= link_avail) {
+ pipe_config->lane_count = lane_count;
+ pipe_config->pipe_bpp = bpp;
+ pipe_config->port_clock = link_clock;
+
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
+{
+ int i, num_bpc;
+ u8 dsc_bpc[3] = {0};
+
+ num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
+ dsc_bpc);
+ for (i = 0; i < num_bpc; i++) {
+ if (dsc_max_bpc >= dsc_bpc[i])
+ return dsc_bpc[i] * 3;
+ }
+
+ return 0;
+}
+
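
A worked example of the helper above, assuming (this ordering is an assumption, not guaranteed by the source) that drm_dp_dsc_sink_supported_input_bpcs() fills dsc_bpc[] in decreasing order for a sink supporting {12, 10, 8} bpc:

intel_dp_dsc_compute_bpp(intel_dp, 10);	/* -> 30 (10 bpc * 3 components) */
intel_dp_dsc_compute_bpp(intel_dp, 9);	/* -> 24 (falls back to 8 bpc) */
intel_dp_dsc_compute_bpp(intel_dp, 7);	/* -> 0  (below the 8 bpc floor) */
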
+static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ u8 dsc_max_bpc;
+ int pipe_bpp;
+
+ if (!intel_dp_supports_dsc(intel_dp, pipe_config))
+ return false;
+
+ dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
+ conn_state->max_requested_bpc);
+
+ pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
+ if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
+ DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
+ return false;
+ }
+
+ /*
+ * For now enable DSC for max bpp, max link rate, max lane count.
+ * Optimize this later for the minimum possible link rate/lane count
+ * with DSC enabled for the requested mode.
+ */
+ pipe_config->pipe_bpp = pipe_bpp;
+ pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
+ pipe_config->lane_count = limits->max_lane_count;
+
+ if (intel_dp_is_edp(intel_dp)) {
+ pipe_config->dsc_params.compressed_bpp =
+ min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
+ pipe_config->pipe_bpp);
+ pipe_config->dsc_params.slice_count =
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ true);
+ } else {
+ u16 dsc_max_output_bpp;
+ u8 dsc_dp_slice_count;
+
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay);
+ dsc_dp_slice_count =
+ intel_dp_dsc_get_slice_count(intel_dp,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay);
+ if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
+ DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
+ return false;
+ }
+ pipe_config->dsc_params.compressed_bpp = min_t(u16,
+ dsc_max_output_bpp >> 4,
+ pipe_config->pipe_bpp);
+ pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
+ }
+ /*
+ * The VDSC engine operates at 1 pixel per clock, so if the peak pixel
+ * rate is greater than the maximum CDCLK frequency and the slice count
+ * is greater than one, we need to split across 2 VDSC instances.
+ */
+ if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
+ if (pipe_config->dsc_params.slice_count > 1) {
+ pipe_config->dsc_params.dsc_split = true;
+ } else {
+ DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
+ return false;
+ }
+ }
+ if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) {
+ DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
+ "Compressed BPP = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp);
+ return false;
+ }
+ pipe_config->dsc_params.compression_enable = true;
+ DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
+ "Compressed Bpp = %d Slice Count = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp,
+ pipe_config->dsc_params.slice_count);
+
+ return true;
+}
+
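
The split rule above as a standalone sketch (an illustrative helper, assuming one VDSC engine processes one pixel per clock):

static int check_vdsc_split(int crtc_clock, int max_cdclk_freq,
			    int slice_count, bool *dsc_split)
{
	*dsc_split = false;
	if (crtc_clock <= max_cdclk_freq)
		return 0;		/* a single engine keeps up */
	if (slice_count < 2)
		return -EINVAL;		/* the slices cannot be divided */
	*dsc_split = true;		/* drive two engines in parallel */
	return 0;
}
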
static bool
intel_dp_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct link_config_limits limits;
int common_len;
+ bool ret;
common_len = intel_dp_common_len_rate_limit(intel_dp,
intel_dp->max_link_rate);
@@ -1975,13 +2011,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_bpp = 6 * 3;
limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
- if (intel_dp_is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
/*
* Use the maximum clock and number of lanes the eDP panel
- * advertizes being capable of. The panels are generally
- * designed to support only a single clock and lane
- * configuration, and typically these values correspond to the
- * native resolution of the panel.
+ * advertises being capable of. The eDP 1.3 and earlier panels
+ * are generally designed to support only a single clock and
+ * lane configuration, and typically these values correspond to
+ * the native resolution of the panel. With eDP 1.4 rate select
+ * and DSC, this is decreasingly the case, and we need to be
+ * able to select a link config below the maximum.
*/
limits.min_lane_count = limits.max_lane_count;
limits.min_clock = limits.max_clock;
@@ -1995,23 +2033,52 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp->common_rates[limits.max_clock],
limits.max_bpp, adjusted_mode->crtc_clock);
- /*
- * Optimize for slow and wide. This is the place to add alternative
- * optimization policy.
- */
- if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits))
- return false;
-
- DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp);
-
- DRM_DEBUG_KMS("DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->pipe_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
+ if (intel_dp_is_edp(intel_dp))
+ /*
+ * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
+ * section A.1: "It is recommended that the minimum number of
+ * lanes be used, using the minimum link rate allowed for that
+ * lane configuration."
+ *
+ * Note that we use the max clock and lane count for eDP 1.3 and
+ * earlier, and fast vs. wide is irrelevant.
+ */
+ ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
+ &limits);
+ else
+ /* Optimize for slow and wide. */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
+ &limits);
+
+ /* enable compression if the mode doesn't fit available BW */
+ if (!ret) {
+ if (!intel_dp_dsc_compute_config(intel_dp, pipe_config,
+ conn_state, &limits))
+ return false;
+ }
+
+ if (pipe_config->dsc_params.compression_enable) {
+ DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp);
+
+ DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->dsc_params.compressed_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+ } else {
+ DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp);
+ DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->pipe_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+ }
return true;
}
@@ -2023,6 +2090,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
enum port port = encoder->port;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
@@ -2034,6 +2102,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ if (lspcon->active)
+ lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
+
pipe_config->has_drrs = false;
if (IS_G4X(dev_priv) || port == PORT_A)
pipe_config->has_audio = false;
@@ -2072,7 +2144,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
- if (!intel_dp_compute_link_config(encoder, pipe_config))
+ pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+ intel_dp_supports_fec(intel_dp, pipe_config);
+
+ if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state))
return false;
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
@@ -2090,11 +2165,20 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
}
- intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ if (!pipe_config->dsc_params.compression_enable)
+ intel_link_compute_m_n(pipe_config->pipe_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
+ else
+ intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
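/*
 * In outline (a simplified sketch of intel_link_compute_m_n(), before
 * its fixed-point reduction), the two ratios being programmed are:
 *
 *	data M/N = (bits_per_pixel * pixel_clock) /
 *		   (lane_count * link_clock * 8)
 *	link M/N = pixel_clock / link_clock
 *
 * so passing the compressed bpp when DSC is enabled scales the data
 * M/N down to the post-compression payload, while the link M/N is
 * unaffected.
 */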
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2338,7 +2422,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
port_name(intel_dig_port->base.port));
@@ -2424,7 +2509,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->panel_power_off_time = ktime_get_boottime();
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
}
static void edp_panel_vdd_work(struct work_struct *__work)
@@ -2537,6 +2623,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
static void edp_panel_off(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2546,10 +2633,10 @@ static void edp_panel_off(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ port_name(dig_port->base.port));
WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ port_name(dig_port->base.port));
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2568,7 +2655,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
intel_dp->panel_power_off_time = ktime_get_boottime();
/* We got a reference when we enabled the VDD. */
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port));
}
void intel_edp_panel_off(struct intel_dp *intel_dp)
@@ -2788,6 +2875,22 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ int ret;
+
+ if (!crtc_state->dsc_params.compression_enable)
+ return;
+
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
+ enable ? DP_DECOMPRESSION_EN : 0);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
+ enable ? "enable" : "disable");
+}
+
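/*
 * A minimal usage sketch (hypothetical call sites): the helper is a
 * no-op unless compression was actually enabled in the state, so the
 * enable/disable paths can call it unconditionally.
 *
 *	intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
 *	...
 *	intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
 *					      false);
 */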
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
@@ -3900,6 +4003,40 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
+static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
+{
+ /*
+ * Clear the cached register set to avoid using stale values
+ * for the sinks that do not support DSC.
+ */
+ memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
+
+ /* Clear fec_capable to avoid using stale values */
+ intel_dp->fec_capable = 0;
+
+ /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
+ intel_dp->edp_dpcd[0] >= DP_EDP_14) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
+ intel_dp->dsc_dpcd,
+ sizeof(intel_dp->dsc_dpcd)) < 0)
+ DRM_ERROR("Failed to read DPCD register 0x%x\n",
+ DP_DSC_SUPPORT);
+
+ DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
+ (int)sizeof(intel_dp->dsc_dpcd),
+ intel_dp->dsc_dpcd);
+
+ /* FEC is supported only on DP 1.4 */
+ if (!intel_dp_is_edp(intel_dp) &&
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
+ &intel_dp->fec_capable) < 0)
+ DRM_ERROR("Failed to read FEC DPCD register\n");
+
+ DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
+ }
+}
+
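/*
 * A minimal sketch of consuming the cached block, assuming the
 * standard DP_DSC_SUPPORT bit layout; the drm core's
 * drm_dp_sink_supports_dsc() helper performs an equivalent check.
 */
static bool intel_dp_sink_dsc_supported(struct intel_dp *intel_dp)
{
	/* byte 0 of the cache is the DP_DSC_SUPPORT register */
	return intel_dp->dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED;
}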
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
@@ -3976,6 +4113,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp_set_common_rates(intel_dp);
+ /* Read the eDP DSC DPCD registers */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ intel_dp_get_dsc_sink_cap(intel_dp);
+
return true;
}
@@ -3983,8 +4124,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
- u8 sink_count;
-
if (!intel_dp_read_dpcd(intel_dp))
return false;
@@ -3994,25 +4133,35 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
intel_dp_set_common_rates(intel_dp);
}
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0)
- return false;
-
/*
- * Sink count can change between short pulse hpd hence
- * a member variable in intel_dp will track any changes
- * between short pulse interrupts.
+	 * Some eDP panels do not set a valid value for the sink count,
+	 * which is why we don't bother reading it here or in
+	 * intel_edp_init_dpcd().
*/
- intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count);
+ if (!intel_dp_is_edp(intel_dp)) {
+ u8 count;
+ ssize_t r;
- /*
- * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
- * a dongle is present but no display. Unless we require to know
- * if a dongle is present or not, we don't need to update
- * downstream port information. So, an early return here saves
- * time from performing other operations which are not required.
- */
- if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
- return false;
+ r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
+ if (r < 1)
+ return false;
+
+ /*
+	 * The sink count can change between short-pulse HPD interrupts,
+	 * hence a member variable in intel_dp tracks any changes
+	 * between them.
+ */
+ intel_dp->sink_count = DP_GET_SINK_COUNT(count);
+
+ /*
+ * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+	 * a dongle is present but no display. Unless we need to know
+	 * whether a dongle is present or not, we don't have to update the
+	 * downstream port information, so an early return here saves time
+	 * by skipping operations that are not required.
+ */
+ if (!intel_dp->sink_count)
+ return false;
+ }
if (!drm_dp_is_branch(intel_dp->dpcd))
return true; /* native DP sink */
@@ -4029,16 +4178,10 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
}
static bool
-intel_dp_can_mst(struct intel_dp *intel_dp)
+intel_dp_sink_can_mst(struct intel_dp *intel_dp)
{
u8 mstm_cap;
- if (!i915_modparams.enable_dp_mst)
- return false;
-
- if (!intel_dp->can_mst)
- return false;
-
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
@@ -4048,34 +4191,36 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
return mstm_cap & DP_MST_CAP;
}
+static bool
+intel_dp_can_mst(struct intel_dp *intel_dp)
+{
+ return i915_modparams.enable_dp_mst &&
+ intel_dp->can_mst &&
+ intel_dp_sink_can_mst(intel_dp);
+}
+
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
- if (!i915_modparams.enable_dp_mst)
- return;
+ struct intel_encoder *encoder =
+ &dp_to_dig_port(intel_dp)->base;
+ bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
+
+ DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
+ port_name(encoder->port), yesno(intel_dp->can_mst),
+ yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
if (!intel_dp->can_mst)
return;
- intel_dp->is_mst = intel_dp_can_mst(intel_dp);
-
- if (intel_dp->is_mst)
- DRM_DEBUG_KMS("Sink is MST capable\n");
- else
- DRM_DEBUG_KMS("Sink is not MST capable\n");
+ intel_dp->is_mst = sink_can_mst &&
+ i915_modparams.enable_dp_mst;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
}
static bool
-intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
-{
- return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector) == 1;
-}
-
-static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
@@ -4083,6 +4228,91 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
DP_DPRX_ESI_LEN;
}
+u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+ int mode_clock, int mode_hdisplay)
+{
+ u16 bits_per_pixel, max_bpp_small_joiner_ram;
+ int i;
+
+ /*
+	 * Available Link Bandwidth (Kbits/sec) = NumberOfLanes *
+	 * LinkSymbolClock * 8 * ((100 - FECOverhead) / 100) * TimeSlotsPerMTP
+ * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
+ * for MST -> TimeSlotsPerMTP has to be calculated
+ */
+ bits_per_pixel = (link_clock * lane_count * 8 *
+ DP_DSC_FEC_OVERHEAD_FACTOR) /
+ mode_clock;
+
+ /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+ max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
+ mode_hdisplay;
+
+ /*
+	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
+ * check, output bpp from small joiner RAM check)
+ */
+ bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+
+ /* Error out if the max bpp is less than smallest allowed valid bpp */
+ if (bits_per_pixel < valid_dsc_bpp[0]) {
+ DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
+ return 0;
+ }
+
+ /* Find the nearest match in the array of known BPPs from VESA */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+ if (bits_per_pixel < valid_dsc_bpp[i + 1])
+ break;
+ }
+ bits_per_pixel = valid_dsc_bpp[i];
+
+ /*
+ * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
+ * fractional part is 0
+ */
+ return bits_per_pixel << 4;
+}
+
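/*
 * A standalone sketch of the two bounds above, with hypothetical
 * numbers; the ~2.4% FEC overhead is modelled directly as 976/1000,
 * the joiner RAM size is an assumed placeholder, and the valid bpp
 * table mirrors the VESA-defined values used by the driver.
 */
#include <stdio.h>

int main(void)
{
	const int valid_dsc_bpp[] = { 6, 8, 10, 12, 15 };
	long long link_clock = 810000;		/* HBR3, kHz */
	long long lane_count = 4;
	long long mode_clock = 533250;		/* 4k@60, kHz */
	long long mode_hdisplay = 3840;
	long long joiner_ram_bits = 7680 * 8;	/* assumed buffer size */
	long long bw_bpp, ram_bpp, bpp;
	unsigned int i;

	/* link bandwidth bound: lanes * symbol clock * 8b, minus FEC */
	bw_bpp = link_clock * lane_count * 8 * 976 / 1000 / mode_clock;
	/* small joiner bound: RAM bits per horizontal pixel */
	ram_bpp = joiner_ram_bits / mode_hdisplay;
	bpp = bw_bpp < ram_bpp ? bw_bpp : ram_bpp;

	/* snap down to the nearest valid VESA bpp, as the driver does */
	for (i = 0; i < sizeof(valid_dsc_bpp) / sizeof(valid_dsc_bpp[0]) - 1; i++)
		if (bpp < valid_dsc_bpp[i + 1])
			break;
	printf("bw %lld ram %lld -> dsc bpp %d\n", bw_bpp, ram_bpp,
	       valid_dsc_bpp[i]);
	return 0;
}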
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ int mode_clock,
+ int mode_hdisplay)
+{
+ u8 min_slice_count, i;
+ int max_slice_width;
+
+ if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_0);
+ else
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_1);
+
+ max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+ if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+ DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
+ max_slice_width);
+ return 0;
+ }
+ /* Also take into account max slice width */
+ min_slice_count = min_t(uint8_t, min_slice_count,
+ DIV_ROUND_UP(mode_hdisplay,
+ max_slice_width));
+
+ /* Find the closest match to the valid slice count values */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+ if (valid_dsc_slicecount[i] >
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ false))
+ break;
+ if (min_slice_count <= valid_dsc_slicecount[i])
+ return valid_dsc_slicecount[i];
+ }
+
+ DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+ return 0;
+}
+
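/*
 * A worked example of the selection above, with the throughput limits
 * and the {1, 2, 4} valid-count table assumed from the driver:
 *
 *	mode_clock = 533250 kHz  (4k@60; below the peak pixel rate, so
 *				  the lower per-slice throughput applies)
 *	min_slice_count = DIV_ROUND_UP(533250, 340000) = 2
 *	=> the first valid count >= 2 that the sink supports: 2 slices
 */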
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
int status = 0;
@@ -4341,6 +4571,17 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
if (!intel_dp->link_trained)
return false;
+ /*
+	 * While the PSR source HW is enabled it controls the main link on
+	 * its own, enabling and disabling it as needed, so a retrain
+	 * attempted here would either find the link off or would mix
+	 * training patterns with frame data, and fail either way.
+	 * Besides, on exiting PSR the HW retrains the link anyway, fixing
+	 * any link status error.
+ */
+ if (intel_psr_enabled(intel_dp))
+ return false;
+
if (!intel_dp_get_link_status(intel_dp, link_status))
return false;
@@ -4403,7 +4644,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
/* Suppress underruns caused by re-training */
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- if (crtc->config->has_pch_encoder)
+ if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), false);
@@ -4414,7 +4655,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
intel_wait_for_vblank(dev_priv, crtc->pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
- if (crtc->config->has_pch_encoder)
+ if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), true);
@@ -4462,6 +4703,29 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
return changed;
}
+static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
+{
+ u8 val;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
+ return;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
+
+ if (val & DP_AUTOMATED_TEST_REQUEST)
+ intel_dp_handle_test_request(intel_dp);
+
+ if (val & DP_CP_IRQ)
+ intel_hdcp_check_link(intel_dp->attached_connector);
+
+ if (val & DP_SINK_SPECIFIC_IRQ)
+ DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
+}
+
/*
* According to DP spec
* 5.1.2:
@@ -4479,7 +4743,6 @@ static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 sink_irq_vector = 0;
u8 old_sink_count = intel_dp->sink_count;
bool ret;
@@ -4502,20 +4765,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
return false;
}
- /* Try to read the source of the interrupt */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
- sink_irq_vector != 0) {
- /* Clear interrupt source */
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector);
-
- if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
- intel_dp_handle_test_request(intel_dp);
- if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
- DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
- }
+ intel_dp_check_service_irq(intel_dp);
/* Handle CEC interrupts, if any */
drm_dp_cec_irq(&intel_dp->aux);
@@ -4810,6 +5060,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
type_str);
}
+static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port);
+
/*
* This function implements the first part of the Connect Flow described by our
* specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
@@ -4864,9 +5117,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
if (dig_port->tc_type == TC_PORT_TYPEC &&
!(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
- val = I915_READ(PORT_TX_DFLEXDPCSSS);
- val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
- I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+ icl_tc_phy_disconnect(dev_priv, dig_port);
return false;
}
@@ -4881,21 +5132,24 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port)
{
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- u32 val;
- if (dig_port->tc_type != TC_PORT_LEGACY &&
- dig_port->tc_type != TC_PORT_TYPEC)
+ if (dig_port->tc_type == TC_PORT_UNKNOWN)
return;
/*
- * This function may be called many times in a row without an HPD event
- * in between, so try to avoid the write when we can.
+	 * The TBT disconnection flow only requires reading the live status,
+	 * which was already done by the caller.
*/
- val = I915_READ(PORT_TX_DFLEXDPCSSS);
- if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) {
+ if (dig_port->tc_type == TC_PORT_TYPEC ||
+ dig_port->tc_type == TC_PORT_LEGACY) {
+ u32 val;
+
+ val = I915_READ(PORT_TX_DFLEXDPCSSS);
val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
}
+
+ dig_port->tc_type = TC_PORT_UNKNOWN;
}
/*
@@ -4945,19 +5199,14 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- switch (encoder->hpd_pin) {
- case HPD_PORT_A:
- case HPD_PORT_B:
+ if (intel_port_is_combophy(dev_priv, encoder->port))
return icl_combo_port_connected(dev_priv, dig_port);
- case HPD_PORT_C:
- case HPD_PORT_D:
- case HPD_PORT_E:
- case HPD_PORT_F:
+ else if (intel_port_is_tc(dev_priv, encoder->port))
return icl_tc_port_connected(dev_priv, dig_port);
- default:
+ else
MISSING_CASE(encoder->hpd_pin);
- return false;
- }
+
+ return false;
}
/*
@@ -4982,20 +5231,23 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
return g4x_digital_port_connected(encoder);
}
- if (IS_GEN5(dev_priv))
- return ilk_digital_port_connected(encoder);
- else if (IS_GEN6(dev_priv))
- return snb_digital_port_connected(encoder);
- else if (IS_GEN7(dev_priv))
- return ivb_digital_port_connected(encoder);
- else if (IS_GEN8(dev_priv))
- return bdw_digital_port_connected(encoder);
+ if (INTEL_GEN(dev_priv) >= 11)
+ return icl_digital_port_connected(encoder);
+ else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv))
+ return spt_digital_port_connected(encoder);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(encoder);
- else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
- return spt_digital_port_connected(encoder);
- else
- return icl_digital_port_connected(encoder);
+ else if (IS_GEN8(dev_priv))
+ return bdw_digital_port_connected(encoder);
+ else if (IS_GEN7(dev_priv))
+ return ivb_digital_port_connected(encoder);
+ else if (IS_GEN6(dev_priv))
+ return snb_digital_port_connected(encoder);
+ else if (IS_GEN5(dev_priv))
+ return ilk_digital_port_connected(encoder);
+
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ return false;
}
static struct edid *
@@ -5042,28 +5294,35 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
}
static int
-intel_dp_long_pulse(struct intel_connector *connector,
- struct drm_modeset_acquire_ctx *ctx)
+intel_dp_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
- u8 sink_irq_vector = 0;
+ enum intel_display_power_domain aux_domain =
+ intel_aux_power_domain(dig_port);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv, aux_domain);
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
- else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
+ else if (intel_digital_port_connected(encoder))
status = intel_dp_detect_dpcd(intel_dp);
else
status = connector_status_disconnected;
if (status == connector_status_disconnected) {
memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
+ memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
@@ -5089,6 +5348,10 @@ intel_dp_long_pulse(struct intel_connector *connector,
intel_dp_print_rates(intel_dp);
+ /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
+ if (INTEL_GEN(dev_priv) >= 11)
+ intel_dp_get_dsc_sink_cap(intel_dp);
+
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
@@ -5109,9 +5372,13 @@ intel_dp_long_pulse(struct intel_connector *connector,
* with an IRQ_HPD, so force a link status check.
*/
if (!intel_dp_is_edp(intel_dp)) {
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int ret;
- intel_dp_retrain_link(encoder, ctx);
+ ret = intel_dp_retrain_link(encoder, ctx);
+ if (ret) {
+ intel_display_power_put(dev_priv, aux_domain);
+ return ret;
+ }
}
/*
@@ -5123,61 +5390,17 @@ intel_dp_long_pulse(struct intel_connector *connector,
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
- if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
+ if (intel_dp_is_edp(intel_dp) ||
+ to_intel_connector(connector)->detect_edid)
status = connector_status_connected;
- intel_dp->detect_done = true;
- /* Try to read the source of the interrupt */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
- sink_irq_vector != 0) {
- /* Clear interrupt source */
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector);
-
- if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
- intel_dp_handle_test_request(intel_dp);
- if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
- DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
- }
+ intel_dp_check_service_irq(intel_dp);
out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
- return status;
-}
-
-static int
-intel_dp_detect(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- int status = connector->status;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
-
- /* If full detect is not performed yet, do a full detect */
- if (!intel_dp->detect_done) {
- struct drm_crtc *crtc;
- int ret;
-
- crtc = connector->state->crtc;
- if (crtc) {
- ret = drm_modeset_lock(&crtc->mutex, ctx);
- if (ret)
- return ret;
- }
-
- status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
- }
-
- intel_dp->detect_done = false;
-
+ intel_display_power_put(dev_priv, aux_domain);
return status;
}
@@ -5185,8 +5408,11 @@ static void
intel_dp_force(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ enum intel_display_power_domain aux_domain =
+ intel_aux_power_domain(dig_port);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -5195,11 +5421,11 @@ intel_dp_force(struct drm_connector *connector)
if (connector->status != connector_status_connected)
return;
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv, aux_domain);
intel_dp_set_edid(intel_dp);
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv, aux_domain);
}
static int intel_dp_get_modes(struct drm_connector *connector)
@@ -5264,27 +5490,6 @@ intel_dp_connector_unregister(struct drm_connector *connector)
intel_connector_unregister(connector);
}
-static void
-intel_dp_connector_destroy(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- kfree(intel_connector->detect_edid);
-
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- kfree(intel_connector->edid);
-
- /*
- * Can't call intel_dp_is_edp() since the encoder may have been
- * destroyed already.
- */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- intel_panel_fini(&intel_connector->panel);
-
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -5348,7 +5553,8 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
an, DRM_HDCP_AN_LEN);
if (dpcd_ret != DRM_HDCP_AN_LEN) {
- DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret);
+ DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
+ dpcd_ret);
return dpcd_ret >= 0 ? -EIO : dpcd_ret;
}
@@ -5364,10 +5570,10 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
rxbuf, sizeof(rxbuf),
DP_AUX_CH_CTL_AUX_AKSV_SELECT);
if (ret < 0) {
- DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
return ret;
} else if (ret == 0) {
- DRM_ERROR("Aksv write over DP/AUX was empty\n");
+ DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
return -EIO;
}
@@ -5382,7 +5588,7 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret != DRM_HDCP_KSV_LEN) {
- DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -5400,7 +5606,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret != DRM_HDCP_BSTATUS_LEN) {
- DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -5415,7 +5621,7 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
bcaps, 1);
if (ret != 1) {
- DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -5445,7 +5651,7 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret != DRM_HDCP_RI_LEN) {
- DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -5460,7 +5666,7 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
*ksv_ready = bstatus & DP_BSTATUS_READY;
@@ -5482,8 +5688,8 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
ksv_fifo + i * DRM_HDCP_KSV_LEN,
len);
if (ret != len) {
- DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i,
- ret);
+ DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
+ i, ret);
return ret >= 0 ? -EIO : ret;
}
}
@@ -5503,7 +5709,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
DP_AUX_HDCP_V_PRIME(i), part,
DRM_HDCP_V_PRIME_PART_LEN);
if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
- DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+ DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -5526,7 +5732,7 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
return false;
}
@@ -5565,6 +5771,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -5578,7 +5785,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
* indefinitely.
*/
DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
edp_panel_vdd_schedule_off(intel_dp);
}
@@ -5631,7 +5838,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_dp_connector_register,
.early_unregister = intel_dp_connector_unregister,
- .destroy = intel_dp_connector_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
@@ -5673,11 +5880,11 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
if (long_hpd) {
intel_dp->reset_link_params = true;
- intel_dp->detect_done = false;
return IRQ_NONE;
}
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@@ -5690,7 +5897,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
- intel_dp->detect_done = false;
goto put_power;
}
}
@@ -5700,19 +5906,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
handled = intel_dp_short_pulse(intel_dp);
- /* Short pulse can signify loss of hdcp authentication */
- intel_hdcp_check_link(intel_dp->attached_connector);
-
- if (!handled) {
- intel_dp->detect_done = false;
+ if (!handled)
goto put_power;
- }
}
ret = IRQ_HANDLED;
put_power:
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
return ret;
}
@@ -5743,6 +5945,10 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ drm_connector_attach_max_bpc_property(connector, 6, 10);
+ else if (INTEL_GEN(dev_priv) >= 5)
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
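/*
 * A minimal sketch of how the property limits the pipe (assuming the
 * drm core's max_requested_bpc field on the connector state, clamped
 * during compute_config; hypothetical placement):
 *
 *	bpp = min(bpp, 3 * conn_state->max_requested_bpc);
 */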
if (intel_dp_is_edp(intel_dp)) {
u32 allowed_scalers;
@@ -6099,10 +6305,10 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
switch (index) {
case DRRS_HIGH_RR:
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ intel_dp_set_m_n(crtc_state, M1_N1);
break;
case DRRS_LOW_RR:
- intel_dp_set_m_n(intel_crtc, M2_N2);
+ intel_dp_set_m_n(crtc_state, M2_N2);
break;
case DRRS_MAX_RR:
default:
@@ -6422,6 +6628,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!intel_dp_is_edp(intel_dp))
return true;
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
+
/*
* On IBX/CPT we may get here with LVDS already registered. Since the
* driver uses the only internal power sequencer available for both
@@ -6514,6 +6722,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_connector->panel.backlight.power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);
+ if (fixed_mode)
+ drm_connector_init_panel_orientation_property(
+ connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
+
return true;
out_vdd_off:
@@ -6624,9 +6836,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_aux_init(intel_dp);
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
- edp_panel_vdd_work);
-
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (HAS_DDI(dev_priv))
@@ -6743,6 +6952,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
if (port != PORT_A)
intel_infoframe_init(intel_dig_port);
+ intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 1b00f8ea145b..4de247ddf05f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -51,6 +51,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
bpp = 24;
if (intel_dp->compliance.test_data.bpc) {
@@ -208,12 +209,25 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- if (intel_dp->active_mst_links == 0 &&
- intel_dig_port->base.pre_pll_enable)
+ if (intel_dp->active_mst_links == 0)
intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
pipe_config, NULL);
}
+static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ if (intel_dp->active_mst_links == 0)
+ intel_dig_port->base.post_pll_disable(&intel_dig_port->base,
+ old_crtc_state,
+ old_conn_state);
+}
+
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
@@ -335,24 +349,12 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
intel_connector->port);
}
-static void
-intel_dp_mst_connector_destroy(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- kfree(intel_connector->edid);
-
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_dp_mst_connector_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
@@ -452,6 +454,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
if (!intel_connector)
return NULL;
+ intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ intel_connector->mst_port = intel_dp;
+ intel_connector->port = port;
+
connector = &intel_connector->base;
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
@@ -462,10 +468,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
- intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
- intel_connector->mst_port = intel_dp;
- intel_connector->port = port;
-
for_each_pipe(dev_priv, pipe) {
struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base;
@@ -560,6 +562,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
+ intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 00b3ab656b06..3c7f10d17658 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -748,7 +748,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
- if (crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
@@ -765,7 +765,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
- if (crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index e6cac9225536..d513ca875c67 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -126,16 +126,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
/**
* intel_prepare_shared_dpll - call a dpll's prepare hook
- * @crtc: CRTC which has a shared dpll
+ * @crtc_state: CRTC, and its state, which has a shared dpll
*
* This calls the PLL's prepare hook if it has one and if the PLL is not
* already enabled. The prepare hook is platform specific.
*/
-void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (WARN_ON(pll == NULL))
return;
@@ -154,15 +154,15 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
/**
* intel_enable_shared_dpll - enable a CRTC's shared DPLL
- * @crtc: CRTC which has a shared DPLL
+ * @crtc_state: CRTC, and its state, which has a shared DPLL
*
* Enable the shared DPLL used by @crtc.
*/
-void intel_enable_shared_dpll(struct intel_crtc *crtc)
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
unsigned int old_mask;
@@ -199,14 +199,15 @@ out:
/**
* intel_disable_shared_dpll - disable a CRTC's shared DPLL
- * @crtc: CRTC which has a shared DPLL
+ * @crtc_state: CRTC, and its state, which has a shared DPLL
*
* Disable the shared DPLL used by @crtc.
*/
-void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
/* PCH only available on ILK+ */
@@ -409,14 +410,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
-
- /* Make sure no transcoder isn't still depending on us. */
- for_each_intel_crtc(dev, crtc) {
- if (crtc->config->shared_dpll == pll)
- assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
- }
I915_WRITE(PCH_DPLL(id), 0);
POSTING_READ(PCH_DPLL(id));
@@ -2530,7 +2523,8 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
if (intel_port_is_tc(dev_priv, encoder->port))
ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
else
ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
@@ -2628,11 +2622,16 @@ static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
}
-static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
+enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
{
return port - PORT_C + DPLL_ID_ICL_MGPLL1;
}
+bool intel_dpll_is_combophy(enum intel_dpll_id id)
+{
+ return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
+}
+
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
uint32_t *target_dco_khz,
struct intel_dpll_hw_state *state)
@@ -2874,8 +2873,8 @@ static struct intel_shared_dpll *
icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *intel_dig_port;
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state pll_state = {};
enum port port = encoder->port;
@@ -2883,18 +2882,21 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
int clock = crtc_state->port_clock;
bool ret;
- switch (port) {
- case PORT_A:
- case PORT_B:
+ if (intel_port_is_combophy(dev_priv, port)) {
min = DPLL_ID_ICL_DPLL0;
max = DPLL_ID_ICL_DPLL1;
ret = icl_calc_dpll_state(crtc_state, encoder, clock,
&pll_state);
- break;
- case PORT_C:
- case PORT_D:
- case PORT_E:
- case PORT_F:
+ } else if (intel_port_is_tc(dev_priv, port)) {
+ if (encoder->type == INTEL_OUTPUT_DP_MST) {
+ struct intel_dp_mst_encoder *mst_encoder;
+
+ mst_encoder = enc_to_mst(&encoder->base);
+ intel_dig_port = mst_encoder->primary;
+ } else {
+ intel_dig_port = enc_to_dig_port(&encoder->base);
+ }
+
if (intel_dig_port->tc_type == TC_PORT_TBT) {
min = DPLL_ID_ICL_TBTPLL;
max = min;
@@ -2906,8 +2908,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
&pll_state);
}
- break;
- default:
+ } else {
MISSING_CASE(port);
return NULL;
}
@@ -2932,21 +2933,16 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
{
- switch (id) {
- default:
- MISSING_CASE(id);
- /* fall through */
- case DPLL_ID_ICL_DPLL0:
- case DPLL_ID_ICL_DPLL1:
+ if (intel_dpll_is_combophy(id))
return CNL_DPLL_ENABLE(id);
- case DPLL_ID_ICL_TBTPLL:
+ else if (id == DPLL_ID_ICL_TBTPLL)
return TBT_PLL_ENABLE;
- case DPLL_ID_ICL_MGPLL1:
- case DPLL_ID_ICL_MGPLL2:
- case DPLL_ID_ICL_MGPLL3:
- case DPLL_ID_ICL_MGPLL4:
+ else
+ /*
+ * TODO: Make MG_PLL macros use
+ * tc port id instead of port id
+ */
return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
- }
}
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2965,17 +2961,11 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PLL_ENABLE))
goto out;
- switch (id) {
- case DPLL_ID_ICL_DPLL0:
- case DPLL_ID_ICL_DPLL1:
- case DPLL_ID_ICL_TBTPLL:
+ if (intel_dpll_is_combophy(id) ||
+ id == DPLL_ID_ICL_TBTPLL) {
hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
- break;
- case DPLL_ID_ICL_MGPLL1:
- case DPLL_ID_ICL_MGPLL2:
- case DPLL_ID_ICL_MGPLL3:
- case DPLL_ID_ICL_MGPLL4:
+ } else {
port = icl_mg_pll_id_to_port(id);
hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
@@ -3013,9 +3003,6 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
- break;
- default:
- MISSING_CASE(id);
}
ret = true;
@@ -3104,21 +3091,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
PLL_POWER_STATE, 1))
DRM_ERROR("PLL %d Power not enabled\n", id);
- switch (id) {
- case DPLL_ID_ICL_DPLL0:
- case DPLL_ID_ICL_DPLL1:
- case DPLL_ID_ICL_TBTPLL:
+ if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL)
icl_dpll_write(dev_priv, pll);
- break;
- case DPLL_ID_ICL_MGPLL1:
- case DPLL_ID_ICL_MGPLL2:
- case DPLL_ID_ICL_MGPLL3:
- case DPLL_ID_ICL_MGPLL4:
+ else
icl_mg_pll_write(dev_priv, pll);
- break;
- default:
- MISSING_CASE(id);
- }
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index bf0de8a4dc63..a033d8f06d4a 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -334,9 +334,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
struct intel_crtc *crtc,
struct drm_atomic_state *state);
-void intel_prepare_shared_dpll(struct intel_crtc *crtc);
-void intel_enable_shared_dpll(struct intel_crtc *crtc);
-void intel_disable_shared_dpll(struct intel_crtc *crtc);
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
@@ -345,5 +345,7 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
uint32_t pll_id);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
+enum intel_dpll_id icl_port_to_mg_pll_id(enum port port);
+bool intel_dpll_is_combophy(enum intel_dpll_id id);
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f8dc84b2d2d3..f94a04b4ad87 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -381,6 +381,15 @@ struct intel_hdcp_shim {
bool *hdcp_capable);
};
+struct intel_hdcp {
+ const struct intel_hdcp_shim *shim;
+ /* Mutex for hdcp state of the connector */
+ struct mutex mutex;
+ u64 value;
+ struct delayed_work check_work;
+ struct work_struct prop_work;
+};
+
struct intel_connector {
struct drm_connector base;
/*
@@ -413,11 +422,7 @@ struct intel_connector {
/* Work struct to schedule a uevent on link train failure */
struct work_struct modeset_retry_work;
- const struct intel_hdcp_shim *hdcp_shim;
- struct mutex hdcp_mutex;
- uint64_t hdcp_value; /* protected by hdcp_mutex */
- struct delayed_work hdcp_check_work;
- struct work_struct hdcp_prop_work;
+ struct intel_hdcp hdcp;
};
struct intel_digital_connector_state {
@@ -539,6 +544,26 @@ struct intel_plane_state {
*/
int scaler_id;
+ /*
+ * linked_plane:
+ *
+ * ICL planar formats require 2 planes that are updated as pairs.
+ * This member is used to make sure the other plane is also updated
+ * when required, and for update_slave() to find the correct
+ * plane_state to pass as argument.
+ */
+ struct intel_plane *linked_plane;
+
+ /*
+ * slave:
+	 * If set, don't update this plane directly; the linked plane's
+	 * state is used to update it during atomic commit, via the
+	 * update_slave() callback.
+ *
+ * It's also used by the watermark code to ignore wm calculations on
+ * this plane. They're calculated by the linked plane's wm code.
+ */
+ u32 slave;
+
struct drm_intel_sprite_colorkey ckey;
};
@@ -547,6 +572,7 @@ struct intel_initial_plane_config {
unsigned int tiling;
int size;
u32 base;
+ u8 rotation;
};
#define SKL_MIN_SRC_W 8
@@ -680,6 +706,8 @@ struct intel_crtc_wm_state {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
+ struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES];
} skl;
struct {
@@ -712,6 +740,13 @@ struct intel_crtc_wm_state {
bool need_postvbl_update;
};
+enum intel_output_format {
+ INTEL_OUTPUT_FORMAT_INVALID,
+ INTEL_OUTPUT_FORMAT_RGB,
+ INTEL_OUTPUT_FORMAT_YCBCR420,
+ INTEL_OUTPUT_FORMAT_YCBCR444,
+};
+
struct intel_crtc_state {
struct drm_crtc_state base;
@@ -893,14 +928,32 @@ struct intel_crtc_state {
u8 active_planes;
u8 nv12_planes;
+ /* bitmask of planes that will be updated during the commit */
+ u8 update_planes;
+
/* HDMI scrambling status */
bool hdmi_scrambling;
/* HDMI High TMDS char rate ratio */
bool hdmi_high_tmds_clock_ratio;
- /* output format is YCBCR 4:2:0 */
- bool ycbcr420;
+ /* Output format RGB/YCBCR etc */
+ enum intel_output_format output_format;
+
+ /* Output down scaling is done in LSPCON device */
+ bool lspcon_downsampling;
+
+ /* Display Stream compression state */
+ struct {
+ bool compression_enable;
+ bool dsc_split;
+ u16 compressed_bpp;
+ u8 slice_count;
+ } dsc_params;
+ struct drm_dsc_config dp_dsc_cfg;
+
+ /* Forward Error correction State */
+ bool fec_enable;
};
struct intel_crtc {
@@ -973,8 +1026,11 @@ struct intel_plane {
void (*update_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
+ void (*update_slave)(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
- struct intel_crtc *crtc);
+ const struct intel_crtc_state *crtc_state);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
@@ -1070,13 +1126,13 @@ struct intel_dp {
bool link_mst;
bool link_trained;
bool has_audio;
- bool detect_done;
bool reset_link_params;
- enum aux_ch aux_ch;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+ u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
+ u8 fec_capable;
/* source rates */
int num_source_rates;
const int *source_rates;
@@ -1094,7 +1150,6 @@ struct intel_dp {
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
- enum intel_display_power_domain aux_power_domain;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@@ -1156,9 +1211,15 @@ struct intel_dp {
struct intel_dp_compliance compliance;
};
+enum lspcon_vendor {
+ LSPCON_VENDOR_MCA,
+ LSPCON_VENDOR_PARADE
+};
+
struct intel_lspcon {
bool active;
enum drm_lspcon_mode mode;
+ enum lspcon_vendor vendor;
};
struct intel_digital_port {
@@ -1170,18 +1231,20 @@ struct intel_digital_port {
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
+ /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
+ enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
enum tc_port_type tc_type;
- void (*write_infoframe)(struct drm_encoder *encoder,
+ void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len);
- void (*set_infoframes)(struct drm_encoder *encoder,
+ void (*set_infoframes)(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
- bool (*infoframe_enabled)(struct drm_encoder *encoder,
+ bool (*infoframe_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
@@ -1281,6 +1344,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
return NULL;
}
+static inline struct intel_digital_port *
+conn_to_dig_port(struct intel_connector *connector)
+{
+ return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+}
+
static inline struct intel_dp_mst_encoder *
enc_to_mst(struct drm_encoder *encoder)
{
@@ -1306,6 +1375,12 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
}
}
+static inline struct intel_lspcon *
+enc_to_intel_lspcon(struct drm_encoder *encoder)
+{
+ return &enc_to_dig_port(encoder)->lspcon;
+}
+
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
@@ -1331,6 +1406,27 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
}
static inline struct intel_plane_state *
+intel_atomic_get_plane_state(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct drm_plane_state *ret =
+ drm_atomic_get_plane_state(&state->base, &plane->base);
+
+ if (IS_ERR(ret))
+ return ERR_CAST(ret);
+
+ return to_intel_plane_state(ret);
+}
+
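/*
 * A minimal usage sketch (hypothetical caller): unlike the _old/_new
 * lookups below, this acquiring variant can fail with a deadlock/
 * backoff error, so the result must be checked with IS_ERR():
 *
 *	plane_state = intel_atomic_get_plane_state(state, plane);
 *	if (IS_ERR(plane_state))
 *		return PTR_ERR(plane_state);
 */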
+static inline struct intel_plane_state *
+intel_atomic_get_old_plane_state(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ return to_intel_plane_state(drm_atomic_get_old_plane_state(&state->base,
+ &plane->base));
+}
+
+static inline struct intel_plane_state *
intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
struct intel_plane *plane)
{
@@ -1438,12 +1534,9 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state);
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state);
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id pll_id);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height);
@@ -1488,7 +1581,6 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
@@ -1509,20 +1601,12 @@ void intel_mark_idle(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
-int intel_connector_init(struct intel_connector *);
-struct intel_connector *intel_connector_alloc(void);
-void intel_connector_free(struct intel_connector *connector);
-bool intel_connector_get_hw_state(struct intel_connector *connector);
-void intel_connector_attach_encoder(struct intel_connector *connector,
- struct intel_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
-
-enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -1628,9 +1712,11 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
unsigned int skl_cdclk_get_vco(unsigned int freq);
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
+ enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
struct dpll *best_clock);
@@ -1641,12 +1727,14 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
-u16 skl_scaler_calc_phase(int sub, bool chroma_center);
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(const struct intel_crtc_state *crtc_state,
u32 pixel_format);
@@ -1670,6 +1758,24 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
+/* intel_connector.c */
+int intel_connector_init(struct intel_connector *connector);
+struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
+void intel_connector_destroy(struct drm_connector *connector);
+int intel_connector_register(struct drm_connector *connector);
+void intel_connector_unregister(struct drm_connector *connector);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+enum pipe intel_connector_get_pipe(struct intel_connector *connector);
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+void intel_attach_aspect_ratio_property(struct drm_connector *connector);
+
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
void intel_csr_load_program(struct drm_i915_private *);
@@ -1695,6 +1801,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
@@ -1728,9 +1837,6 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
-void icl_program_mg_dp_mode(struct intel_dp *intel_dp);
-void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port);
-void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port);
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1748,6 +1854,16 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
+uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+ int mode_clock, int mode_hdisplay);
+uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+ int mode_hdisplay);
+
+/* intel_vdsc.c */
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config);
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
@@ -1768,6 +1884,9 @@ void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* vlv_dsi.c */
void vlv_dsi_init(struct drm_i915_private *dev_priv);
+/* icl_dsi.c */
+void icl_dsi_init(struct drm_i915_private *dev_priv);
+
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1858,7 +1977,6 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
-
/* intel_lvds.c */
bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t lvds_reg, enum pipe *pipe);
@@ -1866,19 +1984,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
-
-/* intel_modes.c */
-int intel_connector_update_modes(struct drm_connector *connector,
- struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-void intel_attach_force_audio_property(struct drm_connector *connector);
-void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-
-
/* intel_overlay.c */
-void intel_setup_overlay(struct drm_i915_private *dev_priv);
-void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
+void intel_overlay_setup(struct drm_i915_private *dev_priv);
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
int intel_overlay_switch_off(struct intel_overlay *overlay);
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1907,7 +2015,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
-void intel_panel_destroy_backlight(struct drm_connector *connector);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
@@ -1936,6 +2043,7 @@ int intel_hdcp_enable(struct intel_connector *connector);
int intel_hdcp_disable(struct intel_connector *connector);
int intel_hdcp_check_link(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
+bool intel_hdcp_capable(struct intel_connector *connector);
/* intel_psr.c */
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
@@ -1961,12 +2069,18 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
u32 *out_value);
+bool intel_psr_enabled(struct intel_dp *intel_dp);
+
+/* intel_quirks.c */
+void intel_init_quirks(struct drm_i915_private *dev_priv);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void icl_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_power_domains_enable(struct drm_i915_private *dev_priv);
void intel_power_domains_disable(struct drm_i915_private *dev_priv);
@@ -2090,6 +2204,9 @@ void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
@@ -2101,10 +2218,13 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
- const struct skl_ddb_entry **entries,
- const struct skl_ddb_entry *ddb,
- int ignore);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry entries[],
+ int num_entries, int ignore_idx);
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_device *dev);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
@@ -2127,23 +2247,29 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
-void skl_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
-bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
-bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id);
-bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id);
-unsigned int skl_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
-int skl_plane_check(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
int intel_plane_check_stride(const struct intel_plane_state *plane_state);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id);
+
+static inline bool icl_is_nv12_y_plane(enum plane_id id)
+{
+ /* No gen check needed; these planes are only available on gen11 */
+ if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
+ return true;
+
+ return false;
+}
+
+static inline bool icl_is_hdr_plane(struct intel_plane *plane)
+{
+ if (INTEL_GEN(to_i915(plane->base.dev)) < 11)
+ return false;
+
+ return plane->id < PLANE_SPRITE2;
+}
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
@@ -2185,11 +2311,16 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
/* intel_atomic_plane.c */
-struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
+struct intel_plane *intel_plane_alloc(void);
+void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -2205,6 +2336,18 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state);
bool lspcon_init(struct intel_digital_port *intel_dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *buf, ssize_t len);
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state);
/* intel_pipe_crc.c */
#ifdef CONFIG_DEBUG_FS
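The intel_atomic_get_plane_state() wrapper added above returns the error
from drm_atomic_get_plane_state() via ERR_CAST() instead of NULL, so
callers are expected to check it with IS_ERR(). A minimal caller sketch,
assuming a hypothetical check hook (example_check_plane is illustrative,
not part of the patch):

static int example_check_plane(struct intel_atomic_state *state,
			       struct intel_plane *plane)
{
	struct intel_plane_state *plane_state =
		intel_atomic_get_plane_state(state, plane);

	/* drm_atomic_get_plane_state() can fail, e.g. with -EDEADLK */
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	/* ... inspect or modify plane_state here ... */
	return 0;
}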
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 000000000000..5fec02aceaed
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include "intel_dsi.h"
+
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
+{
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ if (WARN_ON(bpp < 0))
+ bpp = 16;
+
+ return intel_dsi->pclk * bpp / intel_dsi->lane_count;
+}
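A worked example of the formula above, with illustrative numbers only:

/*
 * pclk = 148500 kHz (1080p), RGB888 so bpp = 24, lane_count = 4:
 * 148500 * 24 / 4 = 891000, i.e. 891 Mbps per lane.
 */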
+
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
+{
+ switch (intel_dsi->escape_clk_div) {
+ default:
+ case 0:
+ return 50;
+ case 1:
+ return 100;
+ case 2:
+ return 200;
+ }
+}
+
+int intel_dsi_get_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!intel_connector->panel.fixed_mode) {
+ DRM_DEBUG_KMS("no fixed mode\n");
+ return 0;
+ }
+
+ mode = drm_mode_duplicate(connector->dev,
+ intel_connector->panel.fixed_mode);
+ if (!mode) {
+ DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ if (fixed_mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+ }
+
+ return MODE_OK;
+}
+
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+ const struct mipi_dsi_host_ops *funcs,
+ enum port port)
+{
+ struct intel_dsi_host *host;
+ struct mipi_dsi_device *device;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ host->base.ops = funcs;
+ host->intel_dsi = intel_dsi;
+ host->port = port;
+
+ /*
+ * We should call mipi_dsi_host_register(&host->base) here, but we don't
+ * have a host->dev, and we don't have OF stuff either. So just use the
+ * dsi framework as a library and hope for the best. Create the dsi
+ * devices by ourselves here too. Need to be careful though, because we
+ * don't initialize any of the driver model devices here.
+ */
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ kfree(host);
+ return NULL;
+ }
+
+ device->host = &host->base;
+ host->device = device;
+
+ return host;
+}
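A sketch of the expected caller, assuming the dsi_hosts[] field of struct
intel_dsi and a my_dsi_host_ops table (the ops table name is an assumption
for illustration):

enum port port;

for_each_dsi_port(port, intel_dsi->ports) {
	struct intel_dsi_host *host =
		intel_dsi_host_init(intel_dsi, &my_dsi_host_ops, port);

	/* no driver-model device backs this host, so only kfree() cleanup */
	if (!host)
		return false;

	intel_dsi->dsi_hosts[port] = host;
}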
+
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum drm_panel_orientation orientation;
+
+ orientation = dev_priv->vbt.dsi.orientation;
+ if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return orientation;
+
+ orientation = dev_priv->vbt.orientation;
+ if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return orientation;
+
+ return DRM_MODE_PANEL_ORIENTATION_NORMAL;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index ad7c1cb32983..d968f1f13e09 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -81,14 +81,21 @@ struct intel_dsi {
u16 dcs_backlight_ports;
u16 dcs_cabc_ports;
+ /* RGB or BGR */
+ bool bgr_enabled;
+
u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
+
+ /* data lanes dphy timings */
+ u32 dphy_data_lane_reg;
u32 video_frmt_cfg_bits;
u16 lp_byte_clk;
/* timeouts in byte clocks */
+ u16 hs_tx_timeout;
u16 lp_rx_timeout;
u16 turn_arnd_val;
u16 rst_timer_val;
@@ -129,9 +136,36 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct intel_dsi, base.base);
}
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
+}
+
+static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
+{
+ return enc_to_intel_dsi(&encoder->base)->ports;
+}
+
+/* intel_dsi.c */
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector);
+
/* vlv_dsi.c */
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+int intel_dsi_get_modes(struct drm_connector *connector);
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+ const struct mipi_dsi_host_ops *funcs,
+ enum port port);
/* vlv_dsi_pll.c */
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
@@ -158,5 +192,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index ac83d6b89ae0..a1a8b3790e61 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -103,6 +103,18 @@ static struct gpio_map vlv_gpio_table[] = {
#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
#define CHV_GPIO_CFGLOCK (1 << 31)
+/* ICL DSI Display GPIO Pins */
+#define ICL_GPIO_DDSP_HPD_A 0
+#define ICL_GPIO_L_VDDEN_1 1
+#define ICL_GPIO_L_BKLTEN_1 2
+#define ICL_GPIO_DDPA_CTRLCLK_1 3
+#define ICL_GPIO_DDPA_CTRLDATA_1 4
+#define ICL_GPIO_DDSP_HPD_B 5
+#define ICL_GPIO_L_VDDEN_2 6
+#define ICL_GPIO_L_BKLTEN_2 7
+#define ICL_GPIO_DDPA_CTRLCLK_2 8
+#define ICL_GPIO_DDPA_CTRLDATA_2 9
+
static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
@@ -111,6 +123,7 @@ static inline enum port intel_dsi_seq_port_to_port(u8 port)
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
const u8 *data)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
struct mipi_dsi_device *dsi_device;
u8 type, flags, seq_port;
u16 len;
@@ -181,7 +194,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
- vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
+ if (!IS_ICELAKE(dev_priv))
+ vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
data += len;
@@ -322,6 +336,12 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
gpiod_set_value(gpio_desc, value);
}
+static void icl_exec_gpio(struct drm_i915_private *dev_priv,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n");
+}
+
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@@ -345,7 +365,9 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
/* pull up/down */
value = *data++ & 1;
- if (IS_VALLEYVIEW(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
else if (IS_CHERRYVIEW(dev_priv))
chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
@@ -481,6 +503,17 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
}
}
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+
+ /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
+ if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
+ return;
+
+ msleep(msec);
+}
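A minimal usage sketch, assuming the panel_on_delay field and the power-on
sequence enum from the surrounding driver:

/* panel prepare path: the delay becomes a no-op for v3 vid-mode VBTs */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);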
+
int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
{
struct intel_connector *connector = intel_dsi->attached_connector;
@@ -499,110 +532,125 @@ int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
return 1;
}
-bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+#define ICL_PREPARE_CNT_MAX 0x7
+#define ICL_CLK_ZERO_CNT_MAX 0xf
+#define ICL_TRAIL_CNT_MAX 0x7
+#define ICL_TCLK_PRE_CNT_MAX 0x3
+#define ICL_TCLK_POST_CNT_MAX 0x7
+#define ICL_HS_ZERO_CNT_MAX 0xf
+#define ICL_EXIT_ZERO_CNT_MAX 0x7
+
+static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
- struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
- struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
- u32 bpp;
- u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
- u32 ui_num, ui_den;
+ u32 tlpx_ns;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
u32 ths_prepare_ns, tclk_trail_ns;
- u32 tclk_prepare_clkzero, ths_prepare_hszero;
- u32 lp_to_hs_switch, hs_to_lp_switch;
- u32 pclk, computed_ddr;
- u32 mul;
- u16 burst_mode_ratio;
- enum port port;
-
- DRM_DEBUG_KMS("\n");
-
- intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
- intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
- intel_dsi->lane_count = mipi_config->lane_cnt + 1;
- intel_dsi->pixel_format =
- pixel_format_from_register_bits(
- mipi_config->videomode_color_format << 7);
- bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 hs_zero_cnt;
+ u32 tclk_pre_cnt, tclk_post_cnt;
- intel_dsi->dual_link = mipi_config->dual_link;
- intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
- intel_dsi->operation_mode = mipi_config->is_cmd_mode;
- intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
- intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
- intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
- intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
- intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
- intel_dsi->init_count = mipi_config->master_init_timer;
- intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
- intel_dsi->video_frmt_cfg_bits =
- mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
- pclk = mode->clock;
+ tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
- /* In dual link mode each port needs half of pixel clock */
- if (intel_dsi->dual_link) {
- pclk = pclk / 2;
+ /*
+ * prepare cnt in escape clocks
+ * this field is a fixed-point value in 1.2 format, i.e. the most
+ * significant bit is the integer part and the least significant
+ * 2 bits are fraction bits, so the field can represent a range
+ * of 0.25 to 1.75
+ */
+ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
+ if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
+ DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
+ prepare_cnt = ICL_PREPARE_CNT_MAX;
+ }
- /* we can enable pixel_overlap if needed by panel. In this
- * case we need to increase the pixelclock for extra pixels
- */
- if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
- pclk += DIV_ROUND_UP(mode->vtotal *
- intel_dsi->pixel_overlap *
- 60, 1000);
- }
+ /* clk zero count in escape clocks */
+ clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ ths_prepare_ns, tlpx_ns);
+ if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+ clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
}
- /* Burst Mode Ratio
- * Target ddr frequency from VBT / non burst ddr freq
- * multiply by 100 to preserve remainder
- */
- if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
- if (mipi_config->target_burst_mode_freq) {
- computed_ddr = (pclk * bpp) / intel_dsi->lane_count;
+ /* trail cnt in escape clocks */
+ trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
+ if (trail_cnt > ICL_TRAIL_CNT_MAX) {
+ DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
+ trail_cnt = ICL_TRAIL_CNT_MAX;
+ }
- if (mipi_config->target_burst_mode_freq <
- computed_ddr) {
- DRM_ERROR("Burst mode freq is less than computed\n");
- return false;
- }
+ /* tclk pre count in escape clocks */
+ tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
+ DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+ tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
+ }
- burst_mode_ratio = DIV_ROUND_UP(
- mipi_config->target_burst_mode_freq * 100,
- computed_ddr);
+ /* tclk post count in escape clocks */
+ tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
+ if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
+ DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
+ tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
+ }
- pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
- } else {
- DRM_ERROR("Burst mode target is not set\n");
- return false;
- }
- } else
- burst_mode_ratio = 100;
+ /* hs zero cnt in escape clocks */
+ hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ ths_prepare_ns, tlpx_ns);
+ if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
+ hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
+ }
- intel_dsi->burst_mode_ratio = burst_mode_ratio;
- intel_dsi->pclk = pclk;
+ /* hs exit zero cnt in escape clocks */
+ exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
+ exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
+ }
- bitrate = (pclk * bpp) / intel_dsi->lane_count;
+ /* clock lane dphy timings */
+ intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
+ CLK_PREPARE(prepare_cnt) |
+ CLK_ZERO_OVERRIDE |
+ CLK_ZERO(clk_zero_cnt) |
+ CLK_PRE_OVERRIDE |
+ CLK_PRE(tclk_pre_cnt) |
+ CLK_POST_OVERRIDE |
+ CLK_POST(tclk_post_cnt) |
+ CLK_TRAIL_OVERRIDE |
+ CLK_TRAIL(trail_cnt));
+
+ /* data lanes dphy timings */
+ intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
+ HS_PREPARE(prepare_cnt) |
+ HS_ZERO_OVERRIDE |
+ HS_ZERO(hs_zero_cnt) |
+ HS_TRAIL_OVERRIDE |
+ HS_TRAIL(trail_cnt) |
+ HS_EXIT_OVERRIDE |
+ HS_EXIT(exit_zero_cnt));
+}
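A worked example of the 1.2 fixed-point encoding described above, with
illustrative numbers only:

/*
 * ths_prepare_ns = 60, tlpx_ns = 50:
 * prepare_cnt = DIV_ROUND_UP(60 * 4, 50) = 5 = 0b101,
 * i.e. integer bit 1 and fraction bits 0b01, encoding 1.25 escape clocks.
 */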
- switch (intel_dsi->escape_clk_div) {
- case 0:
- tlpx_ns = 50;
- break;
- case 1:
- tlpx_ns = 100;
- break;
+static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ u32 tlpx_ns, extra_byte_count, tlpx_ui;
+ u32 ui_num, ui_den;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+ u32 ths_prepare_ns, tclk_trail_ns;
+ u32 tclk_prepare_clkzero, ths_prepare_hszero;
+ u32 lp_to_hs_switch, hs_to_lp_switch;
+ u32 mul;
- case 2:
- tlpx_ns = 200;
- break;
- default:
- tlpx_ns = 50;
- break;
- }
+ tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
switch (intel_dsi->lane_count) {
case 1:
@@ -620,7 +668,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
/* in Kbps */
ui_num = NS_KHZ_RATIO;
- ui_den = bitrate;
+ ui_den = intel_dsi_bitrate(intel_dsi);
tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
ths_prepare_hszero = mipi_config->ths_prepare_hszero;
@@ -746,6 +794,88 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+}
+
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
+ struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+ u16 burst_mode_ratio;
+ enum port port;
+
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
+ intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
+ intel_dsi->lane_count = mipi_config->lane_cnt + 1;
+ intel_dsi->pixel_format =
+ pixel_format_from_register_bits(
+ mipi_config->videomode_color_format << 7);
+
+ intel_dsi->dual_link = mipi_config->dual_link;
+ intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
+ intel_dsi->operation_mode = mipi_config->is_cmd_mode;
+ intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
+ intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
+ intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
+ intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout;
+ intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
+ intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
+ intel_dsi->init_count = mipi_config->master_init_timer;
+ intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
+ intel_dsi->video_frmt_cfg_bits =
+ mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ intel_dsi->bgr_enabled = mipi_config->rgb_flip;
+
+ /* Starting point, adjusted depending on dual link and burst mode */
+ intel_dsi->pclk = mode->clock;
+
+ /* In dual link mode each port needs half of pixel clock */
+ if (intel_dsi->dual_link) {
+ intel_dsi->pclk /= 2;
+
+ /* we can enable pixel_overlap if needed by the panel. In this
+ * case we need to increase the pixel clock for the extra pixels
+ */
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ intel_dsi->pclk += DIV_ROUND_UP(mode->vtotal * intel_dsi->pixel_overlap * 60, 1000);
+ }
+ }
+
+ /* Burst Mode Ratio
+ * Target ddr frequency from VBT / non burst ddr freq
+ * multiply by 100 to preserve remainder
+ */
+ if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+ if (mipi_config->target_burst_mode_freq) {
+ u32 bitrate = intel_dsi_bitrate(intel_dsi);
+
+ if (mipi_config->target_burst_mode_freq < bitrate) {
+ DRM_ERROR("Burst mode freq is less than computed\n");
+ return false;
+ }
+
+ burst_mode_ratio = DIV_ROUND_UP(
+ mipi_config->target_burst_mode_freq * 100,
+ bitrate);
+
+ intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
+ } else {
+ DRM_ERROR("Burst mode target is not set\n");
+ return false;
+ }
+ } else
+ burst_mode_ratio = 100;
+
+ intel_dsi->burst_mode_ratio = burst_mode_ratio;
+
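A worked example of the burst-mode scaling above, with illustrative
numbers only:

/*
 * non-burst link rate 891000 kbps, VBT burst target 1000000 kbps:
 * burst_mode_ratio = DIV_ROUND_UP(1000000 * 100, 891000) = 113,
 * so pclk is scaled by 113/100 before the D-PHY parameters are derived.
 */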
+ if (IS_ICELAKE(dev_priv))
+ icl_dphy_param_init(intel_dsi);
+ else
+ vlv_dphy_param_init(intel_dsi);
DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 4e142ff49708..0042a7f69387 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -256,6 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return true;
}
@@ -333,18 +334,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
return 0;
}
-static void intel_dvo_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
- intel_panel_fini(&to_intel_connector(connector)->panel);
- kfree(connector);
-}
-
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.detect = intel_dvo_detect,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_dvo_destroy,
+ .destroy = intel_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 217ed3ee1cab..af2873403009 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -273,13 +273,13 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
- if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
+ if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
return -EINVAL;
- if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
+ if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
return -EINVAL;
- if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+ if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
return -EINVAL;
GEM_BUG_ON(dev_priv->engine[id]);
@@ -335,7 +335,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
WARN_ON(ring_mask == 0);
WARN_ON(ring_mask &
- GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
+ GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
if (!HAS_ENGINE(dev_priv, i))
@@ -399,7 +402,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
err = -EINVAL;
err_id = id;
- if (GEM_WARN_ON(!init))
+ if (GEM_DEBUG_WARN_ON(!init))
goto cleanup;
err = init(engine);
@@ -463,7 +466,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
execlists->port_mask = 1;
- BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
+ GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
execlists->queue_priority = INT_MIN;
@@ -482,7 +485,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
i915_timeline_init(engine->i915, &engine->timeline, engine->name);
- lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE);
+ i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
intel_engine_init_execlist(engine);
intel_engine_init_hangcheck(engine);
@@ -490,46 +493,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_cmd_parser(engine);
}
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
-
- WARN_ON(engine->scratch);
-
- obj = i915_gem_object_create_stolen(engine->i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(engine->i915, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- return PTR_ERR(obj);
- }
-
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unref;
- }
-
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_unref;
-
- engine->scratch = vma;
- return 0;
-
-err_unref:
- i915_gem_object_put(obj);
- return ret;
-}
-
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
-{
- i915_vma_unpin_and_release(&engine->scratch, 0);
-}
-
static void cleanup_status_page(struct intel_engine_cs *engine)
{
if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@@ -704,8 +667,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- intel_engine_cleanup_scratch(engine);
-
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -720,6 +681,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
__intel_context_unpin(i915->kernel_context, engine);
i915_timeline_fini(&engine->timeline);
+
+ intel_wa_list_free(&engine->ctx_wa_list);
+ intel_wa_list_free(&engine->wa_list);
+ intel_wa_list_free(&engine->whitelist);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
@@ -809,7 +774,7 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
u32 slice = fls(sseu->slice_mask);
u32 subslice = fls(sseu->subslice_mask[slice]);
- if (INTEL_GEN(dev_priv) == 10)
+ if (IS_GEN10(dev_priv))
mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
GEN8_MCR_SUBSLICE(subslice);
else if (INTEL_GEN(dev_priv) >= 11)
@@ -1534,10 +1499,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
count = 0;
drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p =
- rb_entry(rb, typeof(*p), node);
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ int i;
- list_for_each_entry(rq, &p->requests, sched.link) {
+ priolist_for_each_request(rq, p, i) {
if (count++ < MAX_REQUESTS_TO_SHOW - 1)
print_request(m, rq, "\t\tQ ");
else
@@ -1559,8 +1524,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
- drm_printf(m, "\t%s [%d] waiting for %x\n",
- w->tsk->comm, w->tsk->pid, w->seqno);
+ drm_printf(m, "\t%s [%d:%c] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid,
+ task_state_to_char(w->tsk),
+ w->seqno);
}
spin_unlock(&b->rb_lock);
local_irq_restore(flags);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 74d425c700ef..f23570c44323 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
- if (INTEL_GEN(dev_priv) == 7)
+ if (IS_GEN7(dev_priv))
lines = min(lines, 2048);
else if (INTEL_GEN(dev_priv) >= 8)
lines = min(lines, 2560);
@@ -674,6 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.adjusted_y = plane_state->color_plane[0].y;
cache->plane.y = plane_state->base.src.y1 >> 16;
+ cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
+
if (!cache->plane.visible)
return;
@@ -748,6 +750,12 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
+ if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
+ cache->fb.format->has_alpha) {
+ fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
+ return false;
+ }
+
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
@@ -1301,7 +1309,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->active = false;
if (need_fbc_vtd_wa(dev_priv))
- mkwrite_device_info(dev_priv)->has_fbc = false;
+ mkwrite_device_info(dev_priv)->display.has_fbc = false;
i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f99332972b7a..fb5bb5b32a60 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -593,7 +593,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
* pipe. Note we need to use the selected fb's pitch and bpp
* rather than the current pipe's, since they differ.
*/
- cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
+ cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
@@ -603,13 +603,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
break;
}
- cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
+ cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
- intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
- intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
+ crtc->state->adjusted_mode.crtc_hdisplay,
+ crtc->state->adjusted_mode.crtc_vdisplay,
fb->base.format->cpp[0] * 8,
cur_size);
@@ -672,7 +672,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0))
+ if (WARN_ON(!HAS_DISPLAY(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 230aea69385d..8660af3fd755 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -50,7 +50,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
unsigned int i;
guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
- guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
+ guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
+ BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
for (i = 0; i < guc->send_regs.count; i++) {
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -521,6 +522,44 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
+/*
+ * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and
+ * then return, so waiting on the H2G is not enough to guarantee GuC is done.
+ * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to
+ * scratch register 14, so we can poll on that. Note that GuC does not ensure
+ * that the value in the register is different from
+ * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to
+ * take care of that ourselves as well.
+ */
+static int guc_sleep_state_action(struct intel_guc *guc,
+ const u32 *action, u32 len)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+ u32 status;
+
+ I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
+
+ ret = intel_guc_send(guc, action, len);
+ if (ret)
+ return ret;
+
+ ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
+ INTEL_GUC_SLEEP_STATE_INVALID_MASK,
+ 0, 0, 10, &status);
+ if (ret)
+ return ret;
+
+ if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
+ DRM_ERROR("GuC failed to change sleep state. "
+ "action=0x%x, err=%u\n",
+ action[0], status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* intel_guc_suspend() - notify GuC entering suspend state
* @guc: the guc
@@ -533,7 +572,7 @@ int intel_guc_suspend(struct intel_guc *guc)
intel_guc_ggtt_offset(guc, guc->shared_data)
};
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
+ return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}
/**
@@ -571,7 +610,7 @@ int intel_guc_resume(struct intel_guc *guc)
intel_guc_ggtt_offset(guc, guc->shared_data)
};
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
+ return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}
/**
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index ad42faf48c46..0f1c4f9ebfd8 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -95,6 +95,11 @@ struct intel_guc {
void (*notify)(struct intel_guc *guc);
};
+static inline bool intel_guc_is_alive(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_loaded(&guc->fw);
+}
+
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index a9e6fcce467c..a67144ee5ceb 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -78,7 +78,8 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
guc_fw->major_ver_wanted = KBL_FW_MAJOR;
guc_fw->minor_ver_wanted = KBL_FW_MINOR;
} else {
- DRM_WARN("%s: No firmware known for this platform!\n",
+ dev_info(dev_priv->drm.dev,
+ "%s: No firmware known for this platform!\n",
intel_uc_fw_type_repr(guc_fw->type));
}
}
@@ -125,66 +126,26 @@ static void guc_prepare_xfer(struct intel_guc *guc)
}
/* Copy RSA signature from the fw image to HW for verification */
-static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
+static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uc_fw *guc_fw = &guc->fw;
- struct sg_table *sg = vma->pages;
u32 rsa[UOS_RSA_SCRATCH_COUNT];
int i;
- if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
- guc_fw->rsa_offset) != sizeof(rsa))
- return -EINVAL;
+ sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
+ rsa, sizeof(rsa), guc->fw.rsa_offset);
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
-
- return 0;
}
-/*
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Architecturally, the DMA engine is bidirectional, and can potentially even
- * transfer between GTT locations. This functionality is left out of the API
- * for now as there is no need for it.
- */
-static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uc_fw *guc_fw = &guc->fw;
- unsigned long offset;
- u32 status;
- int ret;
-
- /*
- * The header plus uCode will be copied to WOPCM via DMA, excluding any
- * other components
- */
- I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
-
- /* Set the source address for the new blob */
- offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
- I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
- I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
- /*
- * Set the DMA destination. Current uCode expects the code to be
- * loaded at 8k; locations below this are used for the stack.
- */
- I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
- I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
- /* Finally start the DMA */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
-
- /* Wait for DMA to finish */
- ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
- 2, 100, &status);
- DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);
-
- return ret;
+ /* Did we complete the xfer? */
+ *status = I915_READ(DMA_CTRL);
+ return !(*status & START_DMA);
}
/*
@@ -217,8 +178,8 @@ static int guc_wait_ucode(struct intel_guc *guc)
* NB: Docs recommend not using the interrupt for completion.
* Measurements indicate this should take no more than 20ms, so a
* timeout here indicates that the GuC has failed and is unusable.
- * (Higher levels of the driver will attempt to fall back to
- * execlist mode if this happens.)
+ * (Higher levels of the driver may decide to reset the GuC and
+ * attempt the ucode load again if this happens.)
*/
ret = wait_for(guc_ready(guc, &status), 100);
DRM_DEBUG_DRIVER("GuC status %#x\n", status);
@@ -228,10 +189,52 @@ static int guc_wait_ucode(struct intel_guc *guc)
ret = -ENOEXEC;
}
+ if (ret == 0 && !guc_xfer_completed(guc, &status)) {
+ DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
+ status);
+ ret = -ENXIO;
+ }
+
return ret;
}
/*
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Architecturally, the DMA engine is bidirectional, and can potentially even
+ * transfer between GTT locations. This functionality is left out of the API
+ * for now as there is no need for it.
+ */
+static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uc_fw *guc_fw = &guc->fw;
+ unsigned long offset;
+
+ /*
+ * The header plus uCode will be copied to WOPCM via DMA, excluding any
+ * other components
+ */
+ I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
+
+ /* Set the source address for the new blob */
+ offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
+ I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+ I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+ /*
+ * Set the DMA destination. Current uCode expects the code to be
+ * loaded at 8k; locations below this are used for the stack.
+ */
+ I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
+ I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+ /* Finally start the DMA */
+ I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
+
+ return guc_wait_ucode(guc);
+}
+
/*
* Load the GuC firmware blob into the MinuteIA.
*/
static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
@@ -251,17 +254,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
* by the DMA engine in one operation, whereas the RSA signature is
* loaded via MMIO.
*/
- ret = guc_xfer_rsa(guc, vma);
- if (ret)
- DRM_WARN("GuC firmware signature xfer error %d\n", ret);
+ guc_xfer_rsa(guc, vma);
ret = guc_xfer_ucode(guc, vma);
- if (ret)
- DRM_WARN("GuC firmware code xfer error %d\n", ret);
-
- ret = guc_wait_ucode(guc);
- if (ret)
- DRM_ERROR("GuC firmware xfer error %d\n", ret);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 8382d591c784..b2f5148f4f17 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -39,6 +39,11 @@
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
+#define GUC_DOORBELL_INVALID 256
+
+#define GUC_DB_SIZE (PAGE_SIZE)
+#define GUC_WQ_SIZE (PAGE_SIZE * 2)
+
/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
@@ -59,9 +64,6 @@
#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
-#define GUC_DOORBELL_ENABLED 1
-#define GUC_DOORBELL_DISABLED 0
-
#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2)
@@ -219,26 +221,6 @@ struct uc_css_header {
u32 header_info;
} __packed;
-struct guc_doorbell_info {
- u32 db_status;
- u32 cookie;
- u32 reserved[14];
-} __packed;
-
-union guc_doorbell_qw {
- struct {
- u32 db_status;
- u32 cookie;
- };
- u64 value_qw;
-} __packed;
-
-#define GUC_NUM_DOORBELLS 256
-#define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS)
-
-#define GUC_DB_SIZE (PAGE_SIZE)
-#define GUC_WQ_SIZE (PAGE_SIZE * 2)
-
/* Work item for submitting workloads into work queue of GuC. */
struct guc_wq_item {
u32 header;
@@ -601,7 +583,9 @@ struct guc_shared_ctx_data {
* registers, where first register holds data treated as message header,
* and other registers are used to hold message payload.
*
- * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8
+ * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
+ * but no H2G command takes more than 8 parameters and the GuC FW
+ * itself uses an 8-element array to store the H2G message.
*
* +-----------+---------+---------+---------+
* | MMIO[0] | MMIO[1] | ... | MMIO[n] |
@@ -633,6 +617,8 @@ struct guc_shared_ctx_data {
* field.
*/
+#define GUC_MAX_MMIO_MSG_LEN 8
+
#define INTEL_GUC_MSG_TYPE_SHIFT 28
#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
#define INTEL_GUC_MSG_DATA_SHIFT 16
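A sketch of how a header word is packed per the layout above; type and
data are illustrative values assumed to fit their 4-bit and 12-bit fields,
and the low 16 bits carry the code:

u32 header = ((type << INTEL_GUC_MSG_TYPE_SHIFT) & INTEL_GUC_MSG_TYPE_MASK) |
	     (data << INTEL_GUC_MSG_DATA_SHIFT);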
@@ -687,6 +673,13 @@ enum intel_guc_report_status {
INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
};
+enum intel_guc_sleep_state_status {
+ INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0,
+ INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1,
+ INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2
+#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
+};
+
#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index d86084742a4a..57e7ad522c2f 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -104,6 +104,18 @@
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
+#define GUC_NUM_DOORBELLS 256
+
+/* format of the HW-monitored doorbell cacheline */
+struct guc_doorbell_info {
+ u32 db_status;
+#define GUC_DOORBELL_DISABLED 0
+#define GUC_DOORBELL_ENABLED 1
+
+ u32 cookie;
+ u32 reserved[14];
+} __packed;
+
#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
#define GEN8_DRB_VALID (1<<0)
#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index a81f04d46e87..1570dcbe249c 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -192,7 +192,15 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
return client->vaddr + client->doorbell_offset;
}
-static void __create_doorbell(struct intel_guc_client *client)
+static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
+ return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
+}
+
+static void __init_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *doorbell;
@@ -201,21 +209,19 @@ static void __create_doorbell(struct intel_guc_client *client)
doorbell->cookie = 0;
}
-static void __destroy_doorbell(struct intel_guc_client *client)
+static void __fini_doorbell(struct intel_guc_client *client)
{
- struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
struct guc_doorbell_info *doorbell;
u16 db_id = client->doorbell_id;
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_DISABLED;
- doorbell->cookie = 0;
/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
* to go to zero after updating db_status before we call the GuC to
* release the doorbell
*/
- if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
+ if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
WARN_ONCE(true, "Doorbell never became invalid after disable\n");
}
@@ -227,11 +233,11 @@ static int create_doorbell(struct intel_guc_client *client)
return -ENODEV; /* internal setup error, should never happen */
__update_doorbell_desc(client, client->doorbell_id);
- __create_doorbell(client);
+ __init_doorbell(client);
ret = __guc_allocate_doorbell(client->guc, client->stage_id);
if (ret) {
- __destroy_doorbell(client);
+ __fini_doorbell(client);
__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
client->stage_id, ret);
@@ -247,7 +253,7 @@ static int destroy_doorbell(struct intel_guc_client *client)
GEM_BUG_ON(!has_doorbell(client));
- __destroy_doorbell(client);
+ __fini_doorbell(client);
ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
if (ret)
DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
@@ -282,8 +288,7 @@ __get_process_desc(struct intel_guc_client *client)
/*
* Initialise the process descriptor shared with the GuC firmware.
*/
-static void guc_proc_desc_init(struct intel_guc *guc,
- struct intel_guc_client *client)
+static void guc_proc_desc_init(struct intel_guc_client *client)
{
struct guc_process_desc *desc;
@@ -304,6 +309,14 @@ static void guc_proc_desc_init(struct intel_guc *guc,
desc->priority = client->priority;
}
+static void guc_proc_desc_fini(struct intel_guc_client *client)
+{
+ struct guc_process_desc *desc;
+
+ desc = __get_process_desc(client);
+ memset(desc, 0, sizeof(*desc));
+}
+
static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
struct i915_vma *vma;
@@ -341,9 +354,9 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
* data structures relating to this client (doorbell, process descriptor,
* write queue, etc).
*/
-static void guc_stage_desc_init(struct intel_guc *guc,
- struct intel_guc_client *client)
+static void guc_stage_desc_init(struct intel_guc_client *client)
{
+ struct intel_guc *guc = client->guc;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
struct i915_gem_context *ctx = client->owner;
@@ -424,8 +437,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
desc->desc_private = ptr_to_u64(client);
}
-static void guc_stage_desc_fini(struct intel_guc *guc,
- struct intel_guc_client *client)
+static void guc_stage_desc_fini(struct intel_guc_client *client)
{
struct guc_stage_desc *desc;
@@ -486,14 +498,6 @@ static void guc_wq_item_append(struct intel_guc_client *client,
WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
-static void guc_reset_wq(struct intel_guc_client *client)
-{
- struct guc_process_desc *desc = __get_process_desc(client);
-
- desc->head = 0;
- desc->tail = 0;
-}
-
static void guc_ring_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *db;
@@ -746,30 +750,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
+ int i;
- list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+ priolist_for_each_request_consume(rq, rn, p, i) {
if (last && rq->hw_context != last->hw_context) {
- if (port == last_port) {
- __list_del_many(&p->requests,
- &rq->sched.link);
+ if (port == last_port)
goto done;
- }
if (submit)
port_assign(port, last);
port++;
}
- INIT_LIST_HEAD(&rq->sched.link);
+ list_del_init(&rq->sched.link);
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
+
last = rq;
submit = true;
}
rb_erase_cached(&p->node, &execlists->queue);
- INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
@@ -791,19 +793,8 @@ done:
static void guc_dequeue(struct intel_engine_cs *engine)
{
- unsigned long flags;
- bool submit;
-
- local_irq_save(flags);
-
- spin_lock(&engine->timeline.lock);
- submit = __guc_dequeue(engine);
- spin_unlock(&engine->timeline.lock);
-
- if (submit)
+ if (__guc_dequeue(engine))
guc_submit(engine);
-
- local_irq_restore(flags);
}
static void guc_submission_tasklet(unsigned long data)
@@ -812,6 +803,9 @@ static void guc_submission_tasklet(unsigned long data)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
struct i915_request *rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
rq = port_request(port);
while (rq && i915_request_completed(rq)) {
@@ -835,6 +829,8 @@ static void guc_submission_tasklet(unsigned long data)
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
guc_dequeue(engine);
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static struct i915_request *
@@ -877,72 +873,31 @@ guc_reset_prepare(struct intel_engine_cs *engine)
/* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 drbregl;
bool valid;
- GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
+ GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
- drbregl = I915_READ(GEN8_DRBREGL(db_id));
- valid = drbregl & GEN8_DRB_VALID;
+ valid = __doorbell_valid(guc, db_id);
if (test_bit(db_id, guc->doorbell_bitmap) == valid)
return true;
- DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
- db_id, drbregl, yesno(valid));
+ DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
+ db_id, yesno(valid));
return false;
}
static bool guc_verify_doorbells(struct intel_guc *guc)
{
+ bool doorbells_ok = true;
u16 db_id;
for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
if (!doorbell_ok(guc, db_id))
- return false;
-
- return true;
-}
-
-static int guc_clients_doorbell_init(struct intel_guc *guc)
-{
- int ret;
-
- ret = create_doorbell(guc->execbuf_client);
- if (ret)
- return ret;
-
- if (guc->preempt_client) {
- ret = create_doorbell(guc->preempt_client);
- if (ret) {
- destroy_doorbell(guc->execbuf_client);
- return ret;
- }
- }
-
- return 0;
-}
-
-static void guc_clients_doorbell_fini(struct intel_guc *guc)
-{
- /*
- * By the time we're here, GuC has already been reset.
- * Instead of trying (in vain) to communicate with it, let's just
- * cleanup the doorbell HW and our internal state.
- */
- if (guc->preempt_client) {
- __destroy_doorbell(guc->preempt_client);
- __update_doorbell_desc(guc->preempt_client,
- GUC_DOORBELL_INVALID);
- }
+ doorbells_ok = false;
- if (guc->execbuf_client) {
- __destroy_doorbell(guc->execbuf_client);
- __update_doorbell_desc(guc->execbuf_client,
- GUC_DOORBELL_INVALID);
- }
+ return doorbells_ok;
}
/**
@@ -1005,6 +960,10 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
}
client->vaddr = vaddr;
+ ret = reserve_doorbell(client);
+ if (ret)
+ goto err_vaddr;
+
client->doorbell_offset = __select_cacheline(guc);
/*
@@ -1017,13 +976,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
else
client->proc_desc_offset = (GUC_DB_SIZE / 2);
- guc_proc_desc_init(guc, client);
- guc_stage_desc_init(guc, client);
-
- ret = reserve_doorbell(client);
- if (ret)
- goto err_vaddr;
-
DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
priority, client, client->engines, client->stage_id);
DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
@@ -1045,7 +997,6 @@ err_client:
static void guc_client_free(struct intel_guc_client *client)
{
unreserve_doorbell(client);
- guc_stage_desc_fini(client->guc, client);
i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
ida_simple_remove(&client->guc->stage_ids, client->stage_id);
kfree(client);
@@ -1112,6 +1063,69 @@ static void guc_clients_destroy(struct intel_guc *guc)
guc_client_free(client);
}
+static int __guc_client_enable(struct intel_guc_client *client)
+{
+ int ret;
+
+ guc_proc_desc_init(client);
+ guc_stage_desc_init(client);
+
+ ret = create_doorbell(client);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ guc_stage_desc_fini(client);
+ guc_proc_desc_fini(client);
+ return ret;
+}
+
+static void __guc_client_disable(struct intel_guc_client *client)
+{
+ /*
+ * By the time we're here, GuC may have already been reset. If that is
+ * the case, instead of trying (in vain) to communicate with it, let's
+ * just clean up the doorbell HW and our internal state.
+ */
+ if (intel_guc_is_alive(client->guc))
+ destroy_doorbell(client);
+ else
+ __fini_doorbell(client);
+
+ guc_stage_desc_fini(client);
+ guc_proc_desc_fini(client);
+}
+
+static int guc_clients_enable(struct intel_guc *guc)
+{
+ int ret;
+
+ ret = __guc_client_enable(guc->execbuf_client);
+ if (ret)
+ return ret;
+
+ if (guc->preempt_client) {
+ ret = __guc_client_enable(guc->preempt_client);
+ if (ret) {
+ __guc_client_disable(guc->execbuf_client);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void guc_clients_disable(struct intel_guc *guc)
+{
+ if (guc->preempt_client)
+ __guc_client_disable(guc->preempt_client);
+
+ if (guc->execbuf_client)
+ __guc_client_disable(guc->execbuf_client);
+}
+
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
@@ -1295,15 +1309,11 @@ int intel_guc_submission_enable(struct intel_guc *guc)
GEM_BUG_ON(!guc->execbuf_client);
- guc_reset_wq(guc->execbuf_client);
- if (guc->preempt_client)
- guc_reset_wq(guc->preempt_client);
-
err = intel_guc_sample_forcewake(guc);
if (err)
return err;
- err = guc_clients_doorbell_init(guc);
+ err = guc_clients_enable(guc);
if (err)
return err;
@@ -1325,7 +1335,7 @@ void intel_guc_submission_disable(struct intel_guc *guc)
GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
guc_interrupts_release(dev_priv);
- guc_clients_doorbell_fini(guc);
+ guc_clients_disable(guc);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 26e48fc95543..1bf487f94254 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -16,6 +16,62 @@
#define KEY_LOAD_TRIES 5
+static
+bool intel_hdcp_is_ksv_valid(u8 *ksv)
+{
+ int i, ones = 0;
+ /* KSV has 20 1's and 20 0's */
+ for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
+ ones += hweight8(ksv[i]);
+ if (ones != 20)
+ return false;
+
+ return true;
+}
+
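A concrete example of the 20-ones rule: a KSV of five 0x5a bytes passes,
since each 0x5a (binary 01011010) contributes four set bits:

	/* Illustrative check only; 5 bytes * 4 ones each == 20. */
	u8 sample_ksv[DRM_HDCP_KSV_LEN] = { 0x5a, 0x5a, 0x5a, 0x5a, 0x5a };

	WARN_ON(!intel_hdcp_is_ksv_valid(sample_ksv));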
+static
+int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
+ const struct intel_hdcp_shim *shim, u8 *bksv)
+{
+ int ret, i, tries = 2;
+
+ /* HDCP spec states that we must retry the bksv if it is invalid */
+ for (i = 0; i < tries; i++) {
+ ret = shim->read_bksv(intel_dig_port, bksv);
+ if (ret)
+ return ret;
+ if (intel_hdcp_is_ksv_valid(bksv))
+ break;
+ }
+ if (i == tries) {
+ DRM_DEBUG_KMS("Bksv is invalid\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* Check whether both the platform and the sink are HDCP 1.4 capable */
+bool intel_hdcp_capable(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ const struct intel_hdcp_shim *shim = connector->hdcp.shim;
+ bool capable = false;
+ u8 bksv[5];
+
+ if (!shim)
+ return capable;
+
+ if (shim->hdcp_capable) {
+ shim->hdcp_capable(intel_dig_port, &capable);
+ } else {
+ if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
+ capable = true;
+ }
+
+ return capable;
+}
+
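A hypothetical caller sketch, not part of this change: probing code could
use the helper to log sink capability once the shim has been set up by
intel_hdcp_init():

	if (intel_hdcp_capable(connector))
		DRM_DEBUG_KMS("[CONNECTOR:%s] HDCP1.4 capable\n",
			      connector->base.name);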
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim)
{
@@ -168,18 +224,6 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
}
static
-bool intel_hdcp_is_ksv_valid(u8 *ksv)
-{
- int i, ones = 0;
- /* KSV has 20 1's and 20 0's */
- for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
- ones += hweight8(ksv[i]);
- if (ones != 20)
- return false;
- return true;
-}
-
-static
int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
@@ -383,7 +427,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE,
HDCP_SHA1_COMPLETE, 1)) {
- DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n");
+ DRM_ERROR("Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
@@ -404,7 +448,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
if (ret) {
- DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
+ DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
return ret;
}
@@ -414,7 +458,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
- DRM_ERROR("Max Topology Limit Exceeded\n");
+ DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
return -EPERM;
}
@@ -450,7 +494,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
}
if (i == tries) {
- DRM_ERROR("V Prime validation failed.(%d)\n", ret);
+ DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
goto err;
}
@@ -499,7 +543,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
if (ret)
return ret;
if (!hdcp_capable) {
- DRM_ERROR("Panel is not HDCP capable\n");
+ DRM_DEBUG_KMS("Panel is not HDCP capable\n");
return -EINVAL;
}
}
@@ -527,18 +571,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
memset(&bksv, 0, sizeof(bksv));
- /* HDCP spec states that we must retry the bksv if it is invalid */
- for (i = 0; i < tries; i++) {
- ret = shim->read_bksv(intel_dig_port, bksv.shim);
- if (ret)
- return ret;
- if (intel_hdcp_is_ksv_valid(bksv.shim))
- break;
- }
- if (i == tries) {
- DRM_ERROR("HDCP failed, Bksv is invalid\n");
- return -ENODEV;
- }
+ ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
+ if (ret < 0)
+ return ret;
I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
@@ -594,8 +629,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
}
if (i == tries) {
- DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
- I915_READ(PORT_HDCP_STATUS(port)));
+ DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
+ I915_READ(PORT_HDCP_STATUS(port)));
return -ETIMEDOUT;
}
@@ -618,14 +653,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
return 0;
}
-static
-struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
-{
- return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
-}
-
static int _intel_hdcp_disable(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
@@ -641,7 +671,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
return -ETIMEDOUT;
}
- ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
+ ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
if (ret) {
DRM_ERROR("Failed to disable HDCP signalling\n");
return ret;
@@ -653,6 +683,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
static int _intel_hdcp_enable(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
int i, ret, tries = 3;
@@ -677,8 +708,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
/* In case of authentication failures, the HDCP spec expects reauth. */
for (i = 0; i < tries; i++) {
- ret = intel_hdcp_auth(conn_to_dig_port(connector),
- connector->hdcp_shim);
+ ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
if (!ret)
return 0;
@@ -688,42 +718,50 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
_intel_hdcp_disable(connector);
}
- DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret);
+ DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
return ret;
}
+static inline
+struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
+{
+ return container_of(hdcp, struct intel_connector, hdcp);
+}
+
static void intel_hdcp_check_work(struct work_struct *work)
{
- struct intel_connector *connector = container_of(to_delayed_work(work),
- struct intel_connector,
- hdcp_check_work);
+ struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
+ struct intel_hdcp,
+ check_work);
+ struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+
if (!intel_hdcp_check_link(connector))
- schedule_delayed_work(&connector->hdcp_check_work,
+ schedule_delayed_work(&hdcp->check_work,
DRM_HDCP_CHECK_PERIOD_MS);
}
static void intel_hdcp_prop_work(struct work_struct *work)
{
- struct intel_connector *connector = container_of(work,
- struct intel_connector,
- hdcp_prop_work);
+ struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
+ prop_work);
+ struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
struct drm_device *dev = connector->base.dev;
struct drm_connector_state *state;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- mutex_lock(&connector->hdcp_mutex);
+ mutex_lock(&hdcp->mutex);
/*
* This worker is only used to flip between ENABLED/DESIRED. Either of
- * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
+ * those to UNDESIRED is handled by core. If value == UNDESIRED,
* we're running just after hdcp has been disabled, so just exit
*/
- if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
state = connector->base.state;
- state->content_protection = connector->hdcp_value;
+ state->content_protection = hdcp->value;
}
- mutex_unlock(&connector->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
@@ -735,8 +773,9 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
}
int intel_hdcp_init(struct intel_connector *connector,
- const struct intel_hdcp_shim *hdcp_shim)
+ const struct intel_hdcp_shim *shim)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
ret = drm_connector_attach_content_protection_property(
@@ -744,51 +783,53 @@ int intel_hdcp_init(struct intel_connector *connector,
if (ret)
return ret;
- connector->hdcp_shim = hdcp_shim;
- mutex_init(&connector->hdcp_mutex);
- INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
- INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
+ hdcp->shim = shim;
+ mutex_init(&hdcp->mutex);
+ INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
+ INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
return 0;
}
int intel_hdcp_enable(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- if (!connector->hdcp_shim)
+ if (!hdcp->shim)
return -ENOENT;
- mutex_lock(&connector->hdcp_mutex);
+ mutex_lock(&hdcp->mutex);
ret = _intel_hdcp_enable(connector);
if (ret)
goto out;
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&connector->hdcp_prop_work);
- schedule_delayed_work(&connector->hdcp_check_work,
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ schedule_delayed_work(&hdcp->check_work,
DRM_HDCP_CHECK_PERIOD_MS);
out:
- mutex_unlock(&connector->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
return ret;
}
int intel_hdcp_disable(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret = 0;
- if (!connector->hdcp_shim)
+ if (!hdcp->shim)
return -ENOENT;
- mutex_lock(&connector->hdcp_mutex);
+ mutex_lock(&hdcp->mutex);
- if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
ret = _intel_hdcp_disable(connector);
}
- mutex_unlock(&connector->hdcp_mutex);
- cancel_delayed_work_sync(&connector->hdcp_check_work);
+ mutex_unlock(&hdcp->mutex);
+ cancel_delayed_work_sync(&hdcp->check_work);
return ret;
}
@@ -828,17 +869,18 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
/* Implements Part 3 of the HDCP authorization procedure */
int intel_hdcp_check_link(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
int ret = 0;
- if (!connector->hdcp_shim)
+ if (!hdcp->shim)
return -ENOENT;
- mutex_lock(&connector->hdcp_mutex);
+ mutex_lock(&hdcp->mutex);
- if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
goto out;
if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
@@ -846,17 +888,15 @@ int intel_hdcp_check_link(struct intel_connector *connector)
connector->base.name, connector->base.base.id,
I915_READ(PORT_HDCP_STATUS(port)));
ret = -ENXIO;
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&connector->hdcp_prop_work);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
goto out;
}
- if (connector->hdcp_shim->check_link(intel_dig_port)) {
- if (connector->hdcp_value !=
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- connector->hdcp_value =
- DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&connector->hdcp_prop_work);
+ if (hdcp->shim->check_link(intel_dig_port)) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
}
goto out;
}
@@ -867,20 +907,20 @@ int intel_hdcp_check_link(struct intel_connector *connector)
ret = _intel_hdcp_disable(connector);
if (ret) {
DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&connector->hdcp_prop_work);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
goto out;
}
ret = _intel_hdcp_enable(connector);
if (ret) {
- DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&connector->hdcp_prop_work);
+ DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
goto out;
}
out:
- mutex_unlock(&connector->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a2dab0b6bde6..07e803a604bd 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -115,6 +115,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_ENABLE_VSC_HSW;
+ case DP_SDP_PPS:
+ return VDIP_ENABLE_PPS;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -136,6 +138,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
switch (type) {
case DP_SDP_VSC:
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
+ case DP_SDP_PPS:
+ return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD:
@@ -148,14 +152,25 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
}
}
-static void g4x_write_infoframe(struct drm_encoder *encoder,
+static int hsw_dip_data_size(unsigned int type)
+{
+ switch (type) {
+ case DP_SDP_VSC:
+ return VIDEO_DIP_VSC_DATA_SIZE;
+ case DP_SDP_PPS:
+ return VIDEO_DIP_PPS_DATA_SIZE;
+ default:
+ return VIDEO_DIP_DATA_SIZE;
+ }
+}
+
+static void g4x_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
int i;
@@ -186,31 +201,29 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(VIDEO_DIP_CTL);
}
-static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
+static bool g4x_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
}
-static void ibx_write_infoframe(struct drm_encoder *encoder,
+static void ibx_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -243,11 +256,10 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
-static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
+static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
u32 val = I915_READ(reg);
@@ -255,7 +267,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@@ -263,14 +275,13 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void cpt_write_infoframe(struct drm_encoder *encoder,
+static void cpt_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -306,10 +317,10 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
-static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
+static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
@@ -321,14 +332,13 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void vlv_write_infoframe(struct drm_encoder *encoder,
+static void vlv_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -361,18 +371,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
-static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
+static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@@ -380,21 +389,21 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void hsw_write_infoframe(struct drm_encoder *encoder,
+static void hsw_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
- int data_size = type == DP_SDP_VSC ?
- VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
+ int data_size;
int i;
u32 val = I915_READ(ctl_reg);
+ data_size = hsw_dip_data_size(type);
+
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
@@ -415,10 +424,10 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(ctl_reg);
}
-static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
+static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -443,11 +452,11 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
* trick them by giving an offset into the buffer and moving back the header
* bytes by one.
*/
-static void intel_write_infoframe(struct drm_encoder *encoder,
+static void intel_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
@@ -457,24 +466,25 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
return;
/* Insert the 'hole' (see big comment above) at position 3 */
- buffer[0] = buffer[1];
- buffer[1] = buffer[2];
- buffer[2] = buffer[3];
+ memmove(&buffer[0], &buffer[1], 3);
buffer[3] = 0;
len++;
- intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
+ intel_dig_port->write_infoframe(encoder,
+ crtc_state,
+ frame->any.type, buffer, len);
}
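The memmove preserves the old open-coded byte shuffle: hdmi_infoframe_pack()
is assumed to have written the frame at buffer + 1, so the three header bytes
slide down one position and a zero 'hole' lands at position 3:

	/* Assumed layout, packing at buffer + 1:
	 *   before: [--][T ][V ][L ][C ][P0][P1]...
	 *   after:  [T ][V ][L ][0 ][C ][P0][P1]...
	 * T=type, V=version, L=length, C=checksum, Pn=payload. */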
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
struct drm_connector *connector = &intel_hdmi->attached_connector->base;
- bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+ bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported ||
+ connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
union hdmi_infoframe frame;
int ret;
@@ -486,8 +496,10 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
return;
}
- if (crtc_state->ycbcr420)
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+ else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
else
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
@@ -502,10 +514,11 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
conn_state);
/* TODO: handle pixel repetition for YCBCR420 outputs */
- intel_write_infoframe(encoder, crtc_state, &frame);
+ intel_write_infoframe(encoder, crtc_state,
+ &frame);
}
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
union hdmi_infoframe frame;
@@ -519,11 +532,12 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
frame.spd.sdi = HDMI_SPD_SDI_PC;
- intel_write_infoframe(encoder, crtc_state, &frame);
+ intel_write_infoframe(encoder, crtc_state,
+ &frame);
}
static void
-intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
+intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -536,20 +550,21 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
if (ret < 0)
return;
- intel_write_infoframe(encoder, crtc_state, &frame);
+ intel_write_infoframe(encoder, crtc_state,
+ &frame);
}
-static void g4x_set_infoframes(struct drm_encoder *encoder,
+static void g4x_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+ u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -657,11 +672,11 @@ static bool gcp_default_phase_possible(int pipe_bpp,
mode->crtc_htotal/2 % pixels_per_group == 0);
}
-static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
+static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg;
u32 val = 0;
@@ -689,18 +704,18 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
return val != 0;
}
-static void ibx_set_infoframes(struct drm_encoder *encoder,
+static void ibx_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+ u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -742,14 +757,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
-static void cpt_set_infoframes(struct drm_encoder *encoder,
+static void cpt_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -785,18 +800,17 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
-static void vlv_set_infoframes(struct drm_encoder *encoder,
+static void vlv_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+ u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -838,12 +852,12 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
-static void hsw_set_infoframes(struct drm_encoder *encoder,
+static void hsw_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
u32 val = I915_READ(reg);
@@ -965,13 +979,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
DRM_HDCP_AN_LEN);
if (ret) {
- DRM_ERROR("Write An over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret);
return ret;
}
ret = intel_gmbus_output_aksv(adapter);
if (ret < 0) {
- DRM_ERROR("Failed to output aksv (%d)\n", ret);
+ DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret);
return ret;
}
return 0;
@@ -984,7 +998,7 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret)
- DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret);
return ret;
}
@@ -996,7 +1010,7 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret)
- DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret);
return ret;
}
@@ -1009,7 +1023,7 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
return ret;
}
*repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
@@ -1024,7 +1038,7 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret)
- DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret);
return ret;
}
@@ -1037,7 +1051,7 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
return ret;
}
*ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
@@ -1052,7 +1066,7 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
if (ret) {
- DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret);
return ret;
}
return 0;
@@ -1070,7 +1084,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
part, DRM_HDCP_V_PRIME_PART_LEN);
if (ret)
- DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret);
+ DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret);
return ret;
}
@@ -1217,7 +1231,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
- if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
+ if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
pipe_config->has_infoframe = true;
if (tmp & SDVO_AUDIO_ENABLE)
@@ -1438,7 +1452,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- intel_dig_port->set_infoframes(&encoder->base, false,
+ intel_dig_port->set_infoframes(encoder,
+ false,
old_crtc_state, old_conn_state);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
@@ -1597,6 +1612,8 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
struct drm_atomic_state *state = crtc_state->base.state;
struct drm_connector_state *connector_state;
struct drm_connector *connector;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int i;
if (HAS_GMCH_DISPLAY(dev_priv))
@@ -1624,7 +1641,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
if (connector_state->crtc != crtc_state->base.crtc)
continue;
- if (crtc_state->ycbcr420) {
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
const struct drm_hdmi_info *hdmi = &info->hdmi;
if (bpc == 12 && !(hdmi->y420_dc_modes &
@@ -1645,7 +1662,14 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
/* Display WA #1139: glk */
if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
- crtc_state->base.adjusted_mode.htotal > 5460)
+ adjusted_mode->htotal > 5460)
+ return false;
+
+ /* Display Wa_1405510057:icl */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ bpc == 10 && IS_ICELAKE(dev_priv) &&
+ (adjusted_mode->crtc_hblank_end -
+ adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
return true;
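A numeric illustration of the Wa_1405510057 condition, using made-up timings:

	/* Hypothetical mode: crtc_hblank_start = 3840, crtc_hblank_end = 4250.
	 * (4250 - 3840) % 8 == 410 % 8 == 2, so 10 bpc YCBCR420 output would
	 * be rejected on Icelake by the check above. */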
@@ -1669,7 +1693,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
*clock_12bpc /= 2;
*clock_10bpc /= 2;
*clock_8bpc /= 2;
- config->ycbcr420 = true;
+ config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
/* YCBCR 420 output conversion needs a scaler */
if (skl_update_scaler_crtc(config)) {
@@ -1703,6 +1727,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
if (pipe_config->has_hdmi_sink)
@@ -1973,7 +1998,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
intel_hdmi_prepare(encoder, pipe_config);
- intel_dig_port->set_infoframes(&encoder->base,
+ intel_dig_port->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
}
@@ -1991,7 +2016,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
0x2b247878);
- dport->set_infoframes(&encoder->base,
+ dport->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
@@ -2062,7 +2087,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
/* Use 800mV-0dB */
chv_set_phy_signal_level(encoder, 128, 102, false);
- dport->set_infoframes(&encoder->base,
+ dport->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
@@ -2074,13 +2099,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
chv_phy_release_cl2_override(encoder);
}
+static int
+intel_hdmi_connector_register(struct drm_connector *connector)
+{
+ int ret;
+
+ ret = intel_connector_register(connector);
+ if (ret)
+ return ret;
+
+ i915_debugfs_connector_add(connector);
+
+ return ret;
+}
+
static void intel_hdmi_destroy(struct drm_connector *connector)
{
if (intel_attached_hdmi(connector)->cec_notifier)
cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
- kfree(to_intel_connector(connector)->detect_edid);
- drm_connector_cleanup(connector);
- kfree(connector);
+
+ intel_connector_destroy(connector);
}
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -2089,7 +2127,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_connector_register,
+ .late_register = intel_hdmi_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -2109,11 +2147,16 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+
+ if (!HAS_GMCH_DISPLAY(dev_priv))
+ drm_connector_attach_max_bpc_property(connector, 8, 12);
}
/*
@@ -2324,9 +2367,18 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
intel_dig_port->set_infoframes = g4x_set_infoframes;
intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
} else if (HAS_DDI(dev_priv)) {
- intel_dig_port->write_infoframe = hsw_write_infoframe;
- intel_dig_port->set_infoframes = hsw_set_infoframes;
- intel_dig_port->infoframe_enabled = hsw_infoframe_enabled;
+ if (intel_dig_port->lspcon.active) {
+ intel_dig_port->write_infoframe =
+ lspcon_write_infoframe;
+ intel_dig_port->set_infoframes = lspcon_set_infoframes;
+ intel_dig_port->infoframe_enabled =
+ lspcon_infoframe_enabled;
+ } else {
+ intel_dig_port->set_infoframes = hsw_set_infoframes;
+ intel_dig_port->infoframe_enabled =
+ hsw_infoframe_enabled;
+ intel_dig_port->write_infoframe = hsw_write_infoframe;
+ }
} else if (HAS_PCH_IBX(dev_priv)) {
intel_dig_port->write_infoframe = ibx_write_infoframe;
intel_dig_port->set_infoframes = ibx_set_infoframes;
@@ -2485,5 +2537,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_infoframe_init(intel_dig_port);
+ intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 648a13c6043c..e24174d08fed 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -114,51 +114,68 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
/**
- * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
* @dev_priv: private driver data pointer
* @pin: the pin to gather stats on
+ * @long_hpd: whether the HPD IRQ was long or short
*
- * Gather stats about HPD irqs from the specified @pin, and detect irq
+ * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
* storms. Only the pin specific stats and state are changed, the caller is
* responsible for further action.
*
- * The number of irqs that are allowed within @HPD_STORM_DETECT_PERIOD is
+ * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
* stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
- * @HPD_STORM_DEFAULT_THRESHOLD. If this threshold is exceeded, it's
- * considered an irq storm and the irq state is set to @HPD_MARK_DISABLED.
+ * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
+ * short IRQs count as +1. If this threshold is exceeded, it's considered an
+ * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
+ *
+ * By default, most systems will only count long IRQs towards
+ * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
+ * suffer from short IRQ storms and must also track these. Because short IRQ
+ * storms are naturally caused by sideband interactions with DP MST devices,
+ * short IRQ detection is only enabled for systems without DP MST support.
+ * Systems which are new enough to support DP MST are far less likely to
+ * suffer from IRQ storms at all, so this is fine.
*
* The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
* and should only be adjusted for automated hotplug testing.
*
- * Return true if an irq storm was detected on @pin.
+ * Return true if an IRQ storm was detected on @pin.
*/
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
- enum hpd_pin pin)
+ enum hpd_pin pin, bool long_hpd)
{
- unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
+ struct i915_hotplug *hpd = &dev_priv->hotplug;
+ unsigned long start = hpd->stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
- const int threshold = dev_priv->hotplug.hpd_storm_threshold;
+ const int increment = long_hpd ? 10 : 1;
+ const int threshold = hpd->hpd_storm_threshold;
bool storm = false;
+ if (!threshold ||
+ (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
+ return false;
+
if (!time_in_range(jiffies, start, end)) {
- dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
- dev_priv->hotplug.stats[pin].count = 0;
- DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
- } else if (dev_priv->hotplug.stats[pin].count > threshold &&
- threshold) {
- dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
+ hpd->stats[pin].last_jiffies = jiffies;
+ hpd->stats[pin].count = 0;
+ }
+
+ hpd->stats[pin].count += increment;
+ if (hpd->stats[pin].count > threshold) {
+ hpd->stats[pin].state = HPD_MARK_DISABLED;
DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- dev_priv->hotplug.stats[pin].count++;
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
- dev_priv->hotplug.stats[pin].count);
+ hpd->stats[pin].count);
}
return storm;
}
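A worked example of the new weighting, assuming a threshold of 50 and
short-storm detection enabled on the platform:

	/* Four long pulses (4 * 10 = 40) plus eleven short pulses (11 * 1)
	 * inside HPD_STORM_DETECT_PERIOD gives count = 51 > 50, so the pin
	 * is marked HPD_MARK_DISABLED and a storm is reported. */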
-static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
+static void
+intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct intel_connector *intel_connector;
@@ -228,7 +245,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_connector *intel_connector = to_intel_connector(connector);
- if (intel_connector->encoder->hpd_pin == pin) {
+ /* Don't check MST ports, they don't have pins */
+ if (!intel_connector->mst_port &&
+ intel_connector->encoder->hpd_pin == pin) {
if (connector->polled != intel_connector->polled)
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
connector->name);
@@ -346,8 +365,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
hpd_event_bits = dev_priv->hotplug.event_bits;
dev_priv->hotplug.event_bits = 0;
- /* Disable hotplug on connectors that hit an irq storm. */
- intel_hpd_irq_storm_disable(dev_priv);
+ /* Enable polling for connectors which had HPD IRQ storms */
+ intel_hpd_irq_storm_switch_to_polling(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -395,37 +414,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
struct intel_encoder *encoder;
bool storm_detected = false;
bool queue_dig = false, queue_hp = false;
+ u32 long_hpd_pulse_mask = 0;
+ u32 short_hpd_pulse_mask = 0;
+ enum hpd_pin pin;
if (!pin_mask)
return;
spin_lock(&dev_priv->irq_lock);
+
+ /*
+ * Determine whether ->hpd_pulse() exists for each pin, and
+ * whether we have a short or a long pulse. This is needed
+ * as each pin may have up to two encoders (HDMI and DP) and
+ * only one of them (DP) will have ->hpd_pulse().
+ */
for_each_intel_encoder(&dev_priv->drm, encoder) {
- enum hpd_pin pin = encoder->hpd_pin;
bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
+ enum port port = encoder->port;
+ bool long_hpd;
+ pin = encoder->hpd_pin;
if (!(BIT(pin) & pin_mask))
continue;
- if (has_hpd_pulse) {
- bool long_hpd = long_mask & BIT(pin);
- enum port port = encoder->port;
+ if (!has_hpd_pulse)
+ continue;
- DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
- long_hpd ? "long" : "short");
- /*
- * For long HPD pulses we want to have the digital queue happen,
- * but we still want HPD storm detection to function.
- */
- queue_dig = true;
- if (long_hpd) {
- dev_priv->hotplug.long_port_mask |= (1 << port);
- } else {
- /* for short HPD just trigger the digital queue */
- dev_priv->hotplug.short_port_mask |= (1 << port);
- continue;
- }
+ long_hpd = long_mask & BIT(pin);
+
+ DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+ long_hpd ? "long" : "short");
+ queue_dig = true;
+
+ if (long_hpd) {
+ long_hpd_pulse_mask |= BIT(pin);
+ dev_priv->hotplug.long_port_mask |= BIT(port);
+ } else {
+ short_hpd_pulse_mask |= BIT(pin);
+ dev_priv->hotplug.short_port_mask |= BIT(port);
}
+ }
+
+ /* Now process each pin just once */
+ for_each_hpd_pin(pin) {
+ bool long_hpd;
+
+ if (!(BIT(pin) & pin_mask))
+ continue;
if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
/*
@@ -442,17 +478,30 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
continue;
- if (!has_hpd_pulse) {
+ /*
+ * Delegate to ->hpd_pulse() if one of the encoders for this
+ * pin has it, otherwise let the hotplug_work deal with this
+ * pin directly.
+ */
+ if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
+ long_hpd = long_hpd_pulse_mask & BIT(pin);
+ } else {
dev_priv->hotplug.event_bits |= BIT(pin);
+ long_hpd = true;
queue_hp = true;
}
- if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
+ if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
dev_priv->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
+ queue_hp = true;
}
}
+ /*
+ * Disable any IRQs that storms were detected on. Polling enablement
+ * happens later in our hotplug work.
+ */
if (storm_detected && dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 37ef540dd280..bc27b691d824 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -108,13 +108,14 @@ fail:
* This function reads status register to verify if HuC
* firmware was successfully loaded.
*
- * Returns positive value if HuC firmware is loaded and verified
- * and -ENODEV if HuC is not present.
+ * Returns: 1 if HuC firmware is loaded and verified,
+ * 0 if HuC firmware is not loaded, and -ENODEV if HuC
+ * is not present on this platform.
*/
int intel_huc_check_status(struct intel_huc *huc)
{
struct drm_i915_private *dev_priv = huc_to_i915(huc);
- u32 status;
+ bool status;
if (!HAS_HUC(dev_priv))
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 33d87ab93fdd..802d0394ccc4 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -817,7 +817,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index cdf19553ffac..5d5336fbe7b0 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
lpe_audio_platdev_destroy(dev_priv);
irq_free_desc(dev_priv->lpe_audio.irq);
-}
+ dev_priv->lpe_audio.irq = -1;
+ dev_priv->lpe_audio.platdev = NULL;
+}
/**
* intel_lpe_audio_notify() - notify lpe audio event
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 43957bb37a42..d7fa301b5ec7 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -259,63 +259,6 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
ce->lrc_desc = desc;
}
-static struct i915_priolist *
-lookup_priolist(struct intel_engine_cs *engine, int prio)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_priolist *p;
- struct rb_node **parent, *rb;
- bool first = true;
-
- if (unlikely(execlists->no_priolist))
- prio = I915_PRIORITY_NORMAL;
-
-find_priolist:
- /* most positive priority is scheduled first, equal priorities fifo */
- rb = NULL;
- parent = &execlists->queue.rb_root.rb_node;
- while (*parent) {
- rb = *parent;
- p = to_priolist(rb);
- if (prio > p->priority) {
- parent = &rb->rb_left;
- } else if (prio < p->priority) {
- parent = &rb->rb_right;
- first = false;
- } else {
- return p;
- }
- }
-
- if (prio == I915_PRIORITY_NORMAL) {
- p = &execlists->default_priolist;
- } else {
- p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
- /* Convert an allocation failure to a priority bump */
- if (unlikely(!p)) {
- prio = I915_PRIORITY_NORMAL; /* recurses just once */
-
- /* To maintain ordering with all rendering, after an
- * allocation failure we have to disable all scheduling.
- * Requests will then be executed in fifo, and schedule
- * will ensure that dependencies are emitted in fifo.
- * There will be still some reordering with existing
- * requests, so if userspace lied about their
- * dependencies that reordering may be visible.
- */
- execlists->no_priolist = true;
- goto find_priolist;
- }
- }
-
- p->priority = prio;
- INIT_LIST_HEAD(&p->requests);
- rb_link_node(&p->node, rb, parent);
- rb_insert_color_cached(&p->node, &execlists->queue, first);
-
- return p;
-}
-
static void unwind_wa_tail(struct i915_request *rq)
{
rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
@@ -324,9 +267,9 @@ static void unwind_wa_tail(struct i915_request *rq)
static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
- struct i915_request *rq, *rn;
- struct i915_priolist *uninitialized_var(p);
- int last_prio = I915_PRIORITY_INVALID;
+ struct i915_request *rq, *rn, *active = NULL;
+ struct list_head *uninitialized_var(pl);
+ int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
lockdep_assert_held(&engine->timeline.lock);
@@ -334,19 +277,34 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
&engine->timeline.requests,
link) {
if (i915_request_completed(rq))
- return;
+ break;
__i915_request_unsubmit(rq);
unwind_wa_tail(rq);
+ GEM_BUG_ON(rq->hw_context->active);
+
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
- if (rq_prio(rq) != last_prio) {
- last_prio = rq_prio(rq);
- p = lookup_priolist(engine, last_prio);
+ if (rq_prio(rq) != prio) {
+ prio = rq_prio(rq);
+ pl = i915_sched_lookup_priolist(engine, prio);
}
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+
+ list_add(&rq->sched.link, pl);
- GEM_BUG_ON(p->priority != rq_prio(rq));
- list_add(&rq->sched.link, &p->requests);
+ active = rq;
+ }
+
+ /*
+ * The active request is now effectively the start of a new client
+ * stream, so give it the equivalent small priority bump to prevent
+ * it being gazumped a second time by another peer.
+ */
+ if (!(prio & I915_PRIORITY_NEWCLIENT)) {
+ prio |= I915_PRIORITY_NEWCLIENT;
+ list_move_tail(&active->sched.link,
+ i915_sched_lookup_priolist(engine, prio));
}
}
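The NEWCLIENT handling above works because the priority value carries a low-order flag on top of the base level: OR-ing the flag in sorts the request ahead of equal-level peers without promoting it a whole level. A standalone illustration (the shift and flag values are invented for the sketch):

#include <stdio.h>

#define PRIO_SHIFT	1
#define PRIO_NEWCLIENT	(1 << 0)	/* illustrative low-order flag */

int main(void)
{
	int peer = 0 << PRIO_SHIFT;		/* plain normal priority */
	int active = peer | PRIO_NEWCLIENT;	/* bumped, same level */

	/* most positive priority is scheduled first, equal priorities fifo */
	printf("%s runs first\n", active > peer ? "active" : "peer");
	return 0;
}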
@@ -355,13 +313,8 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
- unsigned long flags;
-
- spin_lock_irqsave(&engine->timeline.lock, flags);
__unwind_incomplete_requests(engine);
-
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static inline void
@@ -394,13 +347,17 @@ execlists_user_end(struct intel_engine_execlists *execlists)
static inline void
execlists_context_schedule_in(struct i915_request *rq)
{
+ GEM_BUG_ON(rq->hw_context->active);
+
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(rq->engine);
+ rq->hw_context->active = rq->engine;
}
static inline void
execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
{
+ rq->hw_context->active = NULL;
intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, status);
trace_i915_request_out(rq);
@@ -417,21 +374,32 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
static u64 execlists_update_context(struct i915_request *rq)
{
+ struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
struct intel_context *ce = rq->hw_context;
- struct i915_hw_ppgtt *ppgtt =
- rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
- /* True 32b PPGTT with dynamic page allocation: update PDP
+ /*
+ * True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
* PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode.
*/
- if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
+ if (!i915_vm_is_48bit(&ppgtt->vm))
execlists_update_context_pdps(ppgtt, reg_state);
+ /*
+ * Make sure the context image is complete before we submit it to HW.
+ *
+ * Ostensibly, writes (including the WCB) should be flushed prior to
+ * an uncached write such as our mmio register access, the empirical
+ * evidence (esp. on Braswell) suggests that the WC write into memory
+ * may not be visible to the HW prior to the completion of the UC
+ * register write and that we may begin execution from the context
+ * before its image is complete, leading to invalid PD chasing.
+ */
+ wmb();
return ce->lrc_desc;
}
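The wmb() comment above describes a general ordering pattern: drain write-combined stores into the context image before the uncached mmio write that may start the hardware reading it. A hedged kernel-style fragment of just that ordering (SUBMIT_REG and submit_context() are placeholders, not real i915 symbols):

/* Sketch only: SUBMIT_REG is a made-up offset for illustration. */
#define SUBMIT_REG 0x0

static void submit_context(void __iomem *mmio, u32 *lrc_reg_state,
			   u32 tail, u64 desc)
{
	lrc_reg_state[CTX_RING_TAIL + 1] = tail; /* WC write to ctx image */
	wmb();				/* flush the write-combine buffer */
	writeq(desc, mmio + SUBMIT_REG); /* ...before the UC mmio write */
}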
@@ -669,8 +637,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
+ int i;
- list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+ priolist_for_each_request_consume(rq, rn, p, i) {
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@@ -689,11 +658,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* combine this request with the last, then we
* are done.
*/
- if (port == last_port) {
- __list_del_many(&p->requests,
- &rq->sched.link);
+ if (port == last_port)
goto done;
- }
/*
* If GVT overrides us we only ever submit
@@ -703,11 +669,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* request) to the second port.
*/
if (ctx_single_port_submission(last->hw_context) ||
- ctx_single_port_submission(rq->hw_context)) {
- __list_del_many(&p->requests,
- &rq->sched.link);
+ ctx_single_port_submission(rq->hw_context))
goto done;
- }
GEM_BUG_ON(last->hw_context == rq->hw_context);
@@ -718,15 +681,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(port_isset(port));
}
- INIT_LIST_HEAD(&rq->sched.link);
+ list_del_init(&rq->sched.link);
+
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
+
last = rq;
submit = true;
}
rb_erase_cached(&p->node, &execlists->queue);
- INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
@@ -803,6 +767,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
+ const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
+
/*
* After a reset, the HW starts writing into CSB entry [0]. We
* therefore have to set our HEAD pointer back one entry so that
@@ -812,8 +778,8 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
* inline comparison of our cached head position against the last HW
* write works even before the first interrupt.
*/
- execlists->csb_head = execlists->csb_write_reset;
- WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
}
static void nop_submission_tasklet(unsigned long data)
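Resetting both the cached head and the write pointer to GEN8_CSB_ENTRIES - 1 means the first hardware write (into entry 0) already differs from the cached head, so the normal head != write comparison fires without a first-interrupt special case. A toy ring-buffer model of that behaviour (entry count matches GEN8_CSB_ENTRIES, everything else is illustrative):

#include <stdio.h>

#define CSB_ENTRIES 6

int main(void)
{
	int head = CSB_ENTRIES - 1;	/* cached head, reset value */
	int write = CSB_ENTRIES - 1;	/* HW write pointer, reset value */

	write = 0;	/* hardware posts its first event into entry 0 */
	while (head != write) {
		head = (head + 1) % CSB_ENTRIES;
		printf("process CSB entry %d\n", head);
	}
	return 0;
}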
@@ -854,27 +820,34 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline.requests, link) {
GEM_BUG_ON(!rq->global_seqno);
- if (!i915_request_completed(rq))
- dma_fence_set_error(&rq->fence, -EIO);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ continue;
+
+ dma_fence_set_error(&rq->fence, -EIO);
}
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
+ int i;
- list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
- INIT_LIST_HEAD(&rq->sched.link);
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ list_del_init(&rq->sched.link);
dma_fence_set_error(&rq->fence, -EIO);
__i915_request_submit(rq);
}
rb_erase_cached(&p->node, &execlists->queue);
- INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
+ intel_write_status_page(engine,
+ I915_GEM_HWS_INDEX,
+ intel_engine_last_submit(engine));
+
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority = INT_MIN;
@@ -1076,13 +1049,7 @@ static void queue_request(struct intel_engine_cs *engine,
struct i915_sched_node *node,
int prio)
{
- list_add_tail(&node->link,
- &lookup_priolist(engine, prio)->requests);
-}
-
-static void __update_queue(struct intel_engine_cs *engine, int prio)
-{
- engine->execlists.queue_priority = prio;
+ list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
}
static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -1101,7 +1068,7 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
static void submit_queue(struct intel_engine_cs *engine, int prio)
{
if (prio > engine->execlists.queue_priority) {
- __update_queue(engine, prio);
+ engine->execlists.queue_priority = prio;
__submit_queue_imm(engine);
}
}
@@ -1124,139 +1091,6 @@ static void execlists_submit_request(struct i915_request *request)
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static struct i915_request *sched_to_request(struct i915_sched_node *node)
-{
- return container_of(node, struct i915_request, sched);
-}
-
-static struct intel_engine_cs *
-sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
-{
- struct intel_engine_cs *engine = sched_to_request(node)->engine;
-
- GEM_BUG_ON(!locked);
-
- if (engine != locked) {
- spin_unlock(&locked->timeline.lock);
- spin_lock(&engine->timeline.lock);
- }
-
- return engine;
-}
-
-static void execlists_schedule(struct i915_request *request,
- const struct i915_sched_attr *attr)
-{
- struct i915_priolist *uninitialized_var(pl);
- struct intel_engine_cs *engine, *last;
- struct i915_dependency *dep, *p;
- struct i915_dependency stack;
- const int prio = attr->priority;
- LIST_HEAD(dfs);
-
- GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
-
- if (i915_request_completed(request))
- return;
-
- if (prio <= READ_ONCE(request->sched.attr.priority))
- return;
-
- /* Need BKL in order to use the temporary link inside i915_dependency */
- lockdep_assert_held(&request->i915->drm.struct_mutex);
-
- stack.signaler = &request->sched;
- list_add(&stack.dfs_link, &dfs);
-
- /*
- * Recursively bump all dependent priorities to match the new request.
- *
- * A naive approach would be to use recursion:
- * static void update_priorities(struct i915_sched_node *node, prio) {
- * list_for_each_entry(dep, &node->signalers_list, signal_link)
- * update_priorities(dep->signal, prio)
- * queue_request(node);
- * }
- * but that may have unlimited recursion depth and so runs a very
- * real risk of overunning the kernel stack. Instead, we build
- * a flat list of all dependencies starting with the current request.
- * As we walk the list of dependencies, we add all of its dependencies
- * to the end of the list (this may include an already visited
- * request) and continue to walk onwards onto the new dependencies. The
- * end result is a topological list of requests in reverse order, the
- * last element in the list is the request we must execute first.
- */
- list_for_each_entry(dep, &dfs, dfs_link) {
- struct i915_sched_node *node = dep->signaler;
-
- /*
- * Within an engine, there can be no cycle, but we may
- * refer to the same dependency chain multiple times
- * (redundant dependencies are not eliminated) and across
- * engines.
- */
- list_for_each_entry(p, &node->signalers_list, signal_link) {
- GEM_BUG_ON(p == dep); /* no cycles! */
-
- if (i915_sched_node_signaled(p->signaler))
- continue;
-
- GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
- if (prio > READ_ONCE(p->signaler->attr.priority))
- list_move_tail(&p->dfs_link, &dfs);
- }
- }
-
- /*
- * If we didn't need to bump any existing priorities, and we haven't
- * yet submitted this request (i.e. there is no potential race with
- * execlists_submit_request()), we can set our own priority and skip
- * acquiring the engine locks.
- */
- if (request->sched.attr.priority == I915_PRIORITY_INVALID) {
- GEM_BUG_ON(!list_empty(&request->sched.link));
- request->sched.attr = *attr;
- if (stack.dfs_link.next == stack.dfs_link.prev)
- return;
- __list_del_entry(&stack.dfs_link);
- }
-
- last = NULL;
- engine = request->engine;
- spin_lock_irq(&engine->timeline.lock);
-
- /* Fifo and depth-first replacement ensure our deps execute before us */
- list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
- struct i915_sched_node *node = dep->signaler;
-
- INIT_LIST_HEAD(&dep->dfs_link);
-
- engine = sched_lock_engine(node, engine);
-
- if (prio <= node->attr.priority)
- continue;
-
- node->attr.priority = prio;
- if (!list_empty(&node->link)) {
- if (last != engine) {
- pl = lookup_priolist(engine, prio);
- last = engine;
- }
- GEM_BUG_ON(pl->priority != prio);
- list_move_tail(&node->link, &pl->requests);
- }
-
- if (prio > engine->execlists.queue_priority &&
- i915_sw_fence_done(&sched_to_request(node)->submit)) {
- /* defer submission until after all of our updates */
- __update_queue(engine, prio);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- }
- }
-
- spin_unlock_irq(&engine->timeline.lock);
-}
-
static void execlists_context_destroy(struct intel_context *ce)
{
GEM_BUG_ON(ce->pin_count);
@@ -1272,6 +1106,28 @@ static void execlists_context_destroy(struct intel_context *ce)
static void execlists_context_unpin(struct intel_context *ce)
{
+ struct intel_engine_cs *engine;
+
+ /*
+ * The tasklet may still be using a pointer to our state, via an
+ * old request. However, since we know we only unpin the context
+ * on retirement of the following request, we know that the last
+ * request referencing us will have had a completion CS interrupt.
+ * If we see that it is still active, it means that the tasklet hasn't
+ * had the chance to run yet; let it run before we teardown the
+ * reference it may use.
+ */
+ engine = READ_ONCE(ce->active);
+ if (unlikely(engine)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ process_csb(engine);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+ GEM_BUG_ON(READ_ONCE(ce->active));
+ }
+
i915_gem_context_unpin_hw_id(ce->gem_context);
intel_ring_unpin(ce->ring);
@@ -1375,6 +1231,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+ GEM_BUG_ON(!ctx->ppgtt);
if (likely(ce->pin_count++))
return ce;
@@ -1431,9 +1288,10 @@ static int execlists_request_alloc(struct i915_request *request)
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
+ /* NB no one else is allowed to scribble over scratch + 256! */
*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1447,7 +1305,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
return batch;
@@ -1484,7 +1342,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch) +
+ i915_scratch_offset(engine->i915) +
2 * CACHELINE_BYTES);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1553,18 +1411,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
- /* WaClearSlmSpaceAtContextSwitch:kbl */
- /* Actual scratch location is at 128 bytes offset */
- if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
- batch = gen8_emit_pipe_control(batch,
- PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch)
- + 2 * CACHELINE_BYTES);
- }
-
/* WaMediaPoolStateCmdInWABB:bxt,glk */
if (HAS_POOLED_EU(engine->i915)) {
/*
@@ -1679,7 +1525,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
unsigned int i;
int ret;
- if (GEM_WARN_ON(engine->id != RCS))
+ if (GEM_DEBUG_WARN_ON(engine->id != RCS))
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
@@ -1718,8 +1564,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
*/
for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
wa_bb[i]->offset = batch_ptr - batch;
- if (GEM_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
- CACHELINE_BYTES))) {
+ if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
+ CACHELINE_BYTES))) {
ret = -EINVAL;
break;
}
@@ -1781,6 +1627,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
+ intel_engine_apply_workarounds(engine);
+
intel_mocs_init_engine(engine);
intel_engine_reset_breadcrumbs(engine);
@@ -1805,7 +1653,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
+ intel_engine_apply_whitelist(engine);
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
@@ -1828,7 +1676,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
+ intel_engine_apply_whitelist(engine);
return 0;
}
@@ -1902,7 +1750,7 @@ static void execlists_reset(struct intel_engine_cs *engine,
unsigned long flags;
u32 *regs;
- GEM_TRACE("%s request global=%x, current=%d\n",
+ GEM_TRACE("%s request global=%d, current=%d\n",
engine->name, request ? request->global_seqno : 0,
intel_engine_get_seqno(engine));
@@ -2029,8 +1877,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
* it is unsafe in case of lite-restore (because the ctx is
* not idle). PML4 is allocated during ppgtt init so this is
* not needed in 48-bit.*/
- if (rq->gem_context->ppgtt &&
- (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+ if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
!i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
!intel_vgpu_active(rq->i915)) {
ret = intel_logical_ring_emit_pdps(rq);
@@ -2109,7 +1956,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
if (mode & EMIT_INVALIDATE) {
cmd |= MI_INVALIDATE_TLB;
- if (request->engine->id == VCS)
+ if (request->engine->class == VIDEO_DECODE_CLASS)
cmd |= MI_INVALIDATE_BSD;
}
@@ -2127,7 +1974,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
{
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr =
- i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2241,7 +2088,7 @@ static int gen8_init_rcs_context(struct i915_request *rq)
{
int ret;
- ret = intel_ctx_workarounds_emit(rq);
+ ret = intel_engine_emit_ctx_wa(rq);
if (ret)
return ret;
@@ -2294,7 +2141,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->cancel_requests = execlists_cancel_requests;
- engine->schedule = execlists_schedule;
+ engine->schedule = i915_schedule;
engine->execlists.tasklet.func = execlists_submission_tasklet;
engine->reset.prepare = execlists_reset_prepare;
@@ -2382,12 +2229,6 @@ logical_ring_setup(struct intel_engine_cs *engine)
logical_ring_default_irqs(engine);
}
-static bool csb_force_mmio(struct drm_i915_private *i915)
-{
- /* Older GVT emulation depends upon intercepting CSB mmio */
- return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915);
-}
-
static int logical_ring_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -2417,24 +2258,12 @@ static int logical_ring_init(struct intel_engine_cs *engine)
upper_32_bits(ce->lrc_desc);
}
- execlists->csb_read =
- i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
- if (csb_force_mmio(i915)) {
- execlists->csb_status = (u32 __force *)
- (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+ execlists->csb_status =
+ &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
- execlists->csb_write = (u32 __force *)execlists->csb_read;
- execlists->csb_write_reset =
- _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK,
- GEN8_CSB_ENTRIES - 1);
- } else {
- execlists->csb_status =
- &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+ execlists->csb_write =
+ &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
- execlists->csb_write =
- &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
- execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1;
- }
reset_csb_pointers(execlists);
return 0;
@@ -2464,10 +2293,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
if (ret)
return ret;
- ret = intel_engine_create_scratch(engine, PAGE_SIZE);
- if (ret)
- goto err_cleanup_common;
-
ret = intel_init_workaround_bb(engine);
if (ret) {
/*
@@ -2479,11 +2304,10 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
- return 0;
+ intel_engine_init_whitelist(engine);
+ intel_engine_init_workarounds(engine);
-err_cleanup_common:
- intel_engine_cleanup_common(engine);
- return ret;
+ return 0;
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
@@ -2632,7 +2456,6 @@ static void execlists_init_reg_state(u32 *regs,
struct intel_ring *ring)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
u32 base = engine->mmio_base;
bool rcs = engine->class == RENDER_CLASS;
@@ -2704,12 +2527,12 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
- if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
+ if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
*/
- ASSIGN_CTX_PML4(ppgtt, regs);
+ ASSIGN_CTX_PML4(ctx->ppgtt, regs);
}
if (rcs) {
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 3e085c5f2b81..96a8d9524b0c 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -27,6 +27,22 @@
#include <drm/drm_dp_dual_mode_helper.h>
#include "intel_drv.h"
+/* LSPCON OUI Vendor IDs (signatures) */
+#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
+#define LSPCON_VENDOR_MCA_OUI 0x0060AD
+
+/* AUX addresses to write MCA AVI IF */
+#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
+#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
+#define LSPCON_MCA_AVI_IF_KICKOFF (1 << 0)
+#define LSPCON_MCA_AVI_IF_HANDLED (1 << 1)
+
+/* AUX addresses to write Parade AVI IF */
+#define LSPCON_PARADE_AVI_IF_WRITE_OFFSET 0x516
+#define LSPCON_PARADE_AVI_IF_CTRL 0x51E
+#define LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7)
+#define LSPCON_PARADE_AVI_IF_DATA_SIZE 32
+
static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
{
struct intel_digital_port *dig_port =
@@ -50,6 +66,40 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
}
}
+static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+ struct drm_dp_dpcd_ident *ident;
+ u32 vendor_oui;
+
+ if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) {
+ DRM_ERROR("Can't read description\n");
+ return false;
+ }
+
+ ident = &dp->desc.ident;
+ vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) |
+ ident->oui[2];
+
+ switch (vendor_oui) {
+ case LSPCON_VENDOR_MCA_OUI:
+ lspcon->vendor = LSPCON_VENDOR_MCA;
+ DRM_DEBUG_KMS("Vendor: Mega Chips\n");
+ break;
+
+ case LSPCON_VENDOR_PARADE_OUI:
+ lspcon->vendor = LSPCON_VENDOR_PARADE;
+ DRM_DEBUG_KMS("Vendor: Parade Tech\n");
+ break;
+
+ default:
+ DRM_ERROR("Invalid/Unknown vendor OUI\n");
+ return false;
+ }
+
+ return true;
+}
+
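lspcon_detect_vendor() assembles the OUI big-endian from the three DPCD ident bytes and matches it against the signatures #defined at the top of this file. The same check in standalone form (the ident bytes here are the MCA example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t oui[3] = { 0x00, 0x60, 0xAD };	/* DPCD ident bytes */
	uint32_t vendor = (oui[0] << 16) | (oui[1] << 8) | oui[2];

	if (vendor == 0x0060AD)		/* LSPCON_VENDOR_MCA_OUI */
		puts("Vendor: Mega Chips");
	else if (vendor == 0x001CF8)	/* LSPCON_VENDOR_PARADE_OUI */
		puts("Vendor: Parade Tech");
	else
		puts("Invalid/Unknown vendor OUI");
	return 0;
}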
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode current_mode;
@@ -130,6 +180,21 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
return true;
}
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+
+ if (drm_mode_is_420_only(info, adjusted_mode) &&
+ connector->ycbcr_420_allowed) {
+ crtc_state->port_clock /= 2;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ crtc_state->lspcon_downsampling = true;
+ }
+}
+
static bool lspcon_probe(struct intel_lspcon *lspcon)
{
int retry;
@@ -159,7 +224,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
/* Yay ... got a LSPCON device */
DRM_DEBUG_KMS("LSPCON detected\n");
lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
- lspcon->active = true;
+
+ /*
+ * In the SW state machine, let's put LSPCON in PCON mode only.
+ * That way it will work with both HDMI 1.4 and HDMI 2.0 sinks.
+ */
+ if (lspcon->mode != DRM_LSPCON_MODE_PCON) {
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
+ DRM_ERROR("LSPCON mode change to PCON failed\n");
+ return false;
+ }
+ }
return true;
}
@@ -185,6 +261,255 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
}
+static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
+{
+ u8 avi_if_ctrl;
+ u8 retry;
+ ssize_t ret;
+
+ /* Check if LSPCON FW is ready for data */
+ for (retry = 0; retry < 5; retry++) {
+ if (retry)
+ usleep_range(200, 300);
+
+ ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL,
+ &avi_if_ctrl, 1);
+ if (ret < 0) {
+ DRM_ERROR("Failed to read AVI IF control\n");
+ return false;
+ }
+
+ if ((avi_if_ctrl & LSPCON_PARADE_AVI_IF_KICKOFF) == 0)
+ return true;
+ }
+
+ DRM_ERROR("Parade FW not ready to accept AVI IF\n");
+ return false;
+}
+
+static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
+ uint8_t *avi_buf)
+{
+ u8 avi_if_ctrl;
+ u8 block_count = 0;
+ u8 *data;
+ uint16_t reg;
+ ssize_t ret;
+
+ while (block_count < 4) {
+ if (!lspcon_parade_fw_ready(aux)) {
+ DRM_DEBUG_KMS("LSPCON FW not ready, block %d\n",
+ block_count);
+ return false;
+ }
+
+ reg = LSPCON_PARADE_AVI_IF_WRITE_OFFSET;
+ data = avi_buf + block_count * 8;
+ ret = drm_dp_dpcd_write(aux, reg, data, 8);
+ if (ret < 0) {
+ DRM_ERROR("Failed to write AVI IF block %d\n",
+ block_count);
+ return false;
+ }
+
+ /*
+ * Once a block of data is written, we have to inform the FW
+ * about this by writing into the AVI infoframe control register:
+ * - set the kickoff bit[7] to 1
+ * - write the block no. to bits[1:0]
+ */
+ reg = LSPCON_PARADE_AVI_IF_CTRL;
+ avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count;
+ ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1);
+ if (ret < 0) {
+ DRM_ERROR("Failed to update (0x%x), block %d\n",
+ reg, block_count);
+ return false;
+ }
+
+ block_count++;
+ }
+
+ DRM_DEBUG_KMS("Wrote AVI IF blocks successfully\n");
+ return true;
+}
+
+static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
+ const uint8_t *frame,
+ ssize_t len)
+{
+ uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
+
+ /*
+ * Parade's buffer contains 32 bytes of data, divided
+ * into 4 blocks of 8 bytes:
+ * Token byte (first byte of first block, must be non-zero)
+ * HB0 to HB2 from AVI IF (3 bytes header)
+ * PB0 to PB27 from AVI IF (28 bytes data)
+ * So it should look like this:
+ * first block: | <token> <HB0-HB2> <PB0-PB3> |
+ * next 3 blocks: |<PB4-PB11>|<PB12-PB19>|<PB20-PB27>|
+ */
+
+ if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) {
+ DRM_ERROR("Invalid length of infoframes\n");
+ return false;
+ }
+
+ memcpy(&avi_if[1], frame, len);
+
+ if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) {
+ DRM_DEBUG_KMS("Failed to write infoframe blocks\n");
+ return false;
+ }
+
+ return true;
+}
+
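Per the layout comment above, the 32-byte Parade buffer is a non-zero token byte followed by the packed infoframe (3 header plus up to 28 data bytes), which the FW consumes as four 8-byte blocks. A standalone sketch of the buffer construction (the frame bytes are dummy values):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t frame[31] = { 0x82, 0x02, 0x0d };	/* HB0-HB2, rest 0 */
	uint8_t buf[32] = { 1, };			/* non-zero token */
	int block;

	memcpy(&buf[1], frame, sizeof(frame));

	for (block = 0; block < 4; block++)
		printf("block %d = bytes %d..%d\n",
		       block, block * 8, block * 8 + 7);
	return 0;
}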
+static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux,
+ const uint8_t *buffer, ssize_t len)
+{
+ int ret;
+ uint32_t val = 0;
+ uint32_t retry;
+ uint16_t reg;
+ const uint8_t *data = buffer;
+
+ reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET;
+ while (val < len) {
+ /* DPCD write for AVI IF can fail on a slow FW day, so retry */
+ for (retry = 0; retry < 5; retry++) {
+ ret = drm_dp_dpcd_write(aux, reg, (void *)data, 1);
+ if (ret == 1) {
+ break;
+ } else if (retry < 4) {
+ mdelay(50);
+ continue;
+ } else {
+ DRM_ERROR("DPCD write failed at:0x%x\n", reg);
+ return false;
+ }
+ }
+ val++; reg++; data++;
+ }
+
+ val = 0;
+ reg = LSPCON_MCA_AVI_IF_CTRL;
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ /* Indicate LSPCON chip about infoframe, clear bit 1 and set bit 0 */
+ val &= ~LSPCON_MCA_AVI_IF_HANDLED;
+ val |= LSPCON_MCA_AVI_IF_KICKOFF;
+
+ ret = drm_dp_dpcd_write(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ val = 0;
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ if (val == LSPCON_MCA_AVI_IF_HANDLED)
+ DRM_DEBUG_KMS("AVI IF handled by FW\n");
+
+ return true;
+}
+
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
+{
+ bool ret;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+
+ /* LSPCON only needs AVI IF */
+ if (type != HDMI_INFOFRAME_TYPE_AVI)
+ return;
+
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+ frame, len);
+ else
+ ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
+ frame, len);
+
+ if (!ret) {
+ DRM_ERROR("Failed to write AVI infoframes\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
+}
+
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ ssize_t ret;
+ union hdmi_infoframe frame;
+ uint8_t buf[VIDEO_DIP_DATA_SIZE];
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct drm_connector *connector = &intel_dp->attached_connector->base;
+ const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode;
+ bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+
+ if (!lspcon->active) {
+ DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
+ return;
+ }
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ mode, is_hdmi2_sink);
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill AVI infoframe\n");
+ return;
+ }
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+ if (crtc_state->lspcon_downsampling)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+ else
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+ } else {
+ frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+ }
+
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL,
+ false, is_hdmi2_sink);
+
+ ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
+ if (ret < 0) {
+ DRM_ERROR("Failed to pack AVI IF\n");
+ return;
+ }
+
+ dig_port->write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI,
+ buf, ret);
+}
+
+bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ return enc_to_intel_lspcon(&encoder->base)->active;
+}
+
void lspcon_resume(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode expected_mode;
@@ -216,6 +541,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_connector *connector = &dp->attached_connector->base;
if (!HAS_LSPCON(dev_priv)) {
DRM_ERROR("LSPCON is not supported on this platform\n");
@@ -230,25 +556,18 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
return false;
}
- /*
- * In the SW state machine, lets Put LSPCON in PCON mode only.
- * In this way, it will work with both HDMI 1.4 sinks as well as HDMI
- * 2.0 sinks.
- */
- if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
- if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
- DRM_ERROR("LSPCON mode change to PCON failed\n");
- return false;
- }
- }
-
if (!intel_dp_read_dpcd(dp)) {
DRM_ERROR("LSPCON DPCD read failed\n");
return false;
}
- drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
+ if (!lspcon_detect_vendor(lspcon)) {
+ DRM_ERROR("LSPCON vendor detection failed\n");
+ return false;
+ }
+ connector->ycbcr_420_allowed = true;
+ lspcon->active = true;
DRM_DEBUG_KMS("Success: LSPCON init\n");
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f9f3b0885ba5..e6c5d985ea0a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -42,10 +42,6 @@
#include <linux/acpi.h>
/* Private structure for the integrated LVDS support */
-struct intel_lvds_connector {
- struct intel_connector base;
-};
-
struct intel_lvds_pps {
/* 100us units */
int t1_t2;
@@ -70,7 +66,7 @@ struct intel_lvds_encoder {
struct intel_lvds_pps init_pps;
u32 init_lvds_val;
- struct intel_lvds_connector *attached_connector;
+ struct intel_connector *attached_connector;
};
static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
@@ -78,11 +74,6 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct intel_lvds_encoder, base.base);
}
-static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct intel_lvds_connector, base.base);
-}
-
bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t lvds_reg, enum pipe *pipe)
{
@@ -396,7 +387,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
- &lvds_encoder->attached_connector->base;
+ lvds_encoder->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
unsigned int lvds_bpp;
@@ -418,6 +409,8 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
pipe_config->pipe_bpp = lvds_bpp;
}
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
/*
* We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -461,15 +454,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
- struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
/* use cached edid if we have one */
- if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
- return drm_add_edid_modes(connector, lvds_connector->base.edid);
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ return drm_add_edid_modes(connector, intel_connector->edid);
- mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
+ mode = drm_mode_duplicate(dev, intel_connector->panel.fixed_mode);
if (mode == NULL)
return 0;
@@ -477,27 +470,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 1;
}
-/**
- * intel_lvds_destroy - unregister and free LVDS structures
- * @connector: connector to free
- *
- * Unregister the DDC bus for this connector then free the driver private
- * structure.
- */
-static void intel_lvds_destroy(struct drm_connector *connector)
-{
- struct intel_lvds_connector *lvds_connector =
- to_lvds_connector(connector);
-
- if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
- kfree(lvds_connector->base.edid);
-
- intel_panel_fini(&lvds_connector->base.panel);
-
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
@@ -511,7 +483,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_lvds_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
@@ -802,8 +774,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
return i915_modparams.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
- if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
- > 112999)
+ if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999)
return true;
if (dmi_check_system(intel_dual_link_lvds))
@@ -858,7 +829,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
- struct intel_lvds_connector *lvds_connector;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
@@ -911,23 +881,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (!lvds_encoder)
return;
- lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
- if (!lvds_connector) {
- kfree(lvds_encoder);
- return;
- }
-
- if (intel_connector_init(&lvds_connector->base) < 0) {
- kfree(lvds_connector);
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
kfree(lvds_encoder);
return;
}
- lvds_encoder->attached_connector = lvds_connector;
+ lvds_encoder->attached_connector = intel_connector;
intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
- intel_connector = &lvds_connector->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -1008,7 +971,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
} else {
edid = ERR_PTR(-ENOENT);
}
- lvds_connector->base.edid = edid;
+ intel_connector->edid = edid;
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -1072,6 +1035,6 @@ failed:
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(lvds_encoder);
- kfree(lvds_connector);
+ intel_connector_free(intel_connector);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index e034b4166d32..b8f106d9ecf8 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -773,70 +773,6 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
opregion->acpi->cadl[i] = 0;
}
-void intel_opregion_register(struct drm_i915_private *dev_priv)
-{
- struct intel_opregion *opregion = &dev_priv->opregion;
-
- if (!opregion->header)
- return;
-
- if (opregion->acpi) {
- intel_didl_outputs(dev_priv);
- intel_setup_cadls(dev_priv);
-
- /* Notify BIOS we are ready to handle ACPI video ext notifs.
- * Right now, all the events are handled by the ACPI video module.
- * We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
-
- opregion->acpi_notifier.notifier_call = intel_opregion_video_event;
- register_acpi_notifier(&opregion->acpi_notifier);
- }
-
- if (opregion->asle) {
- opregion->asle->tche = ASLE_TCHE_BLC_EN;
- opregion->asle->ardy = ASLE_ARDY_READY;
- }
-}
-
-void intel_opregion_unregister(struct drm_i915_private *dev_priv)
-{
- struct intel_opregion *opregion = &dev_priv->opregion;
-
- if (!opregion->header)
- return;
-
- if (opregion->asle)
- opregion->asle->ardy = ASLE_ARDY_NOT_READY;
-
- cancel_work_sync(&dev_priv->opregion.asle_work);
-
- if (opregion->acpi) {
- opregion->acpi->drdy = 0;
-
- unregister_acpi_notifier(&opregion->acpi_notifier);
- opregion->acpi_notifier.notifier_call = NULL;
- }
-
- /* just clear all opregion memory pointers now */
- memunmap(opregion->header);
- if (opregion->rvda) {
- memunmap(opregion->rvda);
- opregion->rvda = NULL;
- }
- if (opregion->vbt_firmware) {
- kfree(opregion->vbt_firmware);
- opregion->vbt_firmware = NULL;
- }
- opregion->header = NULL;
- opregion->acpi = NULL;
- opregion->swsci = NULL;
- opregion->asle = NULL;
- opregion->vbt = NULL;
- opregion->lid_state = NULL;
-}
-
static void swsci_setup(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
@@ -1115,3 +1051,97 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
return ret - 1;
}
+
+void intel_opregion_register(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi_notifier.notifier_call =
+ intel_opregion_video_event;
+ register_acpi_notifier(&opregion->acpi_notifier);
+ }
+
+ intel_opregion_resume(i915);
+}
+
+void intel_opregion_resume(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ intel_didl_outputs(i915);
+ intel_setup_cadls(i915);
+
+ /*
+ * Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video
+ * module. We don't actually need to do anything with them.
+ */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+ }
+
+ if (opregion->asle) {
+ opregion->asle->tche = ASLE_TCHE_BLC_EN;
+ opregion->asle->ardy = ASLE_ARDY_READY;
+ }
+
+ intel_opregion_notify_adapter(i915, PCI_D0);
+}
+
+void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+
+ if (!opregion->header)
+ return;
+
+ intel_opregion_notify_adapter(i915, state);
+
+ if (opregion->asle)
+ opregion->asle->ardy = ASLE_ARDY_NOT_READY;
+
+ cancel_work_sync(&i915->opregion.asle_work);
+
+ if (opregion->acpi)
+ opregion->acpi->drdy = 0;
+}
+
+void intel_opregion_unregister(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+
+ intel_opregion_suspend(i915, PCI_D1);
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi_notifier.notifier_call) {
+ unregister_acpi_notifier(&opregion->acpi_notifier);
+ opregion->acpi_notifier.notifier_call = NULL;
+ }
+
+ /* just clear all opregion memory pointers now */
+ memunmap(opregion->header);
+ if (opregion->rvda) {
+ memunmap(opregion->rvda);
+ opregion->rvda = NULL;
+ }
+ if (opregion->vbt_firmware) {
+ kfree(opregion->vbt_firmware);
+ opregion->vbt_firmware = NULL;
+ }
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+ opregion->lid_state = NULL;
+}
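With the suspend/resume paths split out, the entry points pair up roughly as follows (a summary of the code above, not driver API documentation):

/*
 * driver load:     intel_opregion_register()
 *                    -> intel_opregion_resume()      (notify PCI_D0)
 * system suspend:  intel_opregion_suspend(state)     (ardy off, drdy = 0)
 * system resume:   intel_opregion_resume()           (didl/cadls, D0)
 * driver unload:   intel_opregion_unregister()
 *                    -> intel_opregion_suspend(PCI_D1)
 *                    -> unmap and clear all pointers
 */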
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index e8498a8cda3d..4aa68ffbd30e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -57,8 +57,14 @@ struct intel_opregion {
#ifdef CONFIG_ACPI
int intel_opregion_setup(struct drm_i915_private *dev_priv);
+
void intel_opregion_register(struct drm_i915_private *dev_priv);
void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+
+void intel_opregion_resume(struct drm_i915_private *dev_priv);
+void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+ pci_power_t state);
+
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
@@ -81,6 +87,15 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
{
}
+static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+ pci_power_t state)
+{
+}
+
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 72eb7e48e8bc..20ea7c99d13a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1338,7 +1338,7 @@ err_put_bo:
return err;
}
-void intel_setup_overlay(struct drm_i915_private *dev_priv)
+void intel_overlay_setup(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
int ret;
@@ -1387,7 +1387,7 @@ out_free:
kfree(overlay);
}
-void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4a9f139e7b73..e6cd7b55c018 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -111,7 +111,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
/* Native modes don't need fitting */
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
- !pipe_config->ycbcr420)
+ pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
goto done;
switch (fitting_mode) {
@@ -505,7 +505,7 @@ static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
static u32 vlv_get_backlight(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum pipe pipe = intel_get_pipe_from_connector(connector);
+ enum pipe pipe = intel_connector_get_pipe(connector);
return _vlv_get_backlight(dev_priv, pipe);
}
@@ -763,7 +763,7 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_panel *panel = &connector->panel;
/* Disable the backlight */
- pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
+ intel_panel_actually_set_backlight(old_conn_state, 0);
usleep_range(2000, 3000);
pwm_disable(panel->backlight.pwm);
}
@@ -1814,11 +1814,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
return 0;
}
-void intel_panel_destroy_backlight(struct drm_connector *connector)
+static void intel_panel_destroy_backlight(struct intel_panel *panel)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_panel *panel = &intel_connector->panel;
-
/* dispose of the pwm */
if (panel->backlight.pwm)
pwm_put(panel->backlight.pwm);
@@ -1923,6 +1920,8 @@ void intel_panel_fini(struct intel_panel *panel)
struct intel_connector *intel_connector =
container_of(panel, struct intel_connector, panel);
+ intel_panel_destroy_backlight(panel);
+
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1db9b8328275..a26b4eddda25 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
uint32_t method1, method2;
int cpp;
+ if (mem_value == 0)
+ return U32_MAX;
+
if (!intel_wm_plane_visible(cstate, pstate))
return 0;
@@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
uint32_t method1, method2;
int cpp;
+ if (mem_value == 0)
+ return U32_MAX;
+
if (!intel_wm_plane_visible(cstate, pstate))
return 0;
@@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
{
int cpp;
+ if (mem_value == 0)
+ return U32_MAX;
+
if (!intel_wm_plane_visible(cstate, pstate))
return 0;
@@ -2881,8 +2890,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
* any underrun. If not able to get Dimm info assume 16GB dimm
* to avoid any underrun.
*/
- if (!dev_priv->dram_info.valid_dimm ||
- dev_priv->dram_info.is_16gb_dimm)
+ if (dev_priv->dram_info.is_16gb_dimm)
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3009,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
+static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+{
+ /*
+ * On some SNB machines (Thinkpad X220 Tablet at least)
+ * LP3 usage can cause vblank interrupts to be lost.
+ * The DEIIR bit will go high but it looks like the CPU
+ * never gets interrupted.
+ *
+ * It's not clear whether other interrupt sources could
+ * be affected or if this is somehow limited to vblank
+ * interrupts only. To play it safe we disable LP3
+ * watermarks entirely.
+ */
+ if (dev_priv->wm.pri_latency[3] == 0 &&
+ dev_priv->wm.spr_latency[3] == 0 &&
+ dev_priv->wm.cur_latency[3] == 0)
+ return;
+
+ dev_priv->wm.pri_latency[3] = 0;
+ dev_priv->wm.spr_latency[3] = 0;
+ dev_priv->wm.cur_latency[3] = 0;
+
+ DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+}
+
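Zeroing the LP3 latencies interacts with the mem_value == 0 checks added earlier in this file: a zero latency now yields a U32_MAX watermark, so the LP3 level can never validate and is effectively disabled. A standalone sketch of that interplay (compute_wm() is a stand-in, not the real method):

#include <stdint.h>
#include <stdio.h>

static uint32_t compute_wm(uint32_t mem_value)
{
	if (mem_value == 0)
		return UINT32_MAX;	/* level can never validate */
	return mem_value * 2;		/* stand-in for the real method */
}

int main(void)
{
	uint32_t latency[4] = { 2, 4, 8, 0 };	/* LP3 zeroed by the quirk */
	int lvl;

	for (lvl = 0; lvl < 4; lvl++)
		printf("LP%d: %s\n", lvl,
		       compute_wm(latency[lvl]) == UINT32_MAX ?
		       "disabled" : "usable");
	return 0;
}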
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
@@ -3025,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
- if (IS_GEN6(dev_priv))
+ if (IS_GEN6(dev_priv)) {
snb_wm_latency_quirk(dev_priv);
+ snb_wm_lp3_irq_quirk(dev_priv);
+ }
}
static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
@@ -3160,7 +3198,8 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* and after the vblank.
*/
*a = newstate->wm.ilk.optimal;
- if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base))
+ if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
+ intel_state->skip_intermediate_wm)
return 0;
a->pipe_enabled |= b->pipe_enabled;
@@ -3612,15 +3651,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
- IS_CANNONLAKE(dev_priv))
- return true;
-
- if (IS_SKYLAKE(dev_priv) &&
- dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
- return true;
-
- return false;
+ return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
+ dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
/*
@@ -3784,7 +3816,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *cstate,
- const unsigned int total_data_rate,
+ const u64 total_data_rate,
const int num_active,
struct skl_ddb_allocation *ddb)
{
@@ -3798,12 +3830,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
return ddb_size - 4; /* 4 blocks for bypass path allocation */
adjusted_mode = &cstate->base.adjusted_mode;
- total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);
+ total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
/*
* 12GB/s is maximum BW supported by single DBuf slice.
*/
- if (total_data_bw >= GBps(12) || num_active > 1) {
+ if (num_active > 1 || total_data_bw >= GBps(12)) {
ddb->enabled_slices = 2;
} else {
ddb->enabled_slices = 1;
@@ -3814,16 +3846,15 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
}
static void
-skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
+skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *cstate,
- const unsigned int total_data_rate,
+ const u64 total_data_rate,
struct skl_ddb_allocation *ddb,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
struct drm_atomic_state *state = cstate->base.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *for_crtc = cstate->base.crtc;
const struct drm_crtc_state *crtc_state;
const struct drm_crtc *crtc;
@@ -3920,73 +3951,68 @@ static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
const enum pipe pipe,
const enum plane_id plane_id,
- struct skl_ddb_allocation *ddb /* out */)
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv)
{
- u32 val, val2 = 0;
- int fourcc, pixel_format;
+ u32 val, val2;
+ u32 fourcc = 0;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
val = I915_READ(CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val);
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
return;
}
val = I915_READ(PLANE_CTL(pipe, plane_id));
/* No DDB allocated for disabled planes */
- if (!(val & PLANE_CTL_ENABLE))
- return;
+ if (val & PLANE_CTL_ENABLE)
+ fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK);
- pixel_format = val & PLANE_CTL_FORMAT_MASK;
- fourcc = skl_format_to_fourcc(pixel_format,
- val & PLANE_CTL_ORDER_RGBX,
- val & PLANE_CTL_ALPHA_MASK);
-
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
- /*
- * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed
- * registers for now.
- */
- if (INTEL_GEN(dev_priv) < 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ } else {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- if (fourcc == DRM_FORMAT_NV12) {
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val2);
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->uv_plane[pipe][plane_id], val);
- } else {
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val);
+ if (fourcc == DRM_FORMAT_NV12)
+ swap(val, val2);
+
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
}
}
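For readers unfamiliar with the planar register layout, the swap() above amounts to the following readout mapping on gen9/10 (my reading of the code, summarized as a comment; not text from the patch):

/*
 * non-NV12: PLANE_BUF_CFG      -> ddb_y
 *           PLANE_NV12_BUF_CFG -> ddb_uv (zero when unused)
 * NV12:     PLANE_BUF_CFG      -> ddb_uv
 *           PLANE_NV12_BUF_CFG -> ddb_y
 */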
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */)
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv)
{
- struct intel_crtc *crtc;
-
- memset(ddb, 0, sizeof(*ddb));
-
- ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- enum intel_display_power_domain power_domain;
- enum plane_id plane_id;
- enum pipe pipe = crtc->pipe;
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return;
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
- continue;
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ skl_ddb_get_hw_plane_state(dev_priv, pipe,
+ plane_id,
+ &ddb_y[plane_id],
+ &ddb_uv[plane_id]);
- for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(dev_priv, pipe,
- plane_id, ddb);
+ intel_display_power_put(dev_priv, power_domain);
+}
- intel_display_power_put(dev_priv, power_domain);
- }
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
}
/*
@@ -4139,23 +4165,24 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
return 0;
}
-static unsigned int
+static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
- const struct drm_plane_state *pstate,
+ const struct intel_plane_state *intel_pstate,
const int plane)
{
- struct intel_plane *intel_plane = to_intel_plane(pstate->plane);
- struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
+ struct intel_plane *intel_plane =
+ to_intel_plane(intel_pstate->base.plane);
uint32_t data_rate;
uint32_t width = 0, height = 0;
struct drm_framebuffer *fb;
u32 format;
uint_fixed_16_16_t down_scale_amount;
+ u64 rate;
if (!intel_pstate->base.visible)
return 0;
- fb = pstate->fb;
+ fb = intel_pstate->base.fb;
format = fb->format->format;
if (intel_plane->id == PLANE_CURSOR)
@@ -4177,28 +4204,26 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
height /= 2;
}
- data_rate = width * height * fb->format->cpp[plane];
+ data_rate = width * height;
down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
- return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+ rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+
+ rate *= fb->format->cpp[plane];
+ return rate;
}
-/*
- * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
- * a 8192x4096@32bpp framebuffer:
- * 3 * 4096 * 8192 * 4 < 2^32
- */
-static unsigned int
+static u64
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
- unsigned int *plane_data_rate,
- unsigned int *uv_plane_data_rate)
+ u64 *plane_data_rate,
+ u64 *uv_plane_data_rate)
{
struct drm_crtc_state *cstate = &intel_cstate->base;
struct drm_atomic_state *state = cstate->state;
struct drm_plane *plane;
const struct drm_plane_state *pstate;
- unsigned int total_data_rate = 0;
+ u64 total_data_rate = 0;
if (WARN_ON(!state))
return 0;
@@ -4206,26 +4231,81 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
/* Calculate and cache data rate for each plane */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
enum plane_id plane_id = to_intel_plane(plane)->id;
- unsigned int rate;
+ u64 rate;
+ const struct intel_plane_state *intel_pstate =
+ to_intel_plane_state(pstate);
/* packed/y */
rate = skl_plane_relative_data_rate(intel_cstate,
- pstate, 0);
+ intel_pstate, 0);
plane_data_rate[plane_id] = rate;
-
total_data_rate += rate;
/* uv-plane */
rate = skl_plane_relative_data_rate(intel_cstate,
- pstate, 1);
+ intel_pstate, 1);
uv_plane_data_rate[plane_id] = rate;
-
total_data_rate += rate;
}
return total_data_rate;
}
+static u64
+icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+ u64 *plane_data_rate)
+{
+ struct drm_crtc_state *cstate = &intel_cstate->base;
+ struct drm_atomic_state *state = cstate->state;
+ struct drm_plane *plane;
+ const struct drm_plane_state *pstate;
+ u64 total_data_rate = 0;
+
+ if (WARN_ON(!state))
+ return 0;
+
+ /* Calculate and cache data rate for each plane */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
+ const struct intel_plane_state *intel_pstate =
+ to_intel_plane_state(pstate);
+ enum plane_id plane_id = to_intel_plane(plane)->id;
+ u64 rate;
+
+ if (!intel_pstate->linked_plane) {
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ intel_pstate, 0);
+ plane_data_rate[plane_id] = rate;
+ total_data_rate += rate;
+ } else {
+ enum plane_id y_plane_id;
+
+ /*
+ * The slave plane might not be iterated by
+ * drm_atomic_crtc_state_for_each_plane_state(),
+ * and the master plane state it needs may be
+ * NULL if fetched via get_new_plane_state(), so
+ * we always calculate from the master.
+ */
+ if (intel_pstate->slave)
+ continue;
+
+ /* Y plane rate is calculated on the slave */
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ intel_pstate, 0);
+ y_plane_id = intel_pstate->linked_plane->id;
+ plane_data_rate[y_plane_id] = rate;
+ total_data_rate += rate;
+
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ intel_pstate, 1);
+ plane_data_rate[plane_id] = rate;
+ total_data_rate += rate;
+ }
+ }
+
+ return total_data_rate;
+}
+
static uint16_t
skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
{
@@ -4298,15 +4378,25 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
enum plane_id plane_id = to_intel_plane(plane)->id;
+ struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
if (plane_id == PLANE_CURSOR)
continue;
- if (!pstate->visible)
+ /* slave plane must be invisible and calculated from master */
+ if (!pstate->visible || WARN_ON(plane_state->slave))
continue;
- minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
- uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+ if (!plane_state->linked_plane) {
+ minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
+ uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+ } else {
+ enum plane_id y_plane_id =
+ plane_state->linked_plane->id;
+
+ minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
+ minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+ }
}
minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -4318,23 +4408,22 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
{
struct drm_atomic_state *state = cstate->base.state;
struct drm_crtc *crtc = cstate->base.crtc;
- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
uint16_t alloc_size, start;
uint16_t minimum[I915_MAX_PLANES] = {};
uint16_t uv_minimum[I915_MAX_PLANES] = {};
- unsigned int total_data_rate;
+ u64 total_data_rate;
enum plane_id plane_id;
int num_active;
- unsigned int plane_data_rate[I915_MAX_PLANES] = {};
- unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {};
+ u64 plane_data_rate[I915_MAX_PLANES] = {};
+ u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
uint16_t total_min_blocks = 0;
/* Clear the partitioning for disabled planes. */
- memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
+ memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
+ memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
if (WARN_ON(!state))
return 0;
@@ -4344,11 +4433,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
- total_data_rate = skl_get_total_relative_data_rate(cstate,
- plane_data_rate,
- uv_plane_data_rate);
- skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
- alloc, &num_active);
+ if (INTEL_GEN(dev_priv) < 11)
+ total_data_rate =
+ skl_get_total_relative_data_rate(cstate,
+ plane_data_rate,
+ uv_plane_data_rate);
+ else
+ total_data_rate =
+ icl_get_total_relative_data_rate(cstate,
+ plane_data_rate);
+
+ skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
+ ddb, alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
@@ -4374,8 +4470,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
}
alloc_size -= total_min_blocks;
- ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
- ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
/*
* 2. Distribute the remaining space in proportion to the amount of
@@ -4388,7 +4484,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- unsigned int data_rate, uv_data_rate;
+ u64 data_rate, uv_data_rate;
uint16_t plane_blocks, uv_plane_blocks;
if (plane_id == PLANE_CURSOR)
@@ -4402,13 +4498,12 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* result is < available as data_rate / total_data_rate < 1
*/
plane_blocks = minimum[plane_id];
- plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
- total_data_rate);
+ plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
/* Leave disabled planes at (0,0) */
if (data_rate) {
- ddb->plane[pipe][plane_id].start = start;
- ddb->plane[pipe][plane_id].end = start + plane_blocks;
+ cstate->wm.skl.plane_ddb_y[plane_id].start = start;
+ cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
}
start += plane_blocks;
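The proportional split above is easier to sanity-check in isolation. A sketch with invented numbers (skl_proportional_blocks() is a hypothetical helper; div64_u64() comes from linux/math64.h):

static u16 skl_proportional_blocks(u16 minimum, u16 alloc_size,
				   u64 data_rate, u64 total_data_rate)
{
	/* e.g. minimum = 32, alloc_size = 896, 25% share: 32 + 224 = 256 */
	return minimum + div64_u64((u64)alloc_size * data_rate,
				   total_data_rate);
}

Since data_rate / total_data_rate < 1, the per-plane result never exceeds minimum + alloc_size, which is what the in-tree comment about "result is < available" relies on.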
@@ -4417,12 +4512,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uv_data_rate = uv_plane_data_rate[plane_id];
uv_plane_blocks = uv_minimum[plane_id];
- uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate,
- total_data_rate);
+ uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
+
+ /* Gen11+ uses a separate plane for UV watermarks */
+ WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
if (uv_data_rate) {
- ddb->uv_plane[pipe][plane_id].start = start;
- ddb->uv_plane[pipe][plane_id].end =
+ cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
+ cstate->wm.skl.plane_ddb_uv[plane_id].end =
start + uv_plane_blocks;
}
@@ -4476,7 +4573,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
}
static uint_fixed_16_16_t
-intel_get_linetime_us(struct intel_crtc_state *cstate)
+intel_get_linetime_us(const struct intel_crtc_state *cstate)
{
uint32_t pixel_rate;
uint32_t crtc_htotal;
@@ -4519,12 +4616,12 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
}
static int
-skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
- struct intel_crtc_state *cstate,
+skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
- struct skl_wm_params *wp, int plane_id)
+ struct skl_wm_params *wp, int color_plane)
{
struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
const struct drm_framebuffer *fb = pstate->fb;
uint32_t interm_pbpl;
@@ -4532,11 +4629,8 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
- if (!intel_wm_plane_visible(cstate, intel_pstate))
- return 0;
-
/* only NV12 format has two planes */
- if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) {
+ if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
DRM_DEBUG_KMS("Non NV12 format have single plane\n");
return -EINVAL;
}
@@ -4561,10 +4655,10 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
}
- if (plane_id == 1 && wp->is_planar)
+ if (color_plane == 1 && wp->is_planar)
wp->width /= 2;
- wp->cpp = fb->format->cpp[plane_id];
+ wp->cpp = fb->format->cpp[color_plane];
wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
intel_pstate);
@@ -4626,8 +4720,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
return 0;
}
-static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
- struct intel_crtc_state *cstate,
+static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
uint16_t ddb_allocation,
int level,
@@ -4635,6 +4728,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_pstate->base.plane->dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
uint32_t latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
@@ -4645,11 +4740,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
uint32_t min_disp_buf_needed;
- if (latency == 0 ||
- !intel_wm_plane_visible(cstate, intel_pstate)) {
- result->plane_en = false;
- return 0;
- }
+ if (latency == 0)
+ return level == 0 ? -EINVAL : 0;
/* Display WA #1141: kbl,cfl */
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4672,15 +4764,24 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
} else {
if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
wp->dbuf_block_size < 1) &&
- (wp->plane_bytes_per_line / wp->dbuf_block_size < 1))
+ (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
- else if (ddb_allocation >=
- fixed16_to_u32_round_up(wp->plane_blocks_per_line))
- selected_result = min_fixed16(method1, method2);
- else if (latency >= wp->linetime_us)
- selected_result = min_fixed16(method1, method2);
- else
+ } else if (ddb_allocation >=
+ fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
+ if (IS_GEN9(dev_priv) &&
+ !IS_GEMINILAKE(dev_priv))
+ selected_result = min_fixed16(method1, method2);
+ else
+ selected_result = method2;
+ } else if (latency >= wp->linetime_us) {
+ if (IS_GEN9(dev_priv) &&
+ !IS_GEMINILAKE(dev_priv))
+ selected_result = min_fixed16(method1, method2);
+ else
+ selected_result = method2;
+ } else {
selected_result = method1;
+ }
}
res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
@@ -4737,8 +4838,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if ((level > 0 && res_lines > 31) ||
res_blocks >= ddb_allocation ||
min_disp_buf_needed >= ddb_allocation) {
- result->plane_en = false;
-
/*
* If there are no valid level 0 watermarks, then we can't
* support this display configuration.
@@ -4756,17 +4855,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
}
- /*
- * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A)
- * disable wm level 1-7 on NV12 planes
- */
- if (wp->is_planar && level >= 1 &&
- (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
- IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) {
- result->plane_en = false;
- return 0;
- }
-
/* The number of lines are ignored for the level 0 watermark. */
result->plane_res_b = res_blocks;
result->plane_res_l = res_lines;
@@ -4776,43 +4864,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
static int
-skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb,
- struct intel_crtc_state *cstate,
+skl_compute_wm_levels(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
+ uint16_t ddb_blocks,
const struct skl_wm_params *wm_params,
- struct skl_plane_wm *wm,
- int plane_id)
+ struct skl_wm_level *levels)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_plane *plane = intel_pstate->base.plane;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- uint16_t ddb_blocks;
- enum pipe pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_pstate->base.plane->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum plane_id intel_plane_id = intel_plane->id;
+ struct skl_wm_level *result_prev = &levels[0];
int ret;
- if (WARN_ON(!intel_pstate->base.fb))
- return -EINVAL;
-
- ddb_blocks = plane_id ?
- skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
- skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);
-
for (level = 0; level <= max_level; level++) {
- struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
- &wm->wm[level];
- struct skl_wm_level *result_prev;
+ struct skl_wm_level *result = &levels[level];
- if (level)
- result_prev = plane_id ? &wm->uv_wm[level - 1] :
- &wm->wm[level - 1];
- else
- result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0];
-
- ret = skl_compute_plane_wm(dev_priv,
- cstate,
+ ret = skl_compute_plane_wm(cstate,
intel_pstate,
ddb_blocks,
level,
@@ -4821,16 +4888,15 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
result);
if (ret)
return ret;
- }
- if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
- wm->is_planar = true;
+ result_prev = result;
+ }
return 0;
}
static uint32_t
-skl_compute_linetime_wm(struct intel_crtc_state *cstate)
+skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
struct drm_atomic_state *state = cstate->base.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -4852,42 +4918,50 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
return linetime_wm;
}
-static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
- struct skl_wm_params *wp,
- struct skl_wm_level *wm_l0,
- uint16_t ddb_allocation,
- struct skl_wm_level *trans_wm /* out */)
+static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
+ const struct skl_wm_params *wp,
+ struct skl_plane_wm *wm,
+ uint16_t ddb_allocation)
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
uint16_t trans_min, trans_y_tile_min;
const uint16_t trans_amount = 10; /* This is a configurable amount */
- uint16_t trans_offset_b, res_blocks;
-
- if (!cstate->base.active)
- goto exit;
+ uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
/* Transition WMs are not recommended by the HW team for GEN9 */
if (INTEL_GEN(dev_priv) <= 9)
- goto exit;
+ return;
/* Transition WMs don't make any sense if IPC is disabled */
if (!dev_priv->ipc_enabled)
- goto exit;
+ return;
- trans_min = 0;
- if (INTEL_GEN(dev_priv) >= 10)
+ trans_min = 14;
+ if (INTEL_GEN(dev_priv) >= 11)
trans_min = 4;
trans_offset_b = trans_min + trans_amount;
+ /*
+ * The spec asks for Selected Result Blocks for wm0 (the real value),
+ * not Result Blocks (the integer value). Pay attention to the capital
+ * letters. The value wm->wm[0].plane_res_b is actually Result Blocks, but
+ * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
+ * and since we will later have to take the ceiling of the sum in the
+ * transition watermarks calculation, we can just pretend Selected
+ * Result Blocks is Result Blocks minus 1 and it should work for the
+ * current platforms.
+ */
+ wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
+
if (wp->y_tiled) {
trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
wp->y_tile_minimum);
- res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
+ res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
trans_offset_b;
} else {
- res_blocks = wm_l0->plane_res_b + trans_offset_b;
+ res_blocks = wm0_sel_res_b + trans_offset_b;
/* WA BUG:1938466 add one block for non y-tile planes */
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
@@ -4898,25 +4972,132 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
res_blocks += 1;
if (res_blocks < ddb_allocation) {
- trans_wm->plane_res_b = res_blocks;
- trans_wm->plane_en = true;
- return;
+ wm->trans_wm.plane_res_b = res_blocks;
+ wm->trans_wm.plane_en = true;
+ }
+}
+
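Putting the comment above into numbers, for a non y-tiled gen10 plane with wm0 Result Blocks = 31 (illustrative only, ignoring the CNL A0 workaround):

/*
 * wm0_sel_res_b  = 31 - 1             = 30
 * trans_offset_b = 14 + 10            = 24
 * res_blocks     = 30 + 24, then + 1  = 55
 *
 * trans_wm is enabled only if 55 < ddb_allocation.
 */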
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ enum plane_id plane_id, int color_plane)
+{
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+ u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
+ struct skl_wm_params wm_params;
+ int ret;
+
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, color_plane);
+ if (ret)
+ return ret;
+
+ ret = skl_compute_wm_levels(crtc_state, plane_state,
+ ddb_blocks, &wm_params, wm->wm);
+ if (ret)
+ return ret;
+
+ skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
+
+ return 0;
+}
+
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ enum plane_id plane_id)
+{
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+ u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
+ struct skl_wm_params wm_params;
+ int ret;
+
+ wm->is_planar = true;
+
+ /* uv plane watermarks must also be validated for NV12/Planar */
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, 1);
+ if (ret)
+ return ret;
+
+ ret = skl_compute_wm_levels(crtc_state, plane_state,
+ ddb_blocks, &wm_params, wm->uv_wm);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane_id plane_id = plane->id;
+ int ret;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 0);
+ if (ret)
+ return ret;
+
+ if (fb->format->is_yuv && fb->format->num_planes > 1) {
+ ret = skl_build_plane_wm_uv(crtc_state, plane_state,
+ plane_id);
+ if (ret)
+ return ret;
}
-exit:
- trans_wm->plane_en = false;
+ return 0;
+}
+
+static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
+ int ret;
+
+ /* Watermarks calculated in master */
+ if (plane_state->slave)
+ return 0;
+
+ if (plane_state->linked_plane) {
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane_id y_plane_id = plane_state->linked_plane->id;
+
+ WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
+ WARN_ON(!fb->format->is_yuv ||
+ fb->format->num_planes == 1);
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ y_plane_id, 0);
+ if (ret)
+ return ret;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 1);
+ if (ret)
+ return ret;
+ } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
- struct skl_ddb_allocation *ddb,
struct skl_pipe_wm *pipe_wm)
{
- struct drm_device *dev = cstate->base.crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct drm_crtc_state *crtc_state = &cstate->base;
- const struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_plane *plane;
const struct drm_plane_state *pstate;
- struct skl_plane_wm *wm;
int ret;
/*
@@ -4928,44 +5109,15 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
const struct intel_plane_state *intel_pstate =
to_intel_plane_state(pstate);
- enum plane_id plane_id = to_intel_plane(plane)->id;
- struct skl_wm_params wm_params;
- enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
- uint16_t ddb_blocks;
-
- wm = &pipe_wm->planes[plane_id];
- ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
- ret = skl_compute_plane_wm_params(dev_priv, cstate,
- intel_pstate, &wm_params, 0);
- if (ret)
- return ret;
-
- ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
- intel_pstate, &wm_params, wm, 0);
+ if (INTEL_GEN(dev_priv) >= 11)
+ ret = icl_build_plane_wm(pipe_wm,
+ cstate, intel_pstate);
+ else
+ ret = skl_build_plane_wm(pipe_wm,
+ cstate, intel_pstate);
if (ret)
return ret;
-
- skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
- ddb_blocks, &wm->trans_wm);
-
- /* uv plane watermarks must also be validated for NV12/Planar */
- if (wm_params.is_planar) {
- memset(&wm_params, 0, sizeof(struct skl_wm_params));
- wm->is_planar = true;
-
- ret = skl_compute_plane_wm_params(dev_priv, cstate,
- intel_pstate,
- &wm_params, 1);
- if (ret)
- return ret;
-
- ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
- intel_pstate, &wm_params,
- wm, 1);
- if (ret)
- return ret;
- }
}
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
@@ -4978,9 +5130,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
if (entry->end)
- I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+ I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
else
- I915_WRITE(reg, 0);
+ I915_WRITE_FW(reg, 0);
}
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -4995,19 +5147,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
}
- I915_WRITE(reg, val);
+ I915_WRITE_FW(reg, val);
}
-static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb,
- enum plane_id plane_id)
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum pipe pipe = intel_crtc->pipe;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_ddb_entry *ddb_uv =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
@@ -5016,35 +5171,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
&wm->trans_wm);
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- /* FIXME: add proper NV12 support for ICL. */
- if (INTEL_GEN(dev_priv) >= 11)
- return skl_ddb_entry_write(dev_priv,
- PLANE_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- if (wm->is_planar) {
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->uv_plane[pipe][plane_id]);
+ if (INTEL_GEN(dev_priv) >= 11) {
skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- } else {
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+ PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+ return;
}
+
+ if (wm->is_planar)
+ swap(ddb_y, ddb_uv);
+
+ skl_ddb_entry_write(dev_priv,
+ PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+ skl_ddb_entry_write(dev_priv,
+ PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
}
-static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb)
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum pipe pipe = intel_crtc->pipe;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
@@ -5052,22 +5204,30 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
}
skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
- &ddb->plane[pipe][PLANE_CURSOR]);
+ skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2)
{
- if (l1->plane_en != l2->plane_en)
- return false;
+ return l1->plane_en == l2->plane_en &&
+ l1->plane_res_l == l2->plane_res_l &&
+ l1->plane_res_b == l2->plane_res_b;
+}
- /* If both planes aren't enabled, the rest shouldn't matter */
- if (!l1->plane_en)
- return true;
+static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
+ const struct skl_plane_wm *wm1,
+ const struct skl_plane_wm *wm2)
+{
+ int level, max_level = ilk_wm_max_level(dev_priv);
+
+ for (level = 0; level <= max_level; level++) {
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
+ !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+ return false;
+ }
- return (l1->plane_res_l == l2->plane_res_l &&
- l1->plane_res_b == l2->plane_res_b);
+ return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -5076,16 +5236,15 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
- const struct skl_ddb_entry **entries,
- const struct skl_ddb_entry *ddb,
- int ignore)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry entries[],
+ int num_entries, int ignore_idx)
{
- enum pipe pipe;
+ int i;
- for_each_pipe(dev_priv, pipe) {
- if (pipe != ignore && entries[pipe] &&
- skl_ddb_entries_overlap(ddb, entries[pipe]))
+ for (i = 0; i < num_entries; i++) {
+ if (i != ignore_idx &&
+ skl_ddb_entries_overlap(ddb, &entries[i]))
return true;
}
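A hypothetical caller, to make the half-open interval semantics concrete (all values invented):

const struct skl_ddb_entry proposed = { .start = 0, .end = 256 };
const struct skl_ddb_entry committed[] = {
	{ .start = 0,   .end = 300 },	/* this pipe's stale entry, skipped */
	{ .start = 256, .end = 512 },	/* another pipe */
};

/* false: index 0 is ignored and [0, 256) only touches [256, 512) */
bool clash = skl_ddb_allocation_overlaps(&proposed, committed,
					 ARRAY_SIZE(committed), 0);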
@@ -5095,13 +5254,12 @@ bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
- struct skl_ddb_allocation *ddb, /* out */
bool *changed /* out */)
{
struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
int ret;
- ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+ ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
if (ret)
return ret;
@@ -5127,32 +5285,29 @@ pipes_modified(struct drm_atomic_state *state)
}
static int
-skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_atomic_state *state = cstate->base.state;
- struct drm_device *dev = state->dev;
- struct drm_crtc *crtc = cstate->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
- struct drm_plane_state *plane_state;
- struct drm_plane *plane;
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
- drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
- enum plane_id plane_id = to_intel_plane(plane)->id;
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
- if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
- &new_ddb->plane[pipe][plane_id]) &&
- skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
- &new_ddb->uv_plane[pipe][plane_id]))
+ if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
+ skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
continue;
- plane_state = drm_atomic_get_plane_state(state, plane);
+ plane_state = intel_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
}
return 0;
@@ -5164,18 +5319,21 @@ skl_compute_ddb(struct drm_atomic_state *state)
const struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+ struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
int ret, i;
memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
- for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
- ret = skl_allocate_pipe_ddb(cstate, ddb);
+ for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
if (ret)
return ret;
- ret = skl_ddb_add_affected_planes(cstate);
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
if (ret)
return ret;
}
@@ -5184,38 +5342,31 @@ skl_compute_ddb(struct drm_atomic_state *state)
}
static void
-skl_print_wm_changes(const struct drm_atomic_state *state)
+skl_print_wm_changes(struct intel_atomic_state *state)
{
- const struct drm_device *dev = state->dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- const struct drm_crtc *crtc;
- const struct drm_crtc_state *cstate;
- const struct intel_plane *intel_plane;
- const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
- const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
int i;
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
-
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- enum plane_id plane_id = intel_plane->id;
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
- old = &old_ddb->plane[pipe][plane_id];
- new = &new_ddb->plane[pipe][plane_id];
+ old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
+ new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
if (skl_ddb_entry_equal(old, new))
continue;
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
- intel_plane->base.base.id,
- intel_plane->base.name,
- old->start, old->end,
- new->start, new->end);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end,
+ new->start, new->end);
}
}
}
@@ -5310,6 +5461,66 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
return 0;
}
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issue as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout as the
+ * default value of the watermarks registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ /*
+ * Force a full wm update for every plane on modeset.
+ * Required because the reset value of the wm registers
+ * is non-zero, whereas we want all disabled planes to
+ * have zero watermarks. So if we turn off the relevant
+ * power well the hardware state will go out of sync
+ * with the software state.
+ */
+ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+ skl_plane_wm_equals(dev_priv,
+ &old_crtc_state->wm.skl.optimal.planes[plane_id],
+ &new_crtc_state->wm.skl.optimal.planes[plane_id]))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
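The three steps read more concretely as a legacy cursor sequence from userspace (hypothetical libdrm calls, not part of the patch):

drmModeSetCursor(fd, crtc_id, bo_handle, 64, 64);	/* 1. enable cursor */
drmModeMoveCursor(fd, crtc_id, -64, -64);		/* 2. fully offscreen */
drmModeSetCursor(fd, crtc_id, 0, 0, 0);			/* 3. disable cursor */

Between steps 2 and 3 the cursor is active but invisible, so only the forced .disable_plane() in step 3 zeroes its watermark registers.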
static int
skl_compute_wm(struct drm_atomic_state *state)
{
@@ -5349,8 +5560,12 @@ skl_compute_wm(struct drm_atomic_state *state)
&to_intel_crtc_state(crtc->state)->wm.skl.optimal;
pipe_wm = &intel_cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
- &results->ddb, &changed);
+ ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
+ if (ret)
+ return ret;
+
+ ret = skl_wm_add_affected_planes(intel_state,
+ to_intel_crtc(crtc));
if (ret)
return ret;
@@ -5364,7 +5579,7 @@ skl_compute_wm(struct drm_atomic_state *state)
intel_cstate->update_wm_pre = true;
}
- skl_print_wm_changes(state);
+ skl_print_wm_changes(intel_state);
return 0;
}
@@ -5375,23 +5590,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id != PLANE_CURSOR)
- skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
- ddb, plane_id);
- else
- skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
- ddb);
- }
}
static void skl_initial_wm(struct intel_atomic_state *state,
@@ -5401,8 +5605,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_values *results = &state->wm_results;
- struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
- enum pipe pipe = intel_crtc->pipe;
if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
return;
@@ -5412,11 +5614,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
if (cstate->base.active_changed)
skl_atomic_update_crtc_wm(state, cstate);
- memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
- sizeof(hw_vals->ddb.uv_plane[pipe]));
- memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
- sizeof(hw_vals->ddb.plane[pipe]));
-
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5567,13 +5764,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
if (dev_priv->active_crtcs) {
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
- } else {
- /*
- * Easy/common case; just sanitize DDB now if everything off
- * Keep dbuf slice info intact
- */
- memset(ddb->plane, 0, sizeof(ddb->plane));
- memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
}
}
@@ -6117,14 +6307,8 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
u32 val;
- /* Display WA #0477 WaDisableIPC: skl */
- if (IS_SKYLAKE(dev_priv))
- dev_priv->ipc_enabled = false;
-
- /* Display WA #1141: SKL:all KBL:all CFL */
- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
- !dev_priv->dram_info.symmetric_memory)
- dev_priv->ipc_enabled = false;
+ if (!HAS_IPC(dev_priv))
+ return;
val = I915_READ(DISP_ARB_CTL2);
@@ -6138,11 +6322,15 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
void intel_init_ipc(struct drm_i915_private *dev_priv)
{
- dev_priv->ipc_enabled = false;
if (!HAS_IPC(dev_priv))
return;
- dev_priv->ipc_enabled = true;
+ /* Display WA #1141: SKL:all KBL:all CFL */
+ if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory;
+ else
+ dev_priv->ipc_enabled = true;
+
intel_enable_ipc(dev_priv);
}
@@ -8736,6 +8924,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
/* This is not an Wa. Enable to reduce Sampler power */
I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+
+ /* WaEnable32PlaneMode:icl */
+ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
+ _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
}
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9313,8 +9505,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
- intel_fbc_init(dev_priv);
-
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
i915_pineview_get_mem_freq(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b6838b525502..419e56342523 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -71,6 +71,14 @@ static bool psr_global_enabled(u32 debug)
static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
+ /* Disable PSR2 by default for all platforms */
+ if (i915_modparams.enable_psr == -1)
+ return false;
+
+ /* Cannot enable DSC and PSR2 simultaneously */
+ WARN_ON(crtc_state->dsc_params.compression_enable &&
+ crtc_state->has_psr2);
+
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
@@ -79,25 +87,42 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
}
}
+static int edp_psr_shift(enum transcoder cpu_transcoder)
+{
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ return EDP_PSR_TRANSCODER_A_SHIFT;
+ case TRANSCODER_B:
+ return EDP_PSR_TRANSCODER_B_SHIFT;
+ case TRANSCODER_C:
+ return EDP_PSR_TRANSCODER_C_SHIFT;
+ default:
+ MISSING_CASE(cpu_transcoder);
+ /* fallthrough */
+ case TRANSCODER_EDP:
+ return EDP_PSR_TRANSCODER_EDP_SHIFT;
+ }
+}
+
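A quick illustration of the indirection (assumed usage, mirroring the loop below; the shift exists because EDP_PSR_IIR groups its bits per transcoder rather than by enum value):

u32 err_b = EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_B));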
void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
{
u32 debug_mask, mask;
+ enum transcoder cpu_transcoder;
+ u32 transcoders = BIT(TRANSCODER_EDP);
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ transcoders |= BIT(TRANSCODER_A) |
+ BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C);
+
+ debug_mask = 0;
+ mask = 0;
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ int shift = edp_psr_shift(cpu_transcoder);
- mask = EDP_PSR_ERROR(TRANSCODER_EDP);
- debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
- EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
-
- if (INTEL_GEN(dev_priv) >= 8) {
- mask |= EDP_PSR_ERROR(TRANSCODER_A) |
- EDP_PSR_ERROR(TRANSCODER_B) |
- EDP_PSR_ERROR(TRANSCODER_C);
-
- debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
- EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
- EDP_PSR_POST_EXIT(TRANSCODER_B) |
- EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
- EDP_PSR_POST_EXIT(TRANSCODER_C) |
- EDP_PSR_PRE_ENTRY(TRANSCODER_C);
+ mask |= EDP_PSR_ERROR(shift);
+ debug_mask |= EDP_PSR_POST_EXIT(shift) |
+ EDP_PSR_PRE_ENTRY(shift);
}
if (debug & I915_PSR_DEBUG_IRQ)
@@ -148,6 +173,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
u32 transcoders = BIT(TRANSCODER_EDP);
enum transcoder cpu_transcoder;
ktime_t time_ns = ktime_get();
+ u32 mask = 0;
if (INTEL_GEN(dev_priv) >= 8)
transcoders |= BIT(TRANSCODER_A) |
@@ -155,18 +181,32 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
BIT(TRANSCODER_C);
for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- /* FIXME: Exit PSR and link train manually when this happens. */
- if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
- DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
- transcoder_name(cpu_transcoder));
+ int shift = edp_psr_shift(cpu_transcoder);
+
+ if (psr_iir & EDP_PSR_ERROR(shift)) {
+ DRM_WARN("[transcoder %s] PSR aux error\n",
+ transcoder_name(cpu_transcoder));
+
+ dev_priv->psr.irq_aux_error = true;
- if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
+ /*
+ * If this interrupt is not masked it will keep
+ * firing so fast that it prevents the scheduled
+ * work from running.
+ * Also, after a PSR error we don't want to arm
+ * PSR again, so we don't care about unmasking the
+ * interrupt or clearing irq_aux_error.
+ */
+ mask |= EDP_PSR_ERROR(shift);
+ }
+
+ if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
dev_priv->psr.last_entry_attempt = time_ns;
DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
- if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
+ if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
dev_priv->psr.last_exit = time_ns;
DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
transcoder_name(cpu_transcoder));
@@ -180,6 +220,13 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
}
}
}
+
+ if (mask) {
+ mask |= I915_READ(EDP_PSR_IMR);
+ I915_WRITE(EDP_PSR_IMR, mask);
+
+ schedule_work(&dev_priv->psr.work);
+ }
}
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
@@ -294,7 +341,8 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
psr_vsc.sdp_header.HB3 = 0x8;
}
- intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
+ intel_dig_port->write_infoframe(&intel_dig_port->base,
+ crtc_state,
DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
@@ -458,6 +506,16 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!dev_priv->psr.sink_psr2_support)
return false;
+ /*
+ * DSC and PSR2 cannot be enabled simultaneously. If a requested
+ * resolution requires DSC to be enabled, priority is given to DSC
+ * over PSR2.
+ */
+ if (crtc_state->dsc_params.compression_enable) {
+ DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
+ return false;
+ }
+
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
@@ -503,10 +561,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- if (IS_HASWELL(dev_priv) &&
- I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
- S3D_ENABLE) {
- DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+ if (dev_priv->psr.sink_not_reliable) {
+ DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
return;
}
@@ -553,11 +609,31 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
dev_priv->psr.active = true;
}
+static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ static const i915_reg_t regs[] = {
+ [TRANSCODER_A] = CHICKEN_TRANS_A,
+ [TRANSCODER_B] = CHICKEN_TRANS_B,
+ [TRANSCODER_C] = CHICKEN_TRANS_C,
+ [TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
+ };
+
+ WARN_ON(INTEL_GEN(dev_priv) < 9);
+
+ if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
+ !regs[cpu_transcoder].reg))
+ cpu_transcoder = TRANSCODER_A;
+
+ return regs[cpu_transcoder];
+}
+
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 mask;
/* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
* use hardcoded values PSR AUX transactions
@@ -566,37 +642,34 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
hsw_psr_setup_aux(intel_dp);
if (dev_priv->psr.psr2_enabled) {
- u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));
+ i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
+ cpu_transcoder);
+ u32 chicken = I915_READ(reg);
- if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
| PSR2_ADD_VERTICAL_LINE_COUNT);
else
chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
- I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
-
- I915_WRITE(EDP_PSR_DEBUG,
- EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP |
- EDP_PSR_DEBUG_MASK_MAX_SLEEP |
- EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
- } else {
- /*
- * Per Spec: Avoid continuous PSR exit by masking MEMUP
- * and HPD. also mask LPSP to avoid dependency on other
- * drivers that might block runtime_pm besides
- * preventing other hw tracking issues now we can rely
- * on frontbuffer tracking.
- */
- I915_WRITE(EDP_PSR_DEBUG,
- EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP |
- EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
- EDP_PSR_DEBUG_MASK_MAX_SLEEP);
+ I915_WRITE(reg, chicken);
}
+
+ /*
+ * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
+ * mask LPSP to avoid a dependency on other drivers that might block
+ * runtime_pm; besides that, masking prevents other hw tracking issues
+ * now that we can rely on frontbuffer tracking.
+ */
+ mask = EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+
+ I915_WRITE(EDP_PSR_DEBUG, mask);
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
@@ -646,6 +719,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.prepared = true;
+ dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
if (psr_global_enabled(dev_priv->psr.debug))
intel_psr_enable_locked(dev_priv, crtc_state);
@@ -656,49 +730,34 @@ unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void
-intel_psr_disable_source(struct intel_dp *intel_dp)
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (dev_priv->psr.active) {
- i915_reg_t psr_status;
- u32 psr_status_mask;
-
- if (dev_priv->psr.psr2_enabled) {
- psr_status = EDP_PSR2_STATUS;
- psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
-
- I915_WRITE(EDP_PSR2_CTL,
- I915_READ(EDP_PSR2_CTL) &
- ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
-
- } else {
- psr_status = EDP_PSR_STATUS;
- psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
-
- I915_WRITE(EDP_PSR_CTL,
- I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
- }
+ u32 val;
- /* Wait till PSR is idle */
- if (intel_wait_for_register(dev_priv,
- psr_status, psr_status_mask, 0,
- 2000))
- DRM_ERROR("Timed out waiting for PSR Idle State\n");
+ if (!dev_priv->psr.active) {
+ if (INTEL_GEN(dev_priv) >= 9)
+ WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+ WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ return;
+ }
- dev_priv->psr.active = false;
+ if (dev_priv->psr.psr2_enabled) {
+ val = I915_READ(EDP_PSR2_CTL);
+ WARN_ON(!(val & EDP_PSR2_ENABLE));
+ I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
} else {
- if (dev_priv->psr.psr2_enabled)
- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- else
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ val = I915_READ(EDP_PSR_CTL);
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+ I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
}
+ dev_priv->psr.active = false;
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ i915_reg_t psr_status;
+ u32 psr_status_mask;
lockdep_assert_held(&dev_priv->psr.lock);
@@ -707,7 +766,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Disabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
- intel_psr_disable_source(intel_dp);
+
+ intel_psr_exit(dev_priv);
+
+ if (dev_priv->psr.psr2_enabled) {
+ psr_status = EDP_PSR2_STATUS;
+ psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
+ } else {
+ psr_status = EDP_PSR_STATUS;
+ psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+ }
+
+ /* Wait till PSR is idle */
+ if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
+ 2000))
+ DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -893,6 +966,16 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
return ret;
}
+static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+{
+ struct i915_psr *psr = &dev_priv->psr;
+
+ intel_psr_disable_locked(psr->dp);
+ psr->sink_not_reliable = true;
+ /* let's make sure the sink is awake */
+ drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+}
+
static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -903,6 +986,9 @@ static void intel_psr_work(struct work_struct *work)
if (!dev_priv->psr.enabled)
goto unlock;
+ if (READ_ONCE(dev_priv->psr.irq_aux_error))
+ intel_psr_handle_irq(dev_priv);
+
/*
* We have to make sure PSR is ready for re-enable
* otherwise it keeps disabled until next full enable/disable cycle.
@@ -925,25 +1011,6 @@ unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- if (!dev_priv->psr.active)
- return;
-
- if (dev_priv->psr.psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
- WARN_ON(!(val & EDP_PSR2_ENABLE));
- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
- } else {
- val = I915_READ(EDP_PSR_CTL);
- WARN_ON(!(val & EDP_PSR_ENABLE));
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
- }
- dev_priv->psr.active = false;
-}
-
/**
* intel_psr_invalidate - Invalidate PSR
* @dev_priv: i915 device
@@ -960,9 +1027,6 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_crtc *crtc;
- enum pipe pipe;
-
if (!CAN_PSR(dev_priv))
return;
@@ -975,10 +1039,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
return;
}
- crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
if (frontbuffer_bits)
@@ -1003,9 +1064,6 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_crtc *crtc;
- enum pipe pipe;
-
if (!CAN_PSR(dev_priv))
return;
@@ -1018,28 +1076,21 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
return;
}
- crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
if (frontbuffer_bits) {
- if (dev_priv->psr.psr2_enabled) {
- intel_psr_exit(dev_priv);
- } else {
- /*
- * Display WA #0884: all
- * This documented WA for bxt can be safely applied
- * broadly so we can force HW tracking to exit PSR
- * instead of disabling and re-enabling.
- * Workaround tells us to write 0 to CUR_SURFLIVE_A,
- * but it makes more sense write to the current active
- * pipe.
- */
- I915_WRITE(CURSURFLIVE(pipe), 0);
- }
+ /*
+ * Display WA #0884: all
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * The workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense to write to the currently
+ * active pipe.
+ */
+ I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
}
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
@@ -1056,6 +1107,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
+ u32 val;
+
if (!HAS_PSR(dev_priv))
return;
@@ -1065,11 +1118,24 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (!dev_priv->psr.sink_support)
return;
- if (i915_modparams.enable_psr == -1) {
- i915_modparams.enable_psr = dev_priv->vbt.psr.enable;
+ if (i915_modparams.enable_psr == -1)
+ if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
+ i915_modparams.enable_psr = 0;
- /* Per platform default: all disabled. */
- i915_modparams.enable_psr = 0;
+ /*
+ * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+ * will still keep the error set even after the reset done in the
+ * irq_preinstall and irq_uninstall hooks.
+ * Enabling PSR in this situation causes the screen to freeze the
+ * first time the PSR HW tries to activate, so let's keep PSR disabled
+ * to avoid any rendering problems.
+ */
+ val = I915_READ(EDP_PSR_IIR);
+ val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
+ if (val) {
+ DRM_DEBUG_KMS("PSR interruption error set\n");
+ dev_priv->psr.sink_not_reliable = true;
+ return;
}
/* Set link_standby x link_off defaults */
@@ -1109,6 +1175,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
}
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
@@ -1126,12 +1193,27 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (val & ~errors)
DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
val & ~errors);
- if (val & errors)
+ if (val & errors) {
intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ }
/* clear status register */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
-
- /* TODO: handle PSR2 errors */
exit:
mutex_unlock(&psr->lock);
}
+
+bool intel_psr_enabled(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ bool ret;
+
+ if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ return false;
+
+ mutex_lock(&dev_priv->psr.lock);
+ ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
+ mutex_unlock(&dev_priv->psr.lock);
+
+ return ret;
+}
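A hypothetical caller of the new intel_psr_enabled() helper, to show the intended contract: the helper takes psr.lock itself, so callers need no i915 locking of their own. The function below is illustrative only.

static void example_log_psr_state(struct intel_dp *intel_dp)
{
	if (intel_psr_enabled(intel_dp))
		DRM_DEBUG_KMS("PSR enabled on this eDP link\n");
}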
diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/intel_quirks.c
new file mode 100644
index 000000000000..ec2b0fc92b8b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_quirks.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/dmi.h>
+
+#include "intel_drv.h"
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
+/* Some VBTs incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
+ DRM_INFO("applying backlight present quirk\n");
+}
+
+/*
+ * Toshiba Satellite P50-C-18C requires a T12 delay of at least 800 ms,
+ * which is 300 ms greater than the eDP spec T12 minimum.
+ */
+static void quirk_increase_t12_delay(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_INCREASE_T12_DELAY;
+ DRM_INFO("Applying T12 delay quirk\n");
+}
+
+/*
+ * GeminiLake NUC HDMI outputs require additional off time;
+ * this allows the onboard retimer to correctly sync to the signal.
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+ DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
+struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+ void (*hook)(struct drm_i915_private *i915);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+ void (*hook)(struct drm_i915_private *i915);
+ const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+ DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+ return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+ {
+ .dmi_id_list = &(const struct dmi_system_id[]) {
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "NCR Corporation",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
+ },
+ },
+ { } /* terminating entry */
+ },
+ .hook = quirk_invert_brightness,
+ },
+};
+
+static struct intel_quirk intel_quirks[] = {
+ /* Lenovo U160 cannot use SSC on LVDS */
+ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Acer Aspire 5336 */
+ { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+ /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+ { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Acer C720 Chromebook (Core i3 4005U) */
+ { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Apple Macbook 2,1 (Core 2 T7400) */
+ { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
+
+ /* Apple Macbook 4,1 */
+ { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
+ /* Toshiba CB35 Chromebook (Celeron 2955U) */
+ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+ /* HP Chromebook 14 (Celeron 2955U) */
+ { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+ /* Dell Chromebook 11 */
+ { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
+
+ /* Dell Chromebook 11 (2015 version) */
+ { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
+
+ /* Toshiba Satellite P50-C-18C */
+ { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+ /* GeminiLake NUC */
+ { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ /* ASRock ITX */
+ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+};
+
+void intel_init_quirks(struct drm_i915_private *i915)
+{
+ struct pci_dev *d = i915->drm.pdev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+ struct intel_quirk *q = &intel_quirks[i];
+
+ if (d->device == q->device &&
+ (d->subsystem_vendor == q->subsystem_vendor ||
+ q->subsystem_vendor == PCI_ANY_ID) &&
+ (d->subsystem_device == q->subsystem_device ||
+ q->subsystem_device == PCI_ANY_ID))
+ q->hook(i915);
+ }
+ for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+ if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+ intel_dmi_quirks[i].hook(i915);
+ }
+}
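For orientation, the consumer side of these tables: the quirk hooks only set bits in i915->quirks, which are then tested at the point of use. The helper below is illustrative, not part of the patch.

static bool example_brightness_inverted(struct drm_i915_private *i915)
{
	/* bit set by quirk_invert_brightness() via intel_init_quirks() */
	return i915->quirks & QUIRK_INVERT_BRIGHTNESS;
}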
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d0ef50bf930a..c5eb26a7ee79 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -91,6 +91,7 @@ static int
gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
+ int i;
/*
* read/write caches:
@@ -127,12 +128,43 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
cmd |= MI_INVALIDATE_ISP;
}
- cs = intel_ring_begin(rq, 2);
+ i = 2;
+ if (mode & EMIT_INVALIDATE)
+ i += 20;
+
+ cs = intel_ring_begin(rq, i);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
- *cs++ = MI_NOOP;
+
+ /*
+ * A random delay to let the CS invalidate take effect? Without this
+ * delay, the GPU relocation path fails as the CS does not see
+ * the updated contents. Just as important, if we apply the flushes
+ * to the EMIT_FLUSH branch (i.e. immediately after the relocation
+ * write and before the invalidate on the next batch), the relocations
+ * still fail. This implies that it is a delay following the
+ * invalidate that is required to reset the caches, as opposed to a
+ * delay to ensure the memory is written.
+ */
+ if (mode & EMIT_INVALIDATE) {
+ *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ for (i = 0; i < 12; i++)
+ *cs++ = MI_FLUSH;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+ }
+
+ *cs++ = cmd;
+
intel_ring_advance(rq, cs);
return 0;
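The dword accounting above has to match what is emitted exactly; spelled out (reference arithmetic only, not new code):

/*
 * base:        2 dwords (cmd, cmd)
 * invalidate: 20 dwords (4 PIPE_CONTROL + 12 MI_FLUSH + 4 PIPE_CONTROL)
 * total:      22 == the "2 + 20" reserved via intel_ring_begin()
 */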
@@ -178,8 +210,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
static int
intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs;
cs = intel_ring_begin(rq, 6);
@@ -212,8 +243,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
int ret;
@@ -282,8 +312,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
/*
@@ -495,6 +524,13 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
+ if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
+ I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+ if (HAS_VEBOX(dev_priv))
+ I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
+ }
+
/* Enforce ordering by reading HEAD register back */
I915_READ_HEAD(engine);
@@ -512,10 +548,11 @@ static int init_ring_common(struct intel_engine_cs *engine)
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
-
intel_ring_update_space(ring);
+
+ /* First wake the ring up to an empty/idle ring */
I915_WRITE_HEAD(engine, ring->head);
- I915_WRITE_TAIL(engine, ring->tail);
+ I915_WRITE_TAIL(engine, ring->head);
(void)I915_READ_TAIL(engine);
I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
@@ -540,6 +577,12 @@ static int init_ring_common(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) > 2)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+ /* Now awake, let it get started */
+ if (ring->tail != ring->head) {
+ I915_WRITE_TAIL(engine, ring->tail);
+ (void)I915_READ_TAIL(engine);
+ }
+
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_wakeup(engine);
out:
@@ -574,7 +617,9 @@ static void skip_request(struct i915_request *rq)
static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
{
- GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);
+ GEM_TRACE("%s request global=%d, current=%d\n",
+ engine->name, rq ? rq->global_seqno : 0,
+ intel_engine_get_seqno(engine));
/*
* Try to restore the logical GPU state to match the continuation
@@ -606,7 +651,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
- ret = intel_ctx_workarounds_emit(rq);
+ ret = intel_engine_emit_ctx_wa(rq);
if (ret != 0)
return ret;
@@ -624,8 +669,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
-
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (IS_GEN(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
@@ -707,9 +750,18 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline.requests, link) {
GEM_BUG_ON(!request->global_seqno);
- if (!i915_request_completed(request))
- dma_fence_set_error(&request->fence, -EIO);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &request->fence.flags))
+ continue;
+
+ dma_fence_set_error(&request->fence, -EIO);
}
+
+ intel_write_status_page(engine,
+ I915_GEM_HWS_INDEX,
+ intel_engine_last_submit(engine));
+
/* Remaining _unready_ requests will be nop'ed when submitted */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -937,7 +989,7 @@ i965_emit_bb_start(struct i915_request *rq,
}
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
-#define I830_BATCH_LIMIT (256*1024)
+#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
@@ -945,7 +997,9 @@ i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
+ u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+
+ GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1021,8 +1075,7 @@ i915_emit_bb_start(struct i915_request *rq,
int intel_ring_pin(struct intel_ring *ring)
{
struct i915_vma *vma = ring->vma;
- enum i915_map_type map =
- HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC;
+ enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
unsigned int flags;
void *addr;
int ret;
@@ -1403,7 +1456,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
struct i915_timeline *timeline;
struct intel_ring *ring;
- unsigned int size;
int err;
intel_engine_setup_common(engine);
@@ -1428,21 +1480,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
- size = PAGE_SIZE;
- if (HAS_BROKEN_CS_TLB(engine->i915))
- size = I830_WA_SIZE;
- err = intel_engine_create_scratch(engine, size);
- if (err)
- goto err_unpin;
-
err = intel_engine_init_common(engine);
if (err)
- goto err_scratch;
+ goto err_unpin;
return 0;
-err_scratch:
- intel_engine_cleanup_scratch(engine);
err_unpin:
intel_ring_unpin(ring);
err_ring:
@@ -1516,7 +1559,7 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@@ -1625,7 +1668,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfa585712c2..72edaa7ff411 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
@@ -15,6 +15,7 @@
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_gpu_commands.h"
+#include "intel_workarounds.h"
struct drm_printer;
struct i915_sched_attr;
@@ -93,11 +94,11 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
#define I915_MAX_SUBSLICES 8
#define instdone_slice_mask(dev_priv__) \
- (INTEL_GEN(dev_priv__) == 7 ? \
+ (IS_GEN7(dev_priv__) ? \
1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
#define instdone_subslice_mask(dev_priv__) \
- (INTEL_GEN(dev_priv__) == 7 ? \
+ (IS_GEN7(dev_priv__) ? \
1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
@@ -190,11 +191,22 @@ enum intel_engine_id {
};
struct i915_priolist {
+ struct list_head requests[I915_PRIORITY_COUNT];
struct rb_node node;
- struct list_head requests;
+ unsigned long used;
int priority;
};
+#define priolist_for_each_request(it, plist, idx) \
+ for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+ list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+ for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+ list_for_each_entry_safe(it, n, \
+ &(plist)->requests[idx - 1], \
+ sched.link)
+
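A minimal sketch of how the consume variant is meant to be driven (caller shape assumed for illustration; in this series the real user is the execlists dequeue path). Each set bit in ->used marks a non-empty bucket in ->requests[]; ffs() returns the lowest set bit (1-based), the inner loop walks that bucket, and the outer loop clears the bit once the bucket has been consumed:

static void example_drain_priolist(struct i915_priolist *p)
{
	struct i915_request *rq, *rn;
	int idx;

	priolist_for_each_request_consume(rq, rn, p, idx) {
		/* unlink rq so requests[idx - 1] is genuinely emptied */
		list_del_init(&rq->sched.link);
	}
}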
struct st_preempt_hang {
struct completion completion;
bool inject_hang;
@@ -302,13 +314,6 @@ struct intel_engine_execlists {
struct rb_root_cached queue;
/**
- * @csb_read: control register for Context Switch buffer
- *
- * Note this register is always in mmio.
- */
- u32 __iomem *csb_read;
-
- /**
* @csb_write: control register for Context Switch buffer
*
* Note this register may be either mmio or HWSP shadow.
@@ -328,15 +333,6 @@ struct intel_engine_execlists {
u32 preempt_complete_status;
/**
- * @csb_write_reset: reset value for CSB write pointer
- *
- * As the CSB write pointer maybe either in HWSP or as a field
- * inside an mmio register, we want to reprogram it slightly
- * differently to avoid later confusion.
- */
- u32 csb_write_reset;
-
- /**
* @csb_head: context status buffer head
*/
u8 csb_head;
@@ -440,7 +436,9 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
- struct i915_vma *scratch;
+ struct i915_wa_list ctx_wa_list;
+ struct i915_wa_list wa_list;
+ struct i915_wa_list whitelist;
u32 irq_keep_mask; /* always keep these interrupts */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -487,11 +485,10 @@ struct intel_engine_cs {
*/
void (*submit_request)(struct i915_request *rq);
- /* Call when the priority on a request has changed and it and its
+ /*
+ * Call when the priority on a request has changed and it and its
* dependencies may need rescheduling. Note the request itself may
* not be ready to run!
- *
- * Called under the struct_mutex.
*/
void (*schedule)(struct i915_request *request,
const struct i915_sched_attr *attr);
@@ -898,10 +895,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size);
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
-
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 0fdabce647ab..4350a5270423 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -76,6 +76,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
+ case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
+ return "TRANSCODER_EDP_VDSC";
case POWER_DOMAIN_TRANSCODER_DSI_A:
return "TRANSCODER_DSI_A";
case POWER_DOMAIN_TRANSCODER_DSI_C:
@@ -208,7 +210,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
is_enabled = true;
- for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
+ for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
if (power_well->desc->always_on)
continue;
@@ -436,6 +438,15 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+ /* Display WA #1178: icl */
+ if (IS_ICELAKE(dev_priv) &&
+ pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+ !intel_bios_is_port_edp(dev_priv, port)) {
+ val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+ val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
+ I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+ }
}
static void
@@ -456,6 +467,25 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
+#define ICL_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+static void
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+ u32 val;
+
+ val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+ val &= ~DP_AUX_CH_CTL_TBT_IO;
+ if (power_well->desc->hsw.is_tc_tbt)
+ val |= DP_AUX_CH_CTL_TBT_IO;
+ I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+
+ hsw_power_well_enable(dev_priv, power_well);
+}
+
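ICL_AUX_PW_TO_CH() only holds if the AUX power-well indices and the aux_ch enum advance in lockstep; a compile-time assertion like the one below (illustrative, assuming both enumerations are contiguous) would document that invariant:

/* hypothetical sanity check, e.g. in an init path */
BUILD_BUG_ON(ICL_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_B) != AUX_CH_B);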
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -465,11 +495,25 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ enum i915_power_well_id id = power_well->desc->id;
int pw_idx = power_well->desc->hsw.idx;
u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
HSW_PWR_WELL_CTL_STATE(pw_idx);
+ u32 val;
- return (I915_READ(regs->driver) & mask) == mask;
+ val = I915_READ(regs->driver);
+
+ /*
+ * On GEN9 big core, due to a DMC bug, the driver's request bits for
+ * PW1 and the MISC_IO PW will not be restored, so check instead for the
+ * BIOS's own request bits, which are forced-on for these power wells
+ * when exiting DC5/6.
+ */
+ if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
+ (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
+ val |= I915_READ(regs->bios);
+
+ return (val & mask) == mask;
}
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
@@ -551,7 +595,9 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
- if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
+ else if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_EN_DC9;
else
mask |= DC_STATE_EN_UPTO_DC6;
@@ -624,8 +670,13 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
assert_can_enable_dc9(dev_priv);
DRM_DEBUG_KMS("Enabling DC9\n");
-
- intel_power_sequencer_reset(dev_priv);
+ /*
+ * Power sequencer reset is not needed on
+ * platforms with South Display Engine on PCH,
+ * because PPS registers are always on.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv))
+ intel_power_sequencer_reset(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
@@ -707,7 +758,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-static void skl_enable_dc6(struct drm_i915_private *dev_priv)
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc6(dev_priv);
@@ -808,6 +859,14 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
if (IS_GEN9_LP(dev_priv))
bxt_verify_ddi_phy_power_wells(dev_priv);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ /*
+ * DMC retains HW context only for port A; the combo PHY's
+ * HW context for port B is lost after DC transitions,
+ * so we need to restore it manually.
+ */
+ icl_combo_phys_init(dev_priv);
}
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1608,7 +1667,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_display_power_domain_str(domain));
power_domains->domain_use_count[domain]--;
- for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
+ for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
intel_power_well_put(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
@@ -1971,9 +2030,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
*/
#define ICL_PW_2_POWER_DOMAINS ( \
ICL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
BIT_ULL(POWER_DOMAIN_INIT))
/*
- * - eDP/DSI VDSC
* - KVMR (HW control)
*/
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
@@ -2041,7 +2100,7 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2058,7 +2117,7 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = {
static const struct i915_power_well_desc i830_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2102,7 +2161,7 @@ static const struct i915_power_well_regs hsw_power_well_regs = {
static const struct i915_power_well_desc hsw_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2123,7 +2182,7 @@ static const struct i915_power_well_desc hsw_power_wells[] = {
static const struct i915_power_well_desc bdw_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2166,7 +2225,7 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
static const struct i915_power_well_desc vlv_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2242,7 +2301,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = {
static const struct i915_power_well_desc chv_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2293,7 +2352,7 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
static const struct i915_power_well_desc skl_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2301,6 +2360,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
{
.name = "power well 1",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2313,6 +2373,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
{
.name = "MISC IO power well",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_MISC_IO,
@@ -2385,13 +2446,15 @@ static const struct i915_power_well_desc skl_power_wells[] = {
static const struct i915_power_well_desc bxt_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
},
{
.name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2443,7 +2506,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = {
static const struct i915_power_well_desc glk_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2451,6 +2514,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
{
.name = "power well 1",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2571,7 +2635,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
static const struct i915_power_well_desc cnl_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2579,6 +2643,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
{
.name = "power well 1",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2716,6 +2781,13 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
.is_enabled = hsw_power_well_enabled,
};
+static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_tc_phy_aux_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
static const struct i915_power_well_regs icl_aux_power_well_regs = {
.bios = ICL_PWR_WELL_CTL_AUX1,
.driver = ICL_PWR_WELL_CTL_AUX2,
@@ -2731,7 +2803,7 @@ static const struct i915_power_well_regs icl_ddi_power_well_regs = {
static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2739,6 +2811,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "power well 1",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2749,6 +2822,12 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
+ .name = "DC off",
+ .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
.name = "power well 2",
.domains = ICL_PW_2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
@@ -2760,12 +2839,6 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "DC off",
- .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
.name = "power well 3",
.domains = ICL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
@@ -2861,81 +2934,89 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX C",
.domains = ICL_AUX_C_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+ .hsw.is_tc_tbt = false,
},
},
{
.name = "AUX D",
.domains = ICL_AUX_D_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+ .hsw.is_tc_tbt = false,
},
},
{
.name = "AUX E",
.domains = ICL_AUX_E_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
+ .hsw.is_tc_tbt = false,
},
},
{
.name = "AUX F",
.domains = ICL_AUX_F_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
+ .hsw.is_tc_tbt = false,
},
},
{
.name = "AUX TBT1",
.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
+ .hsw.is_tc_tbt = true,
},
},
{
.name = "AUX TBT2",
.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
+ .hsw.is_tc_tbt = true,
},
},
{
.name = "AUX TBT3",
.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
+ .hsw.is_tc_tbt = true,
},
},
{
.name = "AUX TBT4",
.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
+ .hsw.is_tc_tbt = true,
},
},
{
@@ -2969,17 +3050,20 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
+ if (INTEL_GEN(dev_priv) >= 11) {
max_dc = 2;
- mask = 0;
- } else if (IS_GEN9_LP(dev_priv)) {
- max_dc = 1;
/*
* DC9 has a separate HW flow from the rest of the DC states,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
mask = DC_STATE_EN_DC9;
+ } else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
+ max_dc = 2;
+ mask = 0;
+ } else if (IS_GEN9_LP(dev_priv)) {
+ max_dc = 1;
+ mask = DC_STATE_EN_DC9;
} else {
max_dc = 0;
mask = 0;
@@ -3075,12 +3159,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (IS_ICELAKE(dev_priv)) {
err = set_power_wells(power_domains, icl_power_wells);
- } else if (IS_HASWELL(dev_priv)) {
- err = set_power_wells(power_domains, hsw_power_wells);
- } else if (IS_BROADWELL(dev_priv)) {
- err = set_power_wells(power_domains, bdw_power_wells);
- } else if (IS_GEN9_BC(dev_priv)) {
- err = set_power_wells(power_domains, skl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
err = set_power_wells(power_domains, cnl_power_wells);
@@ -3092,13 +3170,18 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (!IS_CNL_WITH_PORT_F(dev_priv))
power_domains->power_well_count -= 2;
-
- } else if (IS_BROXTON(dev_priv)) {
- err = set_power_wells(power_domains, bxt_power_wells);
} else if (IS_GEMINILAKE(dev_priv)) {
err = set_power_wells(power_domains, glk_power_wells);
+ } else if (IS_BROXTON(dev_priv)) {
+ err = set_power_wells(power_domains, bxt_power_wells);
+ } else if (IS_GEN9_BC(dev_priv)) {
+ err = set_power_wells(power_domains, skl_power_wells);
} else if (IS_CHERRYVIEW(dev_priv)) {
err = set_power_wells(power_domains, chv_power_wells);
+ } else if (IS_BROADWELL(dev_priv)) {
+ err = set_power_wells(power_domains, bdw_power_wells);
+ } else if (IS_HASWELL(dev_priv)) {
+ err = set_power_wells(power_domains, hsw_power_wells);
} else if (IS_VALLEYVIEW(dev_priv)) {
err = set_power_wells(power_domains, vlv_power_wells);
} else if (IS_I830(dev_priv)) {
@@ -3176,8 +3259,7 @@ static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- u32 val;
+ const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
bool ret;
if (req_slices > intel_dbuf_max_slices(dev_priv)) {
@@ -3188,7 +3270,6 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
if (req_slices == hw_enabled_slices || req_slices == 0)
return;
- val = I915_READ(DBUF_CTL_S2);
if (req_slices > hw_enabled_slices)
ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
else
@@ -3240,18 +3321,40 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
I915_WRITE(MBUS_ABOX_CTL, val);
}
+static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ i915_reg_t reg;
+ u32 reset_bits, val;
+
+ if (IS_IVYBRIDGE(dev_priv)) {
+ reg = GEN7_MSG_CTL;
+ reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
+ } else {
+ reg = HSW_NDE_RSTWRN_OPT;
+ reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
+ }
+
+ val = I915_READ(reg);
+
+ if (enable)
+ val |= reset_bits;
+ else
+ val &= ~reset_bits;
+
+ I915_WRITE(reg, val);
+}
+
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- uint32_t val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* enable PCH reset handshake */
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
/* enable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
@@ -3307,7 +3410,6 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- uint32_t val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -3317,9 +3419,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
* Move the handshake programming to initialization sequence.
* Previously was left up to BIOS.
*/
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- val &= ~RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+ intel_pch_reset_handshake(dev_priv, false);
/* Enable PG1 */
mutex_lock(&power_domains->lock);
@@ -3365,101 +3465,18 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
}
-enum {
- PROCMON_0_85V_DOT_0,
- PROCMON_0_95V_DOT_0,
- PROCMON_0_95V_DOT_1,
- PROCMON_1_05V_DOT_0,
- PROCMON_1_05V_DOT_1,
-};
-
-static const struct cnl_procmon {
- u32 dw1, dw9, dw10;
-} cnl_procmon_values[] = {
- [PROCMON_0_85V_DOT_0] =
- { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
- [PROCMON_0_95V_DOT_0] =
- { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
- [PROCMON_0_95V_DOT_1] =
- { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
- [PROCMON_1_05V_DOT_0] =
- { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
- [PROCMON_1_05V_DOT_1] =
- { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
-};
-
-/*
- * CNL has just one set of registers, while ICL has two sets: one for port A and
- * the other for port B. The CNL registers are equivalent to the ICL port A
- * registers, that's why we call the ICL macros even though the function has CNL
- * on its name.
- */
-static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
- enum port port)
-{
- const struct cnl_procmon *procmon;
- u32 val;
-
- val = I915_READ(ICL_PORT_COMP_DW3(port));
- switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
- default:
- MISSING_CASE(val);
- /* fall through */
- case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
- procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
- break;
- case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
- procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
- break;
- case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
- procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
- break;
- case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
- procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
- break;
- case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
- procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
- break;
- }
-
- val = I915_READ(ICL_PORT_COMP_DW1(port));
- val &= ~((0xff << 16) | 0xff);
- val |= procmon->dw1;
- I915_WRITE(ICL_PORT_COMP_DW1(port), val);
-
- I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
- I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
-}
-
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* 1. Enable PCH Reset Handshake */
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- val |= RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
- /* 2. Enable Comp */
- val = I915_READ(CHICKEN_MISC_2);
- val &= ~CNL_COMP_PWR_DOWN;
- I915_WRITE(CHICKEN_MISC_2, val);
-
- /* Dummy PORT_A to get the correct CNL register from the ICL macro */
- cnl_set_procmon_ref_values(dev_priv, PORT_A);
-
- val = I915_READ(CNL_PORT_COMP_DW0);
- val |= COMP_INIT;
- I915_WRITE(CNL_PORT_COMP_DW0, val);
-
- /* 3. */
- val = I915_READ(CNL_PORT_CL1CM_DW5);
- val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+ /* 2-3. */
+ cnl_combo_phys_init(dev_priv);
/*
* 4. Enable Power Well 1 (PG1).
@@ -3484,7 +3501,6 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -3508,44 +3524,23 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
- /* 5. Disable Comp */
- val = I915_READ(CHICKEN_MISC_2);
- val |= CNL_COMP_PWR_DOWN;
- I915_WRITE(CHICKEN_MISC_2, val);
+ /* 5. */
+ cnl_combo_phys_uninit(dev_priv);
}
-static void icl_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
+void icl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- enum port port;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* 1. Enable PCH reset handshake. */
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- val |= RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
-
- for (port = PORT_A; port <= PORT_B; port++) {
- /* 2. Enable DDI combo PHY comp. */
- val = I915_READ(ICL_PHY_MISC(port));
- val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(port), val);
-
- cnl_set_procmon_ref_values(dev_priv, port);
-
- val = I915_READ(ICL_PORT_COMP_DW0(port));
- val |= COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(port), val);
-
- /* 3. Set power down enable. */
- val = I915_READ(ICL_PORT_CL_DW5(port));
- val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(ICL_PORT_CL_DW5(port), val);
- }
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+ /* 2-3. */
+ icl_combo_phys_init(dev_priv);
/*
* 4. Enable Power Well 1 (PG1).
@@ -3569,12 +3564,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_csr_load_program(dev_priv);
}
-static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- enum port port;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -3596,12 +3589,8 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
- /* 5. Disable Comp */
- for (port = PORT_A; port <= PORT_B; port++) {
- val = I915_READ(ICL_PHY_MISC(port));
- val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(port), val);
- }
+ /* 5. */
+ icl_combo_phys_uninit(dev_priv);
}
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
@@ -3759,7 +3748,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
- }
+ } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
/*
* Keep all power wells enabled for any dependent HW access during
@@ -3953,14 +3943,6 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
int domains_count;
bool enabled;
- /*
- * Power wells not belonging to any domain (like the MISC_IO
- * and PW1 power wells) are under FW control, so ignore them,
- * since their state can change asynchronously.
- */
- if (!power_well->desc->domains)
- continue;
-
enabled = power_well->desc->ops->is_enabled(dev_priv,
power_well);
if ((power_well->count || power_well->desc->always_on) !=
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 701372e512a8..5805ec1aba12 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -105,11 +105,6 @@ struct intel_sdvo {
bool has_hdmi_audio;
bool rgb_quant_range_selectable;
- /**
- * This is sdvo fixed pannel mode pointer
- */
- struct drm_display_mode *sdvo_lvds_fixed_mode;
-
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
@@ -765,10 +760,14 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
args.height = height;
args.interlace = 0;
- if (IS_LVDS(intel_sdvo_connector) &&
- (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
- intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
- args.scaled = 1;
+ if (IS_LVDS(intel_sdvo_connector)) {
+ const struct drm_display_mode *fixed_mode =
+ intel_sdvo_connector->base.panel.fixed_mode;
+
+ if (fixed_mode->hdisplay != width ||
+ fixed_mode->vdisplay != height)
+ args.scaled = 1;
+ }
return intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
@@ -1123,6 +1122,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
pipe_config->has_pch_encoder = true;
@@ -1144,7 +1144,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->sdvo_tv_clock = true;
} else if (IS_LVDS(intel_sdvo_connector)) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
- intel_sdvo->sdvo_lvds_fixed_mode))
+ intel_sdvo_connector->base.panel.fixed_mode))
return false;
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
@@ -1301,7 +1301,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
/* lvds has a special fixed output timing. */
if (IS_LVDS(intel_sdvo_connector))
intel_sdvo_get_dtd_from_mode(&output_dtd,
- intel_sdvo->sdvo_lvds_fixed_mode);
+ intel_sdvo_connector->base.panel.fixed_mode);
else
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
@@ -1642,10 +1642,13 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
if (IS_LVDS(intel_sdvo_connector)) {
- if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+ const struct drm_display_mode *fixed_mode =
+ intel_sdvo_connector->base.panel.fixed_mode;
+
+ if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+ if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
@@ -2058,14 +2061,6 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
return !list_empty(&connector->probed_modes);
}
-static void intel_sdvo_destroy(struct drm_connector *connector)
-{
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-
- drm_connector_cleanup(connector);
- kfree(intel_sdvo_connector);
-}
-
static int
intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
@@ -2228,7 +2223,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.atomic_set_property = intel_sdvo_connector_atomic_set_property,
.late_register = intel_sdvo_connector_register,
.early_unregister = intel_sdvo_connector_unregister,
- .destroy = intel_sdvo_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_sdvo_connector_duplicate_state,
};
@@ -2267,10 +2262,6 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
- if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
- drm_mode_destroy(encoder->dev,
- intel_sdvo->sdvo_lvds_fixed_mode);
-
i2c_del_adapter(&intel_sdvo->ddc);
intel_encoder_destroy(encoder);
}
@@ -2583,7 +2574,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
return true;
err:
- intel_sdvo_destroy(connector);
+ intel_connector_destroy(connector);
return false;
}
@@ -2663,19 +2654,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
list_for_each_entry(mode, &connector->probed_modes, head) {
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
- intel_sdvo->sdvo_lvds_fixed_mode =
+ struct drm_display_mode *fixed_mode =
drm_mode_duplicate(connector->dev, mode);
+
+ intel_panel_init(&intel_connector->panel,
+ fixed_mode, NULL);
break;
}
}
- if (!intel_sdvo->sdvo_lvds_fixed_mode)
+ if (!intel_connector->panel.fixed_mode)
goto err;
return true;
err:
- intel_sdvo_destroy(connector);
+ intel_connector_destroy(connector);
return false;
}
@@ -2745,7 +2739,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
&dev->mode_config.connector_list, head) {
if (intel_attached_encoder(connector) == &intel_sdvo->base) {
drm_connector_unregister(connector);
- intel_sdvo_destroy(connector);
+ intel_connector_destroy(connector);
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 5fd2f7bf3927..d2e003d8f3db 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -40,6 +40,7 @@
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include <drm/drm_color_mgmt.h>
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
@@ -275,17 +276,24 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
src->y2 = (src_y + src_h) << 16;
if (fb->format->is_yuv &&
- fb->format->format != DRM_FORMAT_NV12 &&
(src_x & 1 || src_w & 1)) {
DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
src_x, src_w);
return -EINVAL;
}
+ if (fb->format->is_yuv &&
+ fb->format->num_planes > 1 &&
+ (src_y & 1 || src_h & 1)) {
+ DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n",
+ src_y, src_h);
+ return -EINVAL;
+ }
+
return 0;
}
-unsigned int
+static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation)
@@ -302,108 +310,292 @@ skl_plane_max_stride(struct intel_plane *plane,
return min(8192 * cpp, 32768);
}
-void
-skl_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static void
+skl_program_scaler(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ int scaler_id = plane_state->scaler_id;
+ const struct intel_scaler *scaler =
+ &crtc_state->scaler_state.scalers[scaler_id];
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ u16 y_hphase, uv_rgb_hphase;
+ u16 y_vphase, uv_rgb_vphase;
+ int hscale, vscale;
+
+ hscale = drm_rect_calc_hscale(&plane_state->base.src,
+ &plane_state->base.dst,
+ 0, INT_MAX);
+ vscale = drm_rect_calc_vscale(&plane_state->base.src,
+ &plane_state->base.dst,
+ 0, INT_MAX);
+
+ /* TODO: handle sub-pixel coordinates */
+ if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 &&
+ !icl_is_hdr_plane(plane)) {
+ y_hphase = skl_scaler_calc_phase(1, hscale, false);
+ y_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+ /* MPEG2 chroma siting convention */
+ uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
+ uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
+ } else {
+ /* not used */
+ y_hphase = 0;
+ y_vphase = 0;
+
+ uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+ }
+
+ I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
+ PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
+ I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
+ I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
+}
+
+/* Preoffset values for YUV to RGB Conversion */
+#define PREOFF_YUV_TO_RGB_HI 0x1800
+#define PREOFF_YUV_TO_RGB_ME 0x1F00
+#define PREOFF_YUV_TO_RGB_LO 0x1800
+
+#define ROFF(x) (((x) & 0xffff) << 16)
+#define GOFF(x) (((x) & 0xffff) << 0)
+#define BOFF(x) (((x) & 0xffff) << 16)
+
+static void
+icl_program_input_csc(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+
+ static const u16 input_csc_matrix[][9] = {
+ /*
+ * BT.601 full range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.000, 0.000, 1.371,
+ * 1.000, -0.336, -0.698,
+ * 1.000, 1.732, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x7AF8, 0x7800, 0x0,
+ 0x8B28, 0x7800, 0x9AC0,
+ 0x0, 0x7800, 0x7DD8,
+ },
+ /*
+ * BT.709 full range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.000, 0.000, 1.574,
+ * 1.000, -0.187, -0.468,
+ * 1.000, 1.855, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x7C98, 0x7800, 0x0,
+ 0x9EF8, 0x7800, 0xABF8,
+ 0x0, 0x7800, 0x7ED8,
+ },
+ };
+
+ /* Matrix for Limited Range to Full Range Conversion */
+ static const u16 input_csc_matrix_lr[][9] = {
+ /*
+ * BT.601 Limited range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.164384, 0.000, 1.596370,
+ * 1.138393, -0.382500, -0.794598,
+ * 1.138393, 1.971696, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x7CC8, 0x7950, 0x0,
+ 0x8CB8, 0x7918, 0x9C40,
+ 0x0, 0x7918, 0x7FC8,
+ },
+ /*
+ * BT.709 Limited range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.164, 0.000, 1.833671,
+ * 1.138393, -0.213249, -0.532909,
+ * 1.138393, 2.112402, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x7EA8, 0x7950, 0x0,
+ 0x8888, 0x7918, 0xADA8,
+ 0x0, 0x7918, 0x6870,
+ },
+ };
+ const u16 *csc;
+
+ if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = input_csc_matrix[plane_state->base.color_encoding];
+ else
+ csc = input_csc_matrix_lr[plane_state->base.color_encoding];
+
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
+ GOFF(csc[1]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
+ GOFF(csc[4]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
+ GOFF(csc[7]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
+
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+ PREOFF_YUV_TO_RGB_HI);
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+ PREOFF_YUV_TO_RGB_LO);
+ I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+ I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+ I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+}
+
+static void
+skl_program_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane, bool slave, u32 plane_ctl)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- u32 plane_ctl = plane_state->ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 surf_addr = plane_state->color_plane[0].offset;
- u32 stride = skl_plane_stride(plane_state, 0);
+ u32 surf_addr = plane_state->color_plane[color_plane].offset;
+ u32 stride = skl_plane_stride(plane_state, color_plane);
u32 aux_stride = skl_plane_stride(plane_state, 1);
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->color_plane[0].x;
- uint32_t y = plane_state->color_plane[0].y;
+ uint32_t x = plane_state->color_plane[color_plane].x;
+ uint32_t y = plane_state->color_plane[color_plane].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ struct intel_plane *linked = plane_state->linked_plane;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ u8 alpha = plane_state->base.alpha >> 8;
unsigned long irqflags;
+ u32 keymsk, keymax;
/* Sizes are 0 based */
src_w--;
src_h--;
- crtc_w--;
- crtc_h--;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
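+ /*
+ * plane_state->base.alpha is 16 bits wide, but PLANE_KEYMAX carries
+ * only an 8-bit alpha, hence the >> 8 when deriving alpha above.
+ */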
+ keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
- plane_state->color_ctl);
+ keymsk = key->channel_mask & 0x3ffffff;
+ if (alpha < 0xff)
+ keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
- if (key->flags) {
- I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), key->max_value);
- I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
+ /* The scaler will handle the output position */
+ if (plane_state->scaler_id >= 0) {
+ crtc_x = 0;
+ crtc_y = 0;
}
- I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
+ I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
(plane_state->color_plane[1].offset - surf_addr) | aux_stride);
- I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->color_plane[1].y << 16) |
- plane_state->color_plane[1].x);
-
- /* program plane scaler */
- if (plane_state->scaler_id >= 0) {
- int scaler_id = plane_state->scaler_id;
- const struct intel_scaler *scaler =
- &crtc_state->scaler_state.scalers[scaler_id];
- u16 y_hphase, uv_rgb_hphase;
- u16 y_vphase, uv_rgb_vphase;
-
- /* TODO: handle sub-pixel coordinates */
- if (fb->format->format == DRM_FORMAT_NV12) {
- y_hphase = skl_scaler_calc_phase(1, false);
- y_vphase = skl_scaler_calc_phase(1, false);
-
- /* MPEG2 chroma siting convention */
- uv_rgb_hphase = skl_scaler_calc_phase(2, true);
- uv_rgb_vphase = skl_scaler_calc_phase(2, false);
- } else {
- /* not used */
- y_hphase = 0;
- y_vphase = 0;
- uv_rgb_hphase = skl_scaler_calc_phase(1, false);
- uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+ if (icl_is_hdr_plane(plane)) {
+ u32 cus_ctl = 0;
+
+ if (linked) {
+ /* Enable and use MPEG-2 chroma siting */
+ cus_ctl = PLANE_CUS_ENABLE |
+ PLANE_CUS_HPHASE_0 |
+ PLANE_CUS_VPHASE_SIGN_NEGATIVE |
+ PLANE_CUS_VPHASE_0_25;
+
+ if (linked->id == PLANE_SPRITE5)
+ cus_ctl |= PLANE_CUS_PLANE_7;
+ else if (linked->id == PLANE_SPRITE4)
+ cus_ctl |= PLANE_CUS_PLANE_6;
+ else
+ MISSING_CASE(linked->id);
}
- I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
- PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
- I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
- I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
- I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
- ((crtc_w + 1) << 16)|(crtc_h + 1));
-
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
- } else {
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
}
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
+ plane_state->color_ctl);
+
+ if (fb->format->is_yuv && icl_is_hdr_plane(plane))
+ icl_program_input_csc(plane, crtc_state, plane_state);
+
+ skl_write_plane_wm(plane, crtc_state);
+
+ I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+ I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
+ I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+
+ I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+
+ if (INTEL_GEN(dev_priv) < 11)
+ I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->color_plane[1].y << 16) |
+ plane_state->color_plane[1].x);
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
- POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
+
+ if (!slave && plane_state->scaler_id >= 0)
+ skl_program_scaler(plane, crtc_state, plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void
-skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+static void
+skl_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ int color_plane = 0;
+
+ if (plane_state->linked_plane) {
+ /* Program the UV plane */
+ color_plane = 1;
+ }
+
+ skl_program_plane(plane, crtc_state, plane_state,
+ color_plane, false, plane_state->ctl);
+}
+
+static void
+icl_update_slave(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
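+ /*
+ * The slave scans out the Y component of NV12 (color plane 0, with
+ * PLANE_CTL_YUV420_Y_PLANE set), while the master plane handles the
+ * UV data, see skl_update_plane() above.
+ */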
+ skl_program_plane(plane, crtc_state, plane_state, 0, true,
+ plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
+}
+
+static void
+skl_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
@@ -412,15 +604,15 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
+ skl_write_plane_wm(plane, crtc_state);
+ I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
- POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-bool
+static bool
skl_plane_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
@@ -613,7 +805,6 @@ vlv_update_plane(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
u32 sprctl = plane_state->ctl;
@@ -636,38 +827,41 @@ vlv_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- vlv_update_clrc(plane_state);
+ I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
+ plane_state->color_plane[0].stride);
+ I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(plane_state);
if (key->flags) {
I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
+ I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
}
- I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
- plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
- if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
- else
- I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+ I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+ I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
- I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
-
- I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
I915_WRITE_FW(SPSURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- POSTING_READ_FW(SPSURF(pipe, plane_id));
+
+ vlv_update_clrc(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+vlv_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -677,9 +871,7 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
-
I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
- POSTING_READ_FW(SPSURF(pipe, plane_id));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -774,7 +966,6 @@ ivb_update_plane(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
u32 sprctl = plane_state->ctl, sprscale = 0;
u32 sprsurf_offset = plane_state->color_plane[0].offset;
@@ -803,37 +994,42 @@ ivb_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
+ I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ if (IS_IVYBRIDGE(dev_priv))
+ I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+
if (key->flags) {
I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
+ I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
- else if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
- else
+ } else {
I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
+ }
- I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(SPRCTL(pipe), sprctl);
I915_WRITE_FW(SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- POSTING_READ_FW(SPRSURF(pipe));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+ivb_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -842,12 +1038,10 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
I915_WRITE_FW(SPRCTL(pipe), 0);
- /* Can't leave the scaler enabled... */
+ /* Disable the scaler */
if (IS_IVYBRIDGE(dev_priv))
I915_WRITE_FW(SPRSCALE(pipe), 0);
-
I915_WRITE_FW(SPRSURF(pipe), 0);
- POSTING_READ_FW(SPRSURF(pipe));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -946,7 +1140,6 @@ g4x_update_plane(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
u32 dvscntr = plane_state->ctl, dvsscale = 0;
u32 dvssurf_offset = plane_state->color_plane[0].offset;
@@ -975,32 +1168,35 @@ g4x_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
+ I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+
if (key->flags) {
I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
+ I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
- if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
- else
- I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
-
- I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
I915_WRITE_FW(DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
- POSTING_READ_FW(DVSSURF(pipe));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+g4x_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1011,9 +1207,7 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
I915_WRITE_FW(DVSCNTR(pipe), 0);
/* Disable the scaler */
I915_WRITE_FW(DVSSCALE(pipe), 0);
-
I915_WRITE_FW(DVSSURF(pipe), 0);
- POSTING_READ_FW(DVSSURF(pipe));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1039,6 +1233,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static bool intel_fb_scalable(const struct drm_framebuffer *fb)
+{
+ if (!fb)
+ return false;
+
+ switch (fb->format->format) {
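+ /* Indexed 8-bit cannot be fed through the plane scalers */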
+ case DRM_FORMAT_C8:
+ return false;
+ default:
+ return true;
+ }
+}
+
static int
g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
@@ -1106,18 +1313,18 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int max_scale, min_scale;
+ int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
- if (INTEL_GEN(dev_priv) < 7) {
- min_scale = 1;
- max_scale = 16 << 16;
- } else if (IS_IVYBRIDGE(dev_priv)) {
- min_scale = 1;
- max_scale = 2 << 16;
- } else {
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ if (intel_fb_scalable(plane_state->base.fb)) {
+ if (INTEL_GEN(dev_priv) < 7) {
+ min_scale = 1;
+ max_scale = 16 << 16;
+ } else if (IS_IVYBRIDGE(dev_priv)) {
+ min_scale = 1;
+ max_scale = 2 << 16;
+ }
}
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1204,6 +1411,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
struct drm_format_name_buf format_name;
@@ -1232,13 +1441,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
}
/*
- * 90/270 is not allowed with RGB64 16:16:16:16,
- * RGB 16-bit 5:6:5, and Indexed 8-bit.
- * TBD: Add RGB64 case once its added in supported format list.
+ * 90/270 is not allowed with RGB64 16:16:16:16 and
+ * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed from gen11
+ * onwards.
+ * TBD: Add the RGB64 case once it's added to the supported
+ * format list.
*/
switch (fb->format->format) {
- case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
+ if (INTEL_GEN(dev_priv) >= 11)
+ break;
+ /* fall through */
+ case DRM_FORMAT_C8:
DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
drm_get_format_name(fb->format->format,
&format_name));
@@ -1292,12 +1505,31 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
return 0;
}
-int skl_plane_check(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+
+ /* Display WA #1106 */
+ if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 &&
+ (rotation == DRM_MODE_ROTATE_270 ||
+ rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
+ DRM_DEBUG_KMS("src width must be a multiple of 4 for rotated NV12\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int skl_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int max_scale, min_scale;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
ret = skl_plane_check_fb(crtc_state, plane_state);
@@ -1305,15 +1537,9 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
return ret;
/* use scaler when colorkey is not required */
- if (!plane_state->ckey.flags) {
- const struct drm_framebuffer *fb = plane_state->base.fb;
-
+ if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
min_scale = 1;
- max_scale = skl_max_scale(crtc_state,
- fb ? fb->format->format : 0);
- } else {
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ max_scale = skl_max_scale(crtc_state, fb->format->format);
}
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1334,10 +1560,18 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = skl_plane_check_nv12_rotation(plane_state);
+ if (ret)
+ return ret;
+
ret = skl_check_plane_surface(plane_state);
if (ret)
return ret;
+ /*
+ * The HW only has 8 bits of alpha precision, so disable the plane
+ * if it is effectively invisible.
+ */
+ if (!(plane_state->base.alpha >> 8))
+ plane_state->base.visible = false;
+
plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -1502,24 +1736,30 @@ static const uint32_t vlv_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static uint32_t skl_plane_formats[] = {
+static const uint32_t skl_plane_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
};
-static uint32_t skl_planar_formats[] = {
+static const uint32_t skl_planar_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -1724,8 +1964,36 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.format_mod_supported = skl_plane_format_mod_supported,
};
-bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
+}
+
+static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (INTEL_GEN(dev_priv) >= 11)
+ return plane_id <= PLANE_SPRITE3;
+
+ /* Display WA #0870: skl, bxt */
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return false;
+
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+ return false;
+
+ if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+ return false;
+
+ return true;
+}
+
+static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
{
if (plane_id == PLANE_CURSOR)
return false;
@@ -1742,109 +2010,173 @@ bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
}
struct intel_plane *
-intel_sprite_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, int plane)
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
{
- struct intel_plane *intel_plane = NULL;
- struct intel_plane_state *state = NULL;
- const struct drm_plane_funcs *plane_funcs;
- unsigned long possible_crtcs;
- const uint32_t *plane_formats;
- const uint64_t *modifiers;
+ struct intel_plane *plane;
+ enum drm_plane_type plane_type;
unsigned int supported_rotations;
- int num_plane_formats;
+ unsigned int possible_crtcs;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
int ret;
- intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
- if (!intel_plane) {
- ret = -ENOMEM;
- goto fail;
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ plane->pipe = pipe;
+ plane->id = plane_id;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
+
+ plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
+ if (plane->has_fbc) {
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}
- state = intel_create_plane_state(&intel_plane->base);
- if (!state) {
- ret = -ENOMEM;
- goto fail;
+ plane->max_stride = skl_plane_max_stride;
+ plane->update_plane = skl_update_plane;
+ plane->disable_plane = skl_disable_plane;
+ plane->get_hw_state = skl_plane_get_hw_state;
+ plane->check_plane = skl_plane_check;
+ if (icl_is_nv12_y_plane(plane_id))
+ plane->update_slave = icl_update_slave;
+
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ formats = skl_planar_formats;
+ num_formats = ARRAY_SIZE(skl_planar_formats);
+ } else {
+ formats = skl_plane_formats;
+ num_formats = ARRAY_SIZE(skl_plane_formats);
}
- intel_plane->base.state = &state->base;
- if (INTEL_GEN(dev_priv) >= 9) {
- state->scaler_id = -1;
+ plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+ if (plane->has_ccs)
+ modifiers = skl_plane_format_modifiers_ccs;
+ else
+ modifiers = skl_plane_format_modifiers_noccs;
- intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
- PLANE_SPRITE0 + plane);
+ if (plane_id == PLANE_PRIMARY)
+ plane_type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ plane_type = DRM_PLANE_TYPE_OVERLAY;
- intel_plane->max_stride = skl_plane_max_stride;
- intel_plane->update_plane = skl_update_plane;
- intel_plane->disable_plane = skl_disable_plane;
- intel_plane->get_hw_state = skl_plane_get_hw_state;
- intel_plane->check_plane = skl_plane_check;
+ possible_crtcs = BIT(pipe);
- if (skl_plane_has_planar(dev_priv, pipe,
- PLANE_SPRITE0 + plane)) {
- plane_formats = skl_planar_formats;
- num_plane_formats = ARRAY_SIZE(skl_planar_formats);
- } else {
- plane_formats = skl_plane_formats;
- num_plane_formats = ARRAY_SIZE(skl_plane_formats);
- }
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ possible_crtcs, &skl_plane_funcs,
+ formats, num_formats, modifiers,
+ plane_type,
+ "plane %d%c", plane_id + 1,
+ pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ supported_rotations |= DRM_MODE_REFLECT_X;
+
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+
+ drm_plane_create_color_properties(&plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+ drm_plane_create_alpha_property(&plane->base);
+ drm_plane_create_blend_mode_property(&plane->base,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
- if (intel_plane->has_ccs)
- modifiers = skl_plane_format_modifiers_ccs;
- else
- modifiers = skl_plane_format_modifiers_noccs;
-
- plane_funcs = &skl_plane_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_plane->max_stride = i9xx_plane_max_stride;
- intel_plane->update_plane = vlv_update_plane;
- intel_plane->disable_plane = vlv_disable_plane;
- intel_plane->get_hw_state = vlv_plane_get_hw_state;
- intel_plane->check_plane = vlv_sprite_check;
-
- plane_formats = vlv_plane_formats;
- num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+ return plane;
+
+fail:
+ intel_plane_free(plane);
+
+ return ERR_PTR(ret);
+}
+
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int sprite)
+{
+ struct intel_plane *plane;
+ const struct drm_plane_funcs *plane_funcs;
+ unsigned long possible_crtcs;
+ unsigned int supported_rotations;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
+ int ret;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_universal_plane_create(dev_priv, pipe,
+ PLANE_SPRITE0 + sprite);
+
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ plane->max_stride = i9xx_plane_max_stride;
+ plane->update_plane = vlv_update_plane;
+ plane->disable_plane = vlv_disable_plane;
+ plane->get_hw_state = vlv_plane_get_hw_state;
+ plane->check_plane = vlv_sprite_check;
+
+ formats = vlv_plane_formats;
+ num_formats = ARRAY_SIZE(vlv_plane_formats);
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
- intel_plane->max_stride = g4x_sprite_max_stride;
- intel_plane->update_plane = ivb_update_plane;
- intel_plane->disable_plane = ivb_disable_plane;
- intel_plane->get_hw_state = ivb_plane_get_hw_state;
- intel_plane->check_plane = g4x_sprite_check;
-
- plane_formats = snb_plane_formats;
- num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ plane->max_stride = g4x_sprite_max_stride;
+ plane->update_plane = ivb_update_plane;
+ plane->disable_plane = ivb_disable_plane;
+ plane->get_hw_state = ivb_plane_get_hw_state;
+ plane->check_plane = g4x_sprite_check;
+
+ formats = snb_plane_formats;
+ num_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &snb_sprite_funcs;
} else {
- intel_plane->max_stride = g4x_sprite_max_stride;
- intel_plane->update_plane = g4x_update_plane;
- intel_plane->disable_plane = g4x_disable_plane;
- intel_plane->get_hw_state = g4x_plane_get_hw_state;
- intel_plane->check_plane = g4x_sprite_check;
+ plane->max_stride = g4x_sprite_max_stride;
+ plane->update_plane = g4x_update_plane;
+ plane->disable_plane = g4x_disable_plane;
+ plane->get_hw_state = g4x_plane_get_hw_state;
+ plane->check_plane = g4x_sprite_check;
modifiers = i9xx_plane_format_modifiers;
if (IS_GEN6(dev_priv)) {
- plane_formats = snb_plane_formats;
- num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ formats = snb_plane_formats;
+ num_formats = ARRAY_SIZE(snb_plane_formats);
plane_funcs = &snb_sprite_funcs;
} else {
- plane_formats = g4x_plane_formats;
- num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
+ formats = g4x_plane_formats;
+ num_formats = ARRAY_SIZE(g4x_plane_formats);
plane_funcs = &g4x_sprite_funcs;
}
}
- if (INTEL_GEN(dev_priv) >= 9) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
- } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X;
@@ -1853,35 +2185,25 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
}
- intel_plane->pipe = pipe;
- intel_plane->i9xx_plane = plane;
- intel_plane->id = PLANE_SPRITE0 + plane;
- intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id);
+ plane->pipe = pipe;
+ plane->id = PLANE_SPRITE0 + sprite;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- possible_crtcs = (1 << pipe);
+ possible_crtcs = BIT(pipe);
- if (INTEL_GEN(dev_priv) >= 9)
- ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, plane_funcs,
- plane_formats, num_plane_formats,
- modifiers,
- DRM_PLANE_TYPE_OVERLAY,
- "plane %d%c", plane + 2, pipe_name(pipe));
- else
- ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, plane_funcs,
- plane_formats, num_plane_formats,
- modifiers,
- DRM_PLANE_TYPE_OVERLAY,
- "sprite %c", sprite_name(pipe, plane));
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ possible_crtcs, plane_funcs,
+ formats, num_formats, modifiers,
+ DRM_PLANE_TYPE_OVERLAY,
+ "sprite %c", sprite_name(pipe, sprite));
if (ret)
goto fail;
- drm_plane_create_rotation_property(&intel_plane->base,
+ drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
supported_rotations);
- drm_plane_create_color_properties(&intel_plane->base,
+ drm_plane_create_color_properties(&plane->base,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
@@ -1889,13 +2211,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
- drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
- return intel_plane;
+ return plane;
fail:
- kfree(state);
- kfree(intel_plane);
+ intel_plane_free(plane);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b5b04cb892e9..860f306a23ba 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -885,6 +885,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
adjusted_mode->crtc_clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
@@ -1377,17 +1378,10 @@ intel_tv_get_modes(struct drm_connector *connector)
return count;
}
-static void
-intel_tv_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_tv_destroy,
+ .destroy = intel_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index b1b3e81b6e24..b34c318b238d 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -376,7 +376,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
intel_guc_init_params(guc);
ret = intel_guc_fw_upload(guc);
- if (ret == 0 || ret != -EAGAIN)
+ if (ret == 0 || ret != -ETIMEDOUT)
break;
DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 87910aa83267..0e3bd580e267 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -115,9 +115,14 @@ static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
return uc_fw->path != NULL;
}
+static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
+{
+ return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS;
+}
+
static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
{
- if (uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS)
+ if (intel_uc_fw_is_loaded(uc_fw))
uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 3ad302c66254..9289515108c3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
FORCEWAKE_MEDIA_VEBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
- } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
+ } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_fallback;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index bba98cf83cbd..bf3662ad5fed 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -326,6 +326,13 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_PORT_4,
};
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
+#define DP_AUX_F 0x60
+
#define VBT_DP_MAX_LINK_RATE_HBR3 0
#define VBT_DP_MAX_LINK_RATE_HBR2 1
#define VBT_DP_MAX_LINK_RATE_HBR 2
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
new file mode 100644
index 000000000000..c56ba0e04044
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Author: Gaurav K Singh <gaurav.k.singh@intel.com>
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+enum ROW_INDEX_BPP {
+ ROW_INDEX_6BPP = 0,
+ ROW_INDEX_8BPP,
+ ROW_INDEX_10BPP,
+ ROW_INDEX_12BPP,
+ ROW_INDEX_15BPP,
+ MAX_ROW_INDEX
+};
+
+enum COLUMN_INDEX_BPC {
+ COLUMN_INDEX_8BPC = 0,
+ COLUMN_INDEX_10BPC,
+ COLUMN_INDEX_12BPC,
+ COLUMN_INDEX_14BPC,
+ COLUMN_INDEX_16BPC,
+ MAX_COLUMN_INDEX
+};
+
+#define DSC_SUPPORTED_VERSION_MIN 1
+
+/* From the DSC_v1.11 spec; the rc_parameter_set syntax element is typically constant */
+static u16 rc_buf_thresh[] = {
+ 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
+ 7744, 7872, 8000, 8064
+};
+
+struct rc_parameters {
+ u16 initial_xmit_delay;
+ u8 first_line_bpg_offset;
+ u16 initial_offset;
+ u8 flatness_min_qp;
+ u8 flatness_max_qp;
+ u8 rc_quant_incr_limit0;
+ u8 rc_quant_incr_limit1;
+ struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+};
+
+/*
+ * Selected Rate Control Related Parameter Recommended Values
+ * from DSC_v1.11 spec & C Model release: DSC_model_20161212
+ */
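+/* Indexed as rc_params[compressed bpp][bpc]; see get_*_index_for_rc_params() */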
+static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = {
+{
+ /* 6BPP/8BPC */
+ { 768, 15, 6144, 3, 13, 11, 11, {
+ { 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 },
+ { 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 },
+ { 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 },
+ { 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 }
+ }
+ },
+ /* 6BPP/10BPC */
+ { 768, 15, 6144, 7, 17, 15, 15, {
+ { 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 },
+ { 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 },
+ { 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 },
+ { 14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 },
+ { 17, 18, -12 }
+ }
+ },
+ /* 6BPP/12BPC */
+ { 768, 15, 6144, 11, 21, 19, 19, {
+ { 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 },
+ { 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 },
+ { 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 },
+ { 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 },
+ { 21, 22, -12 }
+ }
+ },
+ /* 6BPP/14BPC */
+ { 768, 15, 6144, 15, 25, 23, 27, {
+ { 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 },
+ { 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 },
+ { 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 },
+ { 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 },
+ { 25, 26, -12 }
+ }
+ },
+ /* 6BPP/16BPC */
+ { 768, 15, 6144, 19, 29, 27, 27, {
+ { 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 },
+ { 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 },
+ { 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 },
+ { 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 },
+ { 29, 30, -12 }
+ }
+ },
+},
+{
+ /* 8BPP/8BPC */
+ { 512, 12, 6144, 3, 12, 11, 11, {
+ { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 },
+ { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+ }
+ },
+ /* 8BPP/10BPC */
+ { 512, 12, 6144, 7, 16, 15, 15, {
+ { 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 },
+ { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+ { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+ }
+ },
+ /* 8BPP/12BPC */
+ { 512, 12, 6144, 11, 20, 19, 19, {
+ { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 },
+ { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+ { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+ { 21, 23, -12 }
+ }
+ },
+ /* 8BPP/14BPC */
+ { 512, 12, 6144, 15, 24, 23, 23, {
+ { 0, 12, 0 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 },
+ { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 },
+ { 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 },
+ { 24, 25, -12 }
+ }
+ },
+ /* 8BPP/16BPC */
+ { 512, 12, 6144, 19, 28, 27, 27, {
+ { 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 },
+ { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 },
+ { 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 },
+ { 28, 29, -12 }
+ }
+ },
+},
+{
+ /* 10BPP/8BPC */
+ { 410, 15, 5632, 3, 12, 11, 11, {
+ { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 },
+ { 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 }
+ }
+ },
+ /* 10BPP/10BPC */
+ { 410, 15, 5632, 7, 16, 15, 15, {
+ { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 },
+ { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 },
+ { 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 }
+ }
+ },
+ /* 10BPP/12BPC */
+ { 410, 15, 5632, 11, 20, 19, 19, {
+ { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 },
+ { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 },
+ { 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 },
+ { 19, 20, -12 }
+ }
+ },
+ /* 10BPP/14BPC */
+ { 410, 15, 5632, 15, 24, 23, 23, {
+ { 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 },
+ { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 },
+ { 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 },
+ { 23, 24, -12 }
+ }
+ },
+ /* 10BPP/16BPC */
+ { 410, 15, 5632, 19, 28, 27, 27, {
+ { 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 },
+ { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 },
+ { 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 },
+ { 27, 28, -12 }
+ }
+ },
+},
+{
+ /* 12BPP/8BPC */
+ { 341, 15, 2048, 3, 12, 11, 11, {
+ { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 },
+ { 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+ }
+ },
+ /* 12BPP/10BPC */
+ { 341, 15, 2048, 7, 16, 15, 15, {
+ { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 },
+ { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+ { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+ }
+ },
+ /* 12BPP/12BPC */
+ { 341, 15, 2048, 11, 20, 19, 19, {
+ { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 },
+ { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+ { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+ { 21, 23, -12 }
+ }
+ },
+ /* 12BPP/14BPC */
+ { 341, 15, 2048, 15, 24, 23, 23, {
+ { 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 },
+ { 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 },
+ { 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 },
+ { 22, 23, -12 }
+ }
+ },
+ /* 12BPP/16BPC */
+ { 341, 15, 2048, 19, 28, 27, 27, {
+ { 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 },
+ { 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 },
+ { 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 },
+ { 26, 27, -12 }
+ }
+ },
+},
+{
+ /* 15BPP/8BPC */
+ { 273, 15, 2048, 3, 12, 11, 11, {
+ { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 },
+ { 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 },
+ { 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 },
+ { 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 }
+ }
+ },
+ /* 15BPP/10BPC */
+ { 273, 15, 2048, 7, 16, 15, 15, {
+ { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 },
+ { 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 },
+ { 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 },
+ { 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 }
+ }
+ },
+ /* 15BPP/12BPC */
+ { 273, 15, 2048, 11, 20, 19, 19, {
+ { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 },
+ { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 },
+ { 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 },
+ { 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 },
+ { 16, 17, -12 }
+ }
+ },
+ /* 15BPP/14BPC */
+ { 273, 15, 2048, 15, 24, 23, 23, {
+ { 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 },
+ { 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 },
+ { 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 },
+ { 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 },
+ { 20, 21, -12 }
+ }
+ },
+ /* 15BPP/16BPC */
+ { 273, 15, 2048, 19, 28, 27, 27, {
+ { 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 },
+ { 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 },
+ { 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 },
+ { 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 },
+ { 24, 25, -12 }
+ }
+ }
+}
+
+};
+
+static int get_row_index_for_rc_params(u16 compressed_bpp)
+{
+ switch (compressed_bpp) {
+ case 6:
+ return ROW_INDEX_6BPP;
+ case 8:
+ return ROW_INDEX_8BPP;
+ case 10:
+ return ROW_INDEX_10BPP;
+ case 12:
+ return ROW_INDEX_12BPP;
+ case 15:
+ return ROW_INDEX_15BPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int get_column_index_for_rc_params(u8 bits_per_component)
+{
+ switch (bits_per_component) {
+ case 8:
+ return COLUMN_INDEX_8BPC;
+ case 10:
+ return COLUMN_INDEX_10BPC;
+ case 12:
+ return COLUMN_INDEX_12BPC;
+ case 14:
+ return COLUMN_INDEX_14BPC;
+ case 16:
+ return COLUMN_INDEX_16BPC;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
+{
+ unsigned long groups_per_line = 0;
+ unsigned long groups_total = 0;
+ unsigned long num_extra_mux_bits = 0;
+ unsigned long slice_bits = 0;
+ unsigned long hrd_delay = 0;
+ unsigned long final_scale = 0;
+ unsigned long rbs_min = 0;
+
+ /* Number of groups used to code each line of a slice */
+ groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
+ DSC_RC_PIXELS_PER_GROUP);
+
+ /* Slice chunk size in bytes */
+ vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
+ vdsc_cfg->bits_per_pixel,
+ (8 * 16));
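+ /* (bits_per_pixel is in 1/16 bpp units, hence the divide by 8 * 16) */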
+
+ if (vdsc_cfg->convert_rgb)
+ num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4)
+ - 2);
+ else
+ num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4) +
+ 2 * (4 * vdsc_cfg->bits_per_component) - 2;
+ /* Number of bits in one Slice */
+ slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
+
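+ /*
+ * Trim num_extra_mux_bits so that the remaining slice bits align to
+ * a mux word boundary.
+ */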
+ while ((num_extra_mux_bits > 0) &&
+ ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
+ num_extra_mux_bits--;
+
+ if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
+ vdsc_cfg->initial_scale_value = groups_per_line + 8;
+
+ /* scale_decrement_interval calculation according to DSC spec 1.11 */
+ if (vdsc_cfg->initial_scale_value > 8)
+ vdsc_cfg->scale_decrement_interval = groups_per_line /
+ (vdsc_cfg->initial_scale_value - 8);
+ else
+ vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
+
+ vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
+ (vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
+
+ if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
+ DRM_DEBUG_KMS("FinalOfs must be less than RcModelSize for this InitialXmitDelay\n");
+ return -ERANGE;
+ }
+
+ final_scale = (vdsc_cfg->rc_model_size * 8) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
+ if (vdsc_cfg->slice_height > 1)
+ /*
+ * NflBpgOffset is a 16 bit value with 11 fractional bits,
+ * hence we multiply by 2^11 to preserve the fractional
+ * part.
+ */
+ vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
+ (vdsc_cfg->slice_height - 1));
+ else
+ vdsc_cfg->nfl_bpg_offset = 0;
+
+ /* 2^16 - 1 */
+ if (vdsc_cfg->nfl_bpg_offset > 65535) {
+ DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
+ return -ERANGE;
+ }
+
+ /* Number of groups used to code the entire slice */
+ groups_total = groups_per_line * vdsc_cfg->slice_height;
+
+ /* slice_bpg_offset is 16 bit value with 11 fractional bits */
+ vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
+ vdsc_cfg->initial_offset +
+ num_extra_mux_bits) << 11),
+ groups_total);
+
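+ /* final_scale carries 3 fractional bits (x8), so 9 below means 1.125 */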
+ if (final_scale > 9) {
+ /*
+ * ScaleIncrementInterval =
+ * finaloffset / ((NflBpgOffset + SliceBpgOffset) * 8 * (finalscale - 1.125));
+ * as (NflBpgOffset + SliceBpgOffset) carries an 11 bit fractional
+ * part, the stored values must be divided by 2^11.
+ */
+ vdsc_cfg->scale_increment_interval =
+ (vdsc_cfg->final_offset * (1 << 11)) /
+ ((vdsc_cfg->nfl_bpg_offset +
+ vdsc_cfg->slice_bpg_offset) *
+ (final_scale - 9));
+ } else {
+ /*
+ * If finalScaleValue is less than or equal to 9, a value of 0 should
+ * be used to disable the scale increment at the end of the slice
+ */
+ vdsc_cfg->scale_increment_interval = 0;
+ }
+
+ if (vdsc_cfg->scale_increment_interval > 65535) {
+ DRM_DEBUG_KMS("ScaleIncrementInterval is too large for this slice height\n");
+ return -ERANGE;
+ }
+
+ /*
+ * DSC spec mentions that bits_per_pixel specifies the target
+ * bits/pixel (bpp) rate that is used by the encoder,
+ * in steps of 1/16 of a bit per pixel
+ */
+ rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
+ DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel, 16) +
+ groups_per_line * vdsc_cfg->first_line_bpg_offset;
+
+ hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
+ vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
+ vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
+
+ return 0;
+}
+
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg;
+ u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp;
+ u8 i = 0;
+ int row_index = 0;
+ int column_index = 0;
+ u8 line_buf_depth = 0;
+
+ vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
+ vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
+ vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
+ pipe_config->dsc_params.slice_count);
+ /*
+ * A slice height of 8 works for all currently available panels, so
+ * start with that if pic_height is an integral multiple of 8.
+ * Eventually add logic to try multiple slice heights.
+ */
+ if (vdsc_cfg->pic_height % 8 == 0)
+ vdsc_cfg->slice_height = 8;
+ else if (vdsc_cfg->pic_height % 4 == 0)
+ vdsc_cfg->slice_height = 4;
+ else
+ vdsc_cfg->slice_height = 2;
+
+ /* Values filled from DSC Sink DPCD */
+ vdsc_cfg->dsc_version_major =
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
+ vdsc_cfg->dsc_version_minor =
+ min(DSC_SUPPORTED_VERSION_MIN,
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+
+ vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+ DP_DSC_RGB;
+
+ line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
+ if (!line_buf_depth) {
+ DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+ return -EINVAL;
+ }
+ if (vdsc_cfg->dsc_version_minor == 2)
+ vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
+ else
+ vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+
+ /* Gen 11 does not support YCbCr */
+ vdsc_cfg->enable422 = false;
+ /* Gen 11 does not support VBR */
+ vdsc_cfg->vbr_enable = false;
+ vdsc_cfg->block_pred_enable =
+ intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
+
+ /* Gen 11 only supports integral values of bpp */
+ vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
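+ /* the << 4 stores bits_per_pixel in 1/16 bpp units */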
+ vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ /*
+ * Six 0s are appended to the LSB of each threshold value
+ * internally in the h/w; only 8 bits are allowed for
+ * programming RcBufThreshold, hence the shift by 6.
+ */
+ vdsc_cfg->rc_buf_thresh[i] = rc_buf_thresh[i] >> 6;
+ }
+
+ /*
+ * For 6bpp, RC buffer thresholds 12 and 13 need different values
+ * as per the C model.
+ */
+ if (compressed_bpp == 6) {
+ vdsc_cfg->rc_buf_thresh[12] = 0x7C;
+ vdsc_cfg->rc_buf_thresh[13] = 0x7D;
+ }
+
+ row_index = get_row_index_for_rc_params(compressed_bpp);
+ column_index =
+ get_column_index_for_rc_params(vdsc_cfg->bits_per_component);
+
+ if (row_index < 0 || column_index < 0)
+ return -EINVAL;
+
+ vdsc_cfg->first_line_bpg_offset =
+ rc_params[row_index][column_index].first_line_bpg_offset;
+ vdsc_cfg->initial_xmit_delay =
+ rc_params[row_index][column_index].initial_xmit_delay;
+ vdsc_cfg->initial_offset =
+ rc_params[row_index][column_index].initial_offset;
+ vdsc_cfg->flatness_min_qp =
+ rc_params[row_index][column_index].flatness_min_qp;
+ vdsc_cfg->flatness_max_qp =
+ rc_params[row_index][column_index].flatness_max_qp;
+ vdsc_cfg->rc_quant_incr_limit0 =
+ rc_params[row_index][column_index].rc_quant_incr_limit0;
+ vdsc_cfg->rc_quant_incr_limit1 =
+ rc_params[row_index][column_index].rc_quant_incr_limit1;
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ vdsc_cfg->rc_range_params[i].range_min_qp =
+ rc_params[row_index][column_index].rc_range_params[i].range_min_qp;
+ vdsc_cfg->rc_range_params[i].range_max_qp =
+ rc_params[row_index][column_index].rc_range_params[i].range_max_qp;
+ /*
+ * Range BPG Offset uses 2's complement and is only 6 bits
+ * wide, so mask it down to 6 bits.
+ */
+ vdsc_cfg->rc_range_params[i].range_bpg_offset =
+ rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset &
+ DSC_RANGE_BPG_OFFSET_MASK;
+ }
+
+ /*
+ * BitsPerComponent value determines mux_word_size:
+ * when BitsPerComponent is 12bpc, muxWordSize will be 64 bits;
+ * when BitsPerComponent is 8 or 10bpc, muxWordSize will be
+ * 48 bits.
+ */
+ if (vdsc_cfg->bits_per_component == 8 ||
+ vdsc_cfg->bits_per_component == 10)
+ vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+ else if (vdsc_cfg->bits_per_component == 12)
+ vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
+
+ /* RC_MODEL_SIZE is a constant across all configurations */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ /* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
+ vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
+
+ return intel_compute_rc_parameters(vdsc_cfg);
+}
+
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
+{
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ /*
+ * On ICL, VDSC/joining for the eDP transcoder uses a separate power
+ * well, PW2. This requires the POWER_DOMAIN_TRANSCODER_EDP_VDSC power
+ * domain.
+ * For any other transcoder, VDSC/joining uses the power well associated
+ * with the pipe/transcoder in use. Hence another reference on the
+ * transcoder power domain will suffice.
+ */
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return POWER_DOMAIN_TRANSCODER_EDP_VDSC;
+ else
+ return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+}
+
+static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 pps_val = 0;
+ u32 rc_buf_thresh_dword[4];
+ u32 rc_range_params_dword[8];
+ u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1;
+ int i = 0;
+
+ /* Populate PICTURE_PARAMETER_SET_0 registers */
+ pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor <<
+ DSC_VER_MIN_SHIFT |
+ vdsc_cfg->bits_per_component << DSC_BPC_SHIFT |
+ vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT;
+ if (vdsc_cfg->block_pred_enable)
+ pps_val |= DSC_BLOCK_PREDICTION;
+ if (vdsc_cfg->convert_rgb)
+ pps_val |= DSC_COLOR_SPACE_CONVERSION;
+ if (vdsc_cfg->enable422)
+ pps_val |= DSC_422_ENABLE;
+ if (vdsc_cfg->vbr_enable)
+ pps_val |= DSC_VBR_ENABLE;
+ DRM_INFO("PPS0 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_1 registers */
+ pps_val = 0;
+ pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
+ DRM_INFO("PPS1 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
+ }
+
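+	/*
+	 * With dsc_split the picture is divided between two VDSC engines,
+	 * so the width programmed per engine below is
+	 * pic_width / num_vdsc_instances.
+	 */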
+ /* Populate PICTURE_PARAMETER_SET_2 registers */
+ pps_val = 0;
+ pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
+ DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
+ DRM_INFO("PPS2 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_3 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
+ DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
+ DRM_INFO("PPS3 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_4 registers */
+ pps_val = 0;
+ pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
+ DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
+ DRM_INFO("PPS4 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_5 registers */
+ pps_val = 0;
+ pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
+ DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
+ DRM_INFO("PPS5 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_6 registers */
+ pps_val = 0;
+ pps_val |= DSC_INITIAL_SCALE_VALUE(vdsc_cfg->initial_scale_value) |
+ DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
+ DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
+ DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
+ DRM_INFO("PPS6 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_7 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
+ DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
+ DRM_INFO("PPS7 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_8 registers */
+ pps_val = 0;
+ pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
+ DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
+ DRM_INFO("PPS8 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_9 registers */
+ pps_val = 0;
+ pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
+ DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
+ DRM_INFO("PPS9 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_10 registers */
+ pps_val = 0;
+ pps_val |= DSC_RC_QUANT_INC_LIMIT0(vdsc_cfg->rc_quant_incr_limit0) |
+ DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
+ DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
+ DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
+ DRM_INFO("PPS10 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
+ }
+
+ /* Populate Picture parameter set 16 */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_CHUNK_SIZE(vdsc_cfg->slice_chunk_size) |
+ DSC_SLICE_PER_LINE((vdsc_cfg->pic_width / num_vdsc_instances) /
+ vdsc_cfg->slice_width) |
+ DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
+ vdsc_cfg->slice_height);
+ DRM_INFO("PPS16 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
+ }
+
+ /* Populate the RC_BUF_THRESH registers */
+ memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword));
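+	/*
+	 * The DSC_NUM_BUF_RANGES - 1 (i.e. 14) one-byte RC buffer thresholds
+	 * are packed four per dword, least significant byte first, spanning
+	 * two low/upper-dword register pairs per VDSC instance.
+	 */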
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ rc_buf_thresh_dword[i / 4] |=
+ (u32)(vdsc_cfg->rc_buf_thresh[i] <<
+ BITS_PER_BYTE * (i % 4));
+ DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
+ rc_buf_thresh_dword[i / 4]);
+ }
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(DSCC_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
+ }
+ } else {
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
+ }
+ }
+
+ /* Populate the RC_RANGE_PARAMETERS registers */
+ memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword));
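+	/*
+	 * Each of the DSC_NUM_BUF_RANGES (i.e. 15) RC range parameters packs
+	 * its min QP, max QP and BPG offset into 16 bits, two entries per
+	 * dword, spanning four low/upper-dword register pairs per instance.
+	 */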
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ rc_range_params_dword[i / 2] |=
+ (u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset <<
+ RC_BPG_OFFSET_SHIFT) |
+ (vdsc_cfg->rc_range_params[i].range_max_qp <<
+ RC_MAX_QP_SHIFT) |
+ (vdsc_cfg->rc_range_params[i].range_min_qp <<
+ RC_MIN_QP_SHIFT)) << 16 * (i % 2));
+ DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
+ rc_range_params_dword[i / 2]);
+ }
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
+ }
+ } else {
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
+ }
+ }
+}
+
+static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
+
+ /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
+ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp);
+
+ /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
+ drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg);
+
+ intel_dig_port->write_infoframe(encoder, crtc_state,
+ DP_SDP_PPS, &dp_dsc_pps_sdp,
+ sizeof(dp_dsc_pps_sdp));
+}
+
+void intel_dsc_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ u32 dss_ctl1_val = 0;
+ u32 dss_ctl2_val = 0;
+
+ if (!crtc_state->dsc_params.compression_enable)
+ return;
+
+	/* Enable the power wells for VDSC/joining */
+ intel_display_power_get(dev_priv,
+ intel_dsc_power_domain(crtc_state));
+
+ intel_configure_pps_for_dsc_encoder(encoder, crtc_state);
+
+ intel_dp_write_dsc_pps_sdp(encoder, crtc_state);
+
+ if (crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ } else {
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+ }
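+
+	/*
+	 * The left VDSC branch is always used; with dsc_split the right
+	 * branch compresses the other half of the picture and the joiner
+	 * recombines the two streams.
+	 */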
+ dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
+ if (crtc_state->dsc_params.dsc_split) {
+ dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
+ dss_ctl1_val |= JOINER_ENABLE;
+ }
+ I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+ I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+}
+
+void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ u32 dss_ctl1_val = 0, dss_ctl2_val = 0;
+
+ if (!old_crtc_state->dsc_params.compression_enable)
+ return;
+
+ if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ } else {
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+ }
+ dss_ctl1_val = I915_READ(dss_ctl1_reg);
+ if (dss_ctl1_val & JOINER_ENABLE)
+ dss_ctl1_val &= ~JOINER_ENABLE;
+ I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+
+ dss_ctl2_val = I915_READ(dss_ctl2_reg);
+ if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE ||
+ dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE)
+ dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE |
+ RIGHT_BRANCH_VDSC_ENABLE);
+ I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+
+	/* Disable the power wells for VDSC/joining */
+ intel_display_power_put(dev_priv,
+ intel_dsc_power_domain(old_crtc_state));
+}
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 4bcdeaf8d98f..4f41e326f3f3 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,58 +48,112 @@
* - Public functions to init or apply the given workaround type.
*/
-static void wa_add(struct drm_i915_private *i915,
- i915_reg_t reg, const u32 mask, const u32 val)
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
{
- struct i915_workarounds *wa = &i915->workarounds;
- unsigned int start = 0, end = wa->count;
- unsigned int addr = i915_mmio_reg_offset(reg);
- struct i915_wa_reg *r;
+ wal->name = name;
+}
+
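+/*
+ * Workaround lists grow in chunks of WA_LIST_CHUNK entries and are
+ * trimmed back to their final size by wa_init_finish().
+ */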
+#define WA_LIST_CHUNK (1 << 4)
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+ /* Trim unused entries. */
+ if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
+ struct i915_wa *list = kmemdup(wal->list,
+ wal->count * sizeof(*list),
+ GFP_KERNEL);
+
+ if (list) {
+ kfree(wal->list);
+ wal->list = list;
+ }
+ }
+
+ if (!wal->count)
+ return;
+
+ DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+ wal->wa_count, wal->name);
+}
+
+static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+ unsigned int addr = i915_mmio_reg_offset(wa->reg);
+ unsigned int start = 0, end = wal->count;
+ const unsigned int grow = WA_LIST_CHUNK;
+ struct i915_wa *wa_;
+
+ GEM_BUG_ON(!is_power_of_2(grow));
+
+ if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+ struct i915_wa *list;
+
+ list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+ GFP_KERNEL);
+ if (!list) {
+ DRM_ERROR("No space for workaround init!\n");
+ return;
+ }
+
+ if (wal->list)
+ memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+ wal->list = list;
+ }
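+
+	/*
+	 * Binary search for an existing entry for this register: a match
+	 * merges the masks and values, otherwise the new workaround is
+	 * appended and bubbled backwards so the list stays sorted by mmio
+	 * offset.
+	 */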
while (start < end) {
unsigned int mid = start + (end - start) / 2;
- if (wa->reg[mid].addr < addr) {
+ if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
start = mid + 1;
- } else if (wa->reg[mid].addr > addr) {
+ } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
end = mid;
} else {
- r = &wa->reg[mid];
+ wa_ = &wal->list[mid];
- if ((mask & ~r->mask) == 0) {
+ if ((wa->mask & ~wa_->mask) == 0) {
DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
- addr, r->mask, r->value);
+ i915_mmio_reg_offset(wa_->reg),
+ wa_->mask, wa_->val);
- r->value &= ~mask;
+ wa_->val &= ~wa->mask;
}
- r->value |= val;
- r->mask |= mask;
+ wal->wa_count++;
+ wa_->val |= wa->val;
+ wa_->mask |= wa->mask;
return;
}
}
- if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
- DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
- addr, mask, val);
- return;
- }
-
- r = &wa->reg[wa->count++];
- r->addr = addr;
- r->value = val;
- r->mask = mask;
+ wal->wa_count++;
+ wa_ = &wal->list[wal->count++];
+ *wa_ = *wa;
- while (r-- > wa->reg) {
- GEM_BUG_ON(r[0].addr == r[1].addr);
- if (r[1].addr > r[0].addr)
+ while (wa_-- > wal->list) {
+ GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
+ i915_mmio_reg_offset(wa_[1].reg));
+ if (i915_mmio_reg_offset(wa_[1].reg) >
+ i915_mmio_reg_offset(wa_[0].reg))
break;
- swap(r[1], r[0]);
+ swap(wa_[1], wa_[0]);
}
}
-#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))
+static void
+__wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val
+ };
+
+ _wa_add(wal, &wa);
+}
+
+#define WA_REG(addr, mask, val) __wa_add(wal, (addr), (mask), (val))
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
@@ -110,8 +164,10 @@ static void wa_add(struct drm_i915_private *i915,
#define WA_SET_FIELD_MASKED(addr, mask, value) \
WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
-static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
@@ -155,17 +211,14 @@ static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
-
- return 0;
}
-static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen8_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen8_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -185,31 +238,28 @@ static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
-
- return 0;
+ (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
-static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen8_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen8_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* Improve HiZ throughput on CHV. */
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
-
- return 0;
}
-static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- if (HAS_LLC(dev_priv)) {
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+ if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
@@ -228,7 +278,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
- if (!IS_COFFEELAKE(dev_priv))
+ if (!IS_COFFEELAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
@@ -271,9 +321,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
- if (IS_SKYLAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv))
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
@@ -300,14 +348,14 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEN9_LP(i915))
WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
-
- return 0;
}
-static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
+static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -318,7 +366,7 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
- if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
+ if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
continue;
/*
@@ -327,12 +375,12 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
*
* -> 0 <= ss <= 3;
*/
- ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
+ ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
- return 0;
+ return;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
@@ -342,28 +390,19 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
GEN9_IZ_HASHING(2, vals[2]) |
GEN9_IZ_HASHING(1, vals[1]) |
GEN9_IZ_HASHING(0, vals[0]));
-
- return 0;
}
-static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void skl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
-
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
-
- return skl_tune_iz_hashing(dev_priv);
+ gen9_ctx_workarounds_init(engine);
+ skl_tune_iz_hashing(engine);
}
-static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -372,57 +411,41 @@ static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaToEnableHwFixForPushConstHWBug:bxt */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- return 0;
}
-static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
-
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE);
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+ if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:kbl */
WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- return 0;
}
-static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void glk_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:glk */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- return 0;
}
-static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:cfl */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
@@ -431,18 +454,19 @@ static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaDisableSbeCacheDispatchPortSharing:cfl */
WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- return 0;
}
-static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
/* WaForceContextSaveRestoreNonCoherent:cnl */
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
@@ -450,7 +474,7 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
+ if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
@@ -470,16 +494,17 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaDisableEarlyEOT:cnl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
-
- return 0;
}
-static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
/* Wa_1604370585:icl (pre-prod)
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
PUSH_CONSTANT_DEREF_DISABLE);
@@ -495,7 +520,7 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* Wa_2006611047:icl (pre-prod)
* Formerly known as WaDisableImprovedTdlClkGating
*/
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
@@ -504,70 +529,67 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN11_STATE_CACHE_REDIRECT_TO_CS);
/* Wa_2006665173:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
-
- return 0;
}
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
- int err = 0;
-
- dev_priv->workarounds.count = 0;
-
- if (INTEL_GEN(dev_priv) < 8)
- err = 0;
- else if (IS_BROADWELL(dev_priv))
- err = bdw_ctx_workarounds_init(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- err = chv_ctx_workarounds_init(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
- err = skl_ctx_workarounds_init(dev_priv);
- else if (IS_BROXTON(dev_priv))
- err = bxt_ctx_workarounds_init(dev_priv);
- else if (IS_KABYLAKE(dev_priv))
- err = kbl_ctx_workarounds_init(dev_priv);
- else if (IS_GEMINILAKE(dev_priv))
- err = glk_ctx_workarounds_init(dev_priv);
- else if (IS_COFFEELAKE(dev_priv))
- err = cfl_ctx_workarounds_init(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- err = cnl_ctx_workarounds_init(dev_priv);
- else if (IS_ICELAKE(dev_priv))
- err = icl_ctx_workarounds_init(dev_priv);
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+ wa_init_start(wal, "context");
+
+ if (INTEL_GEN(i915) < 8)
+ return;
+ else if (IS_BROADWELL(i915))
+ bdw_ctx_workarounds_init(engine);
+ else if (IS_CHERRYVIEW(i915))
+ chv_ctx_workarounds_init(engine);
+ else if (IS_SKYLAKE(i915))
+ skl_ctx_workarounds_init(engine);
+ else if (IS_BROXTON(i915))
+ bxt_ctx_workarounds_init(engine);
+ else if (IS_KABYLAKE(i915))
+ kbl_ctx_workarounds_init(engine);
+ else if (IS_GEMINILAKE(i915))
+ glk_ctx_workarounds_init(engine);
+ else if (IS_COFFEELAKE(i915))
+ cfl_ctx_workarounds_init(engine);
+ else if (IS_CANNONLAKE(i915))
+ cnl_ctx_workarounds_init(engine);
+ else if (IS_ICELAKE(i915))
+ icl_ctx_workarounds_init(engine);
else
- MISSING_CASE(INTEL_GEN(dev_priv));
- if (err)
- return err;
+ MISSING_CASE(INTEL_GEN(i915));
- DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
- dev_priv->workarounds.count);
- return 0;
+ wa_init_finish(wal);
}
-int intel_ctx_workarounds_emit(struct i915_request *rq)
+int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
- struct i915_workarounds *w = &rq->i915->workarounds;
+ struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
+ struct i915_wa *wa;
+ unsigned int i;
u32 *cs;
- int ret, i;
+ int ret;
- if (w->count == 0)
+ if (wal->count == 0)
return 0;
ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
- cs = intel_ring_begin(rq, (w->count * 2 + 2));
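+	/*
+	 * One dword for the MI_LOAD_REGISTER_IMM header, two per (register,
+	 * value) pair, plus a trailing MI_NOOP to pad the emission to an
+	 * even number of dwords.
+	 */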
+ cs = intel_ring_begin(rq, (wal->count * 2 + 2));
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(w->count);
- for (i = 0; i < w->count; i++) {
- *cs++ = w->reg[i].addr;
- *cs++ = w->reg[i].value;
+ *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = wa->val;
}
*cs++ = MI_NOOP;
@@ -580,160 +602,149 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
return 0;
}
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
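+/*
+ * Helpers to record workarounds in an i915_wa_list. For masked registers
+ * the write-enable mask lives in the upper 16 bits, so wa_masked_en()
+ * programs the bit together with its enable via _MASKED_BIT_ENABLE()
+ * while recording only the bit itself as the verification mask.
+ */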
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = val,
+ .val = _MASKED_BIT_ENABLE(val)
+ };
+
+ _wa_add(wal, &wa);
+}
+
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val
+ };
+
+ _wa_add(wal, &wa);
}
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
+ wa_write_masked_or(wal, reg, ~0, val);
}
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
- _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+ wa_write_masked_or(wal, reg, val, val);
+}
- /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
- I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
- GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+{
+ struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaDisableKillLogic:bxt,skl,kbl */
- if (!IS_COFFEELAKE(dev_priv))
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- ECOCHK_DIS_TLB);
+ if (!IS_COFFEELAKE(i915))
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ ECOCHK_DIS_TLB);
- if (HAS_LLC(dev_priv)) {
+ if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
- I915_WRITE(MMCD_MISC_CTRL,
- I915_READ(MMCD_MISC_CTRL) |
- MMCD_PCLA |
- MMCD_HOTSPOT_EN);
+ wa_write_or(wal,
+ MMCD_MISC_CTRL,
+ MMCD_PCLA | MMCD_HOTSPOT_EN);
}
/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
-
- /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
- if (IS_GEN9_LP(dev_priv)) {
- u32 val = I915_READ(GEN8_L3SQCREG1);
-
- val &= ~L3_PRIO_CREDITS_MASK;
- val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
- I915_WRITE(GEN8_L3SQCREG1, val);
- }
-
- /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
- I915_WRITE(GEN8_L3SQCREG4,
- I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ BDW_DISABLE_HDC_INVALIDATION);
}
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:skl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:skl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaDisablePooledEuLoadBalancingFix:bxt */
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+ gen9_gt_workarounds_init(i915);
/* WaInPlaceDecompressionHang:bxt */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:kbl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+ if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
/* WaDisableGafsUnitClkGating:kbl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- for_each_engine(engine, dev_priv, tmp) {
- if (engine->id == RCS)
- continue;
-
- I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
- }
- }
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ gen9_gt_workarounds_init(i915);
}
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:cfl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:cfl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:cfl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void wa_init_mcr(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
- u32 mcr;
+ struct i915_wa_list *wal = &dev_priv->gt_wa_list;
u32 mcr_slice_subslice_mask;
/*
@@ -770,8 +781,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
}
- mcr = I915_READ(GEN8_MCR_SELECTOR);
-
if (INTEL_GEN(dev_priv) >= 11)
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
@@ -789,173 +798,220 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
* occasions, such as INSTDONE, where this value is dependent
* on s/ss combo, the read should be done with read_subslice_reg.
*/
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
- I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+ wa_write_masked_or(wal,
+ GEN8_MCR_SELECTOR,
+ mcr_slice_subslice_mask,
+ intel_calculate_mcr_s_ss_select(dev_priv));
}
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_mcr(i915);
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
/* WaInPlaceDecompressionHang:cnl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaEnablePreemptionGranularityControlByUMD:cnl */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* This is not an Wa. Enable for better image quality */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+ wa_init_mcr(i915);
/* WaInPlaceDecompressionHang:icl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaPipelineFlushCoherentLines:icl */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* Wa_1405543622:icl
- * Formerly known as WaGAPZPriorityScheme
- */
- I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
- GEN11_ARBITRATION_PRIO_ORDER_MASK);
-
- /* Wa_1604223664:icl
- * Formerly known as WaL3BankAddressHashing
- */
- I915_WRITE(GEN8_GARBCNTL,
- (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
- GEN11_HASH_CTRL_EXCL_BIT0);
- I915_WRITE(GEN11_GLBLINVL,
- (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
- GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaModifyGamTlbPartitioning:icl */
- I915_WRITE(GEN11_GACB_PERF_CTRL,
- (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
- GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
-
- /* Wa_1405733216:icl
- * Formerly known as WaDisableCleanEvicts
- */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ wa_write_masked_or(wal,
+ GEN11_GACB_PERF_CTRL,
+ GEN11_HASH_CTRL_MASK,
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
*/
- I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
- GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
- GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+ wa_write_or(wal,
+ GEN11_LSN_UNSLCVC,
+ GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+ GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
/* Wa_220166154:icl
* Formerly known as WaDisCtxReload
*/
- I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
- GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+ wa_write_or(wal,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
/* Wa_1405779004:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
- MSCUNIT_CLKGATE_DIS);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ MSCUNIT_CLKGATE_DIS);
/* Wa_1406680159:icl */
- I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
- GWUNIT_CLKGATE_DIS);
-
- /* Wa_1604302699:icl */
- I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
- I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
- GEN11_I2M_WRITE_DISABLE);
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
/* Wa_1406838659:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
- I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
- I915_READ(INF_UNIT_LEVEL_CLKGATE) |
- CGPSF_CLKGATE_DIS);
-
- /* WaForwardProgressSoftReset:icl */
- I915_WRITE(GEN10_SCRATCH_LNCF2,
- I915_READ(GEN10_SCRATCH_LNCF2) |
- PMFLUSHDONE_LNICRSDROP |
- PMFLUSH_GAPL3UNBLOCK |
- PMFLUSHDONE_LNEBLK);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ INF_UNIT_LEVEL_CLKGATE,
+ CGPSF_CLKGATE_DIS);
/* Wa_1406463099:icl
* Formerly known as WaGamTlbPendError
*/
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
- if (INTEL_GEN(dev_priv) < 8)
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_start(wal, "GT");
+
+ if (INTEL_GEN(i915) < 8)
return;
- else if (IS_BROADWELL(dev_priv))
- bdw_gt_workarounds_apply(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- chv_gt_workarounds_apply(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
- skl_gt_workarounds_apply(dev_priv);
- else if (IS_BROXTON(dev_priv))
- bxt_gt_workarounds_apply(dev_priv);
- else if (IS_KABYLAKE(dev_priv))
- kbl_gt_workarounds_apply(dev_priv);
- else if (IS_GEMINILAKE(dev_priv))
- glk_gt_workarounds_apply(dev_priv);
- else if (IS_COFFEELAKE(dev_priv))
- cfl_gt_workarounds_apply(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_gt_workarounds_apply(dev_priv);
- else if (IS_ICELAKE(dev_priv))
- icl_gt_workarounds_apply(dev_priv);
+ else if (IS_BROADWELL(i915))
+ return;
+ else if (IS_CHERRYVIEW(i915))
+ return;
+ else if (IS_SKYLAKE(i915))
+ skl_gt_workarounds_init(i915);
+ else if (IS_BROXTON(i915))
+ bxt_gt_workarounds_init(i915);
+ else if (IS_KABYLAKE(i915))
+ kbl_gt_workarounds_init(i915);
+ else if (IS_GEMINILAKE(i915))
+ glk_gt_workarounds_init(i915);
+ else if (IS_COFFEELAKE(i915))
+ cfl_gt_workarounds_init(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_gt_workarounds_init(i915);
+ else if (IS_ICELAKE(i915))
+ icl_gt_workarounds_init(i915);
else
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(INTEL_GEN(i915));
+
+ wa_init_finish(wal);
}
-struct whitelist {
- i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
- unsigned int count;
- u32 nopid;
-};
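+/*
+ * Compute the union of forcewake domains needed to read and write every
+ * register in the list, so wa_list_apply() can grab a single forcewake
+ * reference covering them all.
+ */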
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+ const struct i915_wa_list *wal)
+{
+ enum forcewake_domains fw = 0;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ fw |= intel_uncore_forcewake_for_reg(dev_priv,
+ wa->reg,
+ FW_REG_READ |
+ FW_REG_WRITE);
+
+ return fw;
+}
-static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
{
- if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
+ enum forcewake_domains fw;
+ unsigned long flags;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ if (!wal->count)
return;
- w->reg[w->count++] = reg;
+ fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_get__locked(dev_priv, fw);
+
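+	/*
+	 * Read-modify-write each register with forcewake held and the
+	 * uncore lock taken so the update cannot race with concurrent mmio
+	 * accesses.
+	 */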
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 val = I915_READ_FW(wa->reg);
+
+ val &= ~wa->mask;
+ val |= wa->val;
+
+ I915_WRITE_FW(wa->reg, val);
+ }
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+ DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
+}
+
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+{
+ wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
+}
+
+static bool
+wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
+{
+ if ((cur ^ wa->val) & wa->mask) {
+ DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+ name, from, i915_mmio_reg_offset(wa->reg), cur,
+ cur & wa->mask, wa->val, wa->mask);
+
+ return false;
+ }
+
+ return true;
}
-static void bdw_whitelist_build(struct whitelist *w)
+static bool wa_list_verify(struct drm_i915_private *dev_priv,
+ const struct i915_wa_list *wal,
+ const char *from)
{
+ struct i915_wa *wa;
+ unsigned int i;
+ bool ok = true;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ ok &= wa_verify(wa, I915_READ(wa->reg), wal->name, from);
+
+ return ok;
}
-static void chv_whitelist_build(struct whitelist *w)
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+ const char *from)
{
+ return wa_list_verify(dev_priv, &dev_priv->gt_wa_list, from);
}
-static void gen9_whitelist_build(struct whitelist *w)
+static void
+whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+{
+ struct i915_wa wa = {
+ .reg = reg
+ };
+
+ if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
+ return;
+
+ _wa_add(wal, &wa);
+}
+
+static void gen9_whitelist_build(struct i915_wa_list *w)
{
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
@@ -967,7 +1023,7 @@ static void gen9_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_HDC_CHICKEN1);
}
-static void skl_whitelist_build(struct whitelist *w)
+static void skl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -975,12 +1031,12 @@ static void skl_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void bxt_whitelist_build(struct whitelist *w)
+static void bxt_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
}
-static void kbl_whitelist_build(struct whitelist *w)
+static void kbl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -988,7 +1044,7 @@ static void kbl_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void glk_whitelist_build(struct whitelist *w)
+static void glk_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -996,37 +1052,41 @@ static void glk_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
-static void cfl_whitelist_build(struct whitelist *w)
+static void cfl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
}
-static void cnl_whitelist_build(struct whitelist *w)
+static void cnl_whitelist_build(struct i915_wa_list *w)
{
/* WaEnablePreemptionGranularityControlByUMD:cnl */
whitelist_reg(w, GEN8_CS_CHICKEN1);
}
-static void icl_whitelist_build(struct whitelist *w)
+static void icl_whitelist_build(struct i915_wa_list *w)
{
+ /* WaAllowUMDToModifyHalfSliceChicken7:icl */
+ whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+
+ /* WaAllowUMDToModifySamplerMode:icl */
+ whitelist_reg(w, GEN10_SAMPLER_MODE);
}
-static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
- struct whitelist *w)
+void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *w = &engine->whitelist;
GEM_BUG_ON(engine->id != RCS);
- w->count = 0;
- w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));
+ wa_init_start(w, "whitelist");
if (INTEL_GEN(i915) < 8)
- return NULL;
+ return;
else if (IS_BROADWELL(i915))
- bdw_whitelist_build(w);
+ return;
else if (IS_CHERRYVIEW(i915))
- chv_whitelist_build(w);
+ return;
else if (IS_SKYLAKE(i915))
skl_whitelist_build(w);
else if (IS_BROXTON(i915))
@@ -1044,39 +1104,180 @@ static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
else
MISSING_CASE(INTEL_GEN(i915));
- return w;
+ wa_init_finish(w);
}
-static void whitelist_apply(struct intel_engine_cs *engine,
- const struct whitelist *w)
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ const struct i915_wa_list *wal = &engine->whitelist;
const u32 base = engine->mmio_base;
+ struct i915_wa *wa;
unsigned int i;
- if (!w)
+ if (!wal->count)
return;
- intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
-
- for (i = 0; i < w->count; i++)
- I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i),
- i915_mmio_reg_offset(w->reg[i]));
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(wa->reg));
/* And clear the rest just in case of garbage */
for (; i < RING_MAX_NONPRIV_SLOTS; i++)
- I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid);
+ I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(RING_NOPID(base)));
- intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+ DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void rcs_engine_wa_init(struct intel_engine_cs *engine)
{
- struct whitelist w;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (IS_ICELAKE(i915)) {
+		/* This is not a Wa. Enable for better image quality */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+ /* WaPipelineFlushCoherentLines:icl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /*
+ * Wa_1405543622:icl
+ * Formerly known as WaGAPZPriorityScheme
+ */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+ /*
+ * Wa_1604223664:icl
+ * Formerly known as WaL3BankAddressHashing
+ */
+ wa_write_masked_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_HASH_CTRL_EXCL_MASK,
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ wa_write_masked_or(wal,
+ GEN11_GLBLINVL,
+ GEN11_BANK_HASH_ADDR_EXCL_MASK,
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
- whitelist_apply(engine, whitelist_build(engine, &w));
+ /*
+ * Wa_1405733216:icl
+ * Formerly known as WaDisableCleanEvicts
+ */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+ /* WaForwardProgressSoftReset:icl */
+ wa_write_or(wal,
+ GEN10_SCRATCH_LNCF2,
+ PMFLUSHDONE_LNICRSDROP |
+ PMFLUSH_GAPL3UNBLOCK |
+ PMFLUSHDONE_LNEBLK);
+
+ /* Wa_1406609255:icl (pre-prod) */
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_DEMAND_PREFETCH |
+ GEN7_DISABLE_SAMPLER_PREFETCH);
+ }
+
+ if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+ wa_masked_en(wal,
+ GEN7_FF_SLICE_CS_CHICKEN1,
+ GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+ }
+
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+ /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN9_GAPS_TSV_CREDIT_DISABLE);
+ }
+
+ if (IS_BROXTON(i915)) {
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ wa_masked_en(wal,
+ FF_SLICE_CS_CHICKEN2,
+ GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ }
+
+ if (IS_GEN9(i915)) {
+ /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+ wa_masked_en(wal,
+ GEN9_CSFE_CHICKEN1_RCS,
+ GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+ wa_write_or(wal,
+ BDW_SCRATCH1,
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+ if (IS_GEN9_LP(i915))
+ wa_write_masked_or(wal,
+ GEN8_L3SQCREG1,
+ L3_PRIO_CREDITS_MASK,
+ L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+ }
+}
+
+static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ /* WaKBLVECSSemaphoreWaitPoll:kbl */
+ if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+ wa_write(wal,
+ RING_SEMA_WAIT_POLL(engine->mmio_base),
+ 1);
+ }
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+ return;
+
+ wa_init_start(wal, engine->name);
+
+ if (engine->id == RCS)
+ rcs_engine_wa_init(engine);
+ else
+ xcs_engine_wa_init(engine);
+
+ wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+ wa_list_apply(engine->i915, &engine->wa_list);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from)
+{
+ return wa_list_verify(engine->i915, &engine->wa_list, from);
+}
+
#include "selftests/intel_workarounds.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index b11d0623e626..7c734714b05e 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -7,11 +7,39 @@
#ifndef _I915_WORKAROUNDS_H_
#define _I915_WORKAROUNDS_H_
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
-int intel_ctx_workarounds_emit(struct i915_request *rq);
+#include <linux/slab.h>
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+struct i915_wa {
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+};
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
+struct i915_wa_list {
+ const char *name;
+ struct i915_wa *list;
+ unsigned int count;
+ unsigned int wa_count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+ kfree(wal->list);
+ memset(wal, 0, sizeof(*wal));
+}
+
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
+int intel_engine_emit_ctx_wa(struct i915_request *rq);
+
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+ const char *from);
+
+void intel_engine_init_whitelist(struct intel_engine_cs *engine);
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
#endif
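The mask/val pair in struct i915_wa encodes a read-modify-write; here is a hedged sketch of the per-entry semantics assumed by wa_list_apply() (the real loop lives in intel_workarounds.c):

static void example_apply_one(struct drm_i915_private *dev_priv,
			      const struct i915_wa *wa)
{
	u32 val = I915_READ(wa->reg);

	val &= ~wa->mask;	/* clear the bits the workaround owns */
	val |= wa->val;		/* then install the workaround value */

	I915_WRITE(wa->reg, val);
}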
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 8d03f64eabd7..26c065c8d2c0 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -551,7 +551,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
err = igt_check_page_sizes(vma);
if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
- pr_err("page_sizes.gtt=%u, expected %lu\n",
+ pr_err("page_sizes.gtt=%u, expected %llu\n",
vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
err = -EINVAL;
}
@@ -1135,7 +1135,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
n = 0;
for_each_engine(engine, i915, id) {
if (!intel_engine_can_store_dword(engine)) {
- pr_info("store-dword-imm not supported on engine=%u\n", id);
+ pr_info("store-dword-imm not supported on engine=%u\n",
+ id);
continue;
}
engines[n++] = engine;
@@ -1167,17 +1168,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
engine = engines[order[i] % n];
i = (i + 1) % (n * I915_NUM_ENGINES);
- err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1);
+ /*
+ * In order to utilize 64K pages we need to both pad the vma
+ * size and ensure the vma offset is at the start of the pt
+ * boundary, however to improve coverage we opt for testing both
+ * aligned and unaligned offsets.
+ */
+ if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+ offset_low = round_down(offset_low,
+ I915_GTT_PAGE_SIZE_2M);
+
+ err = __igt_write_huge(ctx, engine, obj, size, offset_low,
+ dword, num + 1);
if (err)
break;
- err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1);
+ err = __igt_write_huge(ctx, engine, obj, size, offset_high,
+ dword, num + 1);
if (err)
break;
if (igt_timeout(end_time,
"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
- __func__, engine->id, offset_low, offset_high, max_page_size))
+ __func__, engine->id, offset_low, offset_high,
+ max_page_size))
break;
}
@@ -1436,7 +1450,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!USES_FULL_48BIT_PPGTT(dev_priv)) {
+ if (!HAS_FULL_48BIT_PPGTT(dev_priv)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1687,10 +1701,9 @@ int i915_gem_huge_page_mock_selftests(void)
SUBTEST(igt_mock_ppgtt_huge_fill),
SUBTEST(igt_mock_ppgtt_64K),
};
- int saved_ppgtt = i915_modparams.enable_ppgtt;
struct drm_i915_private *dev_priv;
- struct pci_dev *pdev;
struct i915_hw_ppgtt *ppgtt;
+ struct pci_dev *pdev;
int err;
dev_priv = mock_gem_device();
@@ -1698,7 +1711,7 @@ int i915_gem_huge_page_mock_selftests(void)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- i915_modparams.enable_ppgtt = 3;
+ mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
pdev = dev_priv->drm.pdev;
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
@@ -1731,9 +1744,6 @@ out_close:
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
-
- i915_modparams.enable_ppgtt = saved_ppgtt;
-
drm_dev_put(&dev_priv->drm);
return err;
@@ -1753,7 +1763,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
struct i915_gem_context *ctx;
int err;
- if (!USES_PPGTT(dev_priv)) {
+ if (!HAS_PPGTT(dev_priv)) {
pr_info("PPGTT not supported, skipping live-selftests\n");
return 0;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 76df25aa90c9..7d82043aff10 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -39,7 +39,8 @@ struct live_test {
const char *func;
const char *name;
- unsigned int reset_count;
+ unsigned int reset_global;
+ unsigned int reset_engine[I915_NUM_ENGINES];
};
static int begin_live_test(struct live_test *t,
@@ -47,6 +48,8 @@ static int begin_live_test(struct live_test *t,
const char *func,
const char *name)
{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int err;
t->i915 = i915;
@@ -63,7 +66,11 @@ static int begin_live_test(struct live_test *t,
}
i915->gpu_error.missed_irq_rings = 0;
- t->reset_count = i915_reset_count(&i915->gpu_error);
+ t->reset_global = i915_reset_count(&i915->gpu_error);
+
+ for_each_engine(engine, i915, id)
+ t->reset_engine[id] =
+ i915_reset_engine_count(&i915->gpu_error, engine);
return 0;
}
@@ -71,14 +78,28 @@ static int begin_live_test(struct live_test *t,
static int end_live_test(struct live_test *t)
{
struct drm_i915_private *i915 = t->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (igt_flush_test(i915, I915_WAIT_LOCKED))
return -EIO;
- if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
+ if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
pr_err("%s(%s): GPU was reset %d times!\n",
t->func, t->name,
- i915_reset_count(&i915->gpu_error) - t->reset_count);
+ i915_reset_count(&i915->gpu_error) - t->reset_global);
+ return -EIO;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (t->reset_engine[id] ==
+ i915_reset_engine_count(&i915->gpu_error, engine))
+ continue;
+
+ pr_err("%s(%s): engine '%s' was reset %d times!\n",
+ t->func, t->name, engine->name,
+ i915_reset_engine_count(&i915->gpu_error, engine) -
+ t->reset_engine[id]);
return -EIO;
}
@@ -531,11 +552,11 @@ static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
+ unsigned long ncontexts, ndwords, dw;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
- unsigned long ncontexts, ndwords, dw;
- bool first_shared_gtt = true;
+ struct live_test t;
int err = -ENODEV;
/*
@@ -553,6 +574,10 @@ static int igt_ctx_exec(void *arg)
mutex_lock(&i915->drm.struct_mutex);
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
ncontexts = 0;
ndwords = 0;
dw = 0;
@@ -561,12 +586,7 @@ static int igt_ctx_exec(void *arg)
struct i915_gem_context *ctx;
unsigned int id;
- if (first_shared_gtt) {
- ctx = __create_hw_context(i915, file->driver_priv);
- first_shared_gtt = false;
- } else {
- ctx = i915_gem_create_context(i915, file->driver_priv);
- }
+ ctx = i915_gem_create_context(i915, file->driver_priv);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
@@ -622,7 +642,7 @@ static int igt_ctx_exec(void *arg)
}
out_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (end_live_test(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
@@ -634,13 +654,14 @@ static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
+ struct i915_gem_context *ctx;
+ struct i915_hw_ppgtt *ppgtt;
+ unsigned long ndwords, dw;
struct drm_file *file;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
- struct i915_gem_context *ctx;
- struct i915_hw_ppgtt *ppgtt;
- unsigned long ndwords, dw;
+ struct live_test t;
int err = -ENODEV;
/*
@@ -655,6 +676,10 @@ static int igt_ctx_readonly(void *arg)
mutex_lock(&i915->drm.struct_mutex);
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
ctx = i915_gem_create_context(i915, file->driver_priv);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
@@ -727,7 +752,324 @@ static int igt_ctx_readonly(void *arg)
}
out_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (end_live_test(&t))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mock_file_free(i915, file);
+ return err;
+}
+
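+/* Reject offsets that overlap an existing allocation in the ppgtt. */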
+static int check_scratch(struct i915_gem_context *ctx, u64 offset)
+{
+ struct drm_mm_node *node =
+ __drm_mm_interval_first(&ctx->ppgtt->vm.mm,
+ offset, offset + sizeof(u32) - 1);
+ if (!node || node->start > offset)
+ return 0;
+
+ GEM_BUG_ON(offset >= node->start + node->size);
+
+ pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
+ upper_32_bits(offset), lower_32_bits(offset));
+ return -EINVAL;
+}
+
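+/*
+ * Submit a one-off batch that stores @value at @offset inside the
+ * context's ppgtt, so that a second context can later probe whether
+ * the write leaked across the vm boundary.
+ */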
+static int write_to_scratch(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u64 offset, u32 value)
+{
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cmd;
+ int err;
+
+ GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+ if (INTEL_GEN(i915) >= 8) {
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ } else {
+ *cmd++ = 0;
+ *cmd++ = offset;
+ }
+ *cmd++ = value;
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+ if (err)
+ goto err;
+
+ err = check_scratch(ctx, offset);
+ if (err)
+ goto err_unpin;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ if (err)
+ goto err_request;
+
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto skip_request;
+
+ i915_gem_object_set_active_reference(obj);
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_request_add(rq);
+
+ return 0;
+
+skip_request:
+ i915_request_skip(rq, err);
+err_request:
+ i915_request_add(rq);
+err_unpin:
+ i915_vma_unpin(vma);
+err:
+ i915_gem_object_put(obj);
+ return err;
+}
+
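+/*
+ * Read back the dword at @offset in the context's ppgtt: load it into
+ * a GPR, store the GPR into the batch page, then read the result on
+ * the CPU.
+ */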
+static int read_from_scratch(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u64 offset, u32 *value)
+{
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_object *obj;
+ const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
+ const u32 result = 0x100;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cmd;
+ int err;
+
+ GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ memset(cmd, POISON_INUSE, PAGE_SIZE);
+ if (INTEL_GEN(i915) >= 8) {
+ *cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
+ *cmd++ = RCS_GPR0;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cmd++ = RCS_GPR0;
+ *cmd++ = result;
+ *cmd++ = 0;
+ } else {
+ *cmd++ = MI_LOAD_REGISTER_MEM;
+ *cmd++ = RCS_GPR0;
+ *cmd++ = offset;
+ *cmd++ = MI_STORE_REGISTER_MEM;
+ *cmd++ = RCS_GPR0;
+ *cmd++ = result;
+ }
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+ if (err)
+ goto err;
+
+ err = check_scratch(ctx, offset);
+ if (err)
+ goto err_unpin;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ if (err)
+ goto err_request;
+
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto skip_request;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_request_add(rq);
+
+ err = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (err)
+ goto err;
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ *value = cmd[result / sizeof(*cmd)];
+ i915_gem_object_unpin_map(obj);
+ i915_gem_object_put(obj);
+
+ return 0;
+
+skip_request:
+ i915_request_skip(rq, err);
+err_request:
+ i915_request_add(rq);
+err_unpin:
+ i915_vma_unpin(vma);
+err:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int igt_vm_isolation(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx_a, *ctx_b;
+ struct intel_engine_cs *engine;
+ struct drm_file *file;
+ I915_RND_STATE(prng);
+ unsigned long count;
+ struct live_test t;
+ unsigned int id;
+ u64 vm_total;
+ int err;
+
+ if (INTEL_GEN(i915) < 7)
+ return 0;
+
+ /*
+ * The simple goal here is that a write into one context is not
+ * observed in a second (separate page tables and scratch).
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
+ ctx_a = i915_gem_create_context(i915, file->driver_priv);
+ if (IS_ERR(ctx_a)) {
+ err = PTR_ERR(ctx_a);
+ goto out_unlock;
+ }
+
+ ctx_b = i915_gem_create_context(i915, file->driver_priv);
+ if (IS_ERR(ctx_b)) {
+ err = PTR_ERR(ctx_b);
+ goto out_unlock;
+ }
+
+ /* We can only test vm isolation if the vms are distinct */
+ if (ctx_a->ppgtt == ctx_b->ppgtt)
+ goto out_unlock;
+
+ vm_total = ctx_a->ppgtt->vm.total;
+ GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
+ vm_total -= I915_GTT_PAGE_SIZE;
+
+ intel_runtime_pm_get(i915);
+
+ count = 0;
+ for_each_engine(engine, i915, id) {
+ IGT_TIMEOUT(end_time);
+ unsigned long this = 0;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ while (!__igt_timeout(end_time, NULL)) {
+ u32 value = 0xc5c5c5c5;
+ u64 offset;
+
+ div64_u64_rem(i915_prandom_u64_state(&prng),
+ vm_total, &offset);
+ offset &= -sizeof(u32); /* align down to a dword */
+ offset += I915_GTT_PAGE_SIZE;
+
+ err = write_to_scratch(ctx_a, engine,
+ offset, 0xdeadbeef);
+ if (err == 0)
+ err = read_from_scratch(ctx_b, engine,
+ offset, &value);
+ if (err)
+ goto out_rpm;
+
+ if (value) {
+ pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
+ engine->name, value,
+ upper_32_bits(offset),
+ lower_32_bits(offset),
+ this);
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ this++;
+ }
+ count += this;
+ }
+ pr_info("Checked %lu scratch offsets across %d engines\n",
+ count, INTEL_INFO(i915)->num_rings);
+
+out_rpm:
+ intel_runtime_pm_put(i915);
+out_unlock:
+ if (end_live_test(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
@@ -865,33 +1207,6 @@ out_unlock:
return err;
}
-static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
-{
- struct drm_i915_gem_object *obj;
- int err;
-
- err = i915_gem_init_aliasing_ppgtt(i915);
- if (err)
- return err;
-
- list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
- struct i915_vma *vma;
-
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- continue;
-
- vma->flags &= ~I915_VMA_LOCAL_BIND;
- }
-
- return 0;
-}
-
-static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
-{
- i915_gem_fini_aliasing_ppgtt(i915);
-}
-
int i915_gem_context_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -917,32 +1232,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(live_nop_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
+ SUBTEST(igt_vm_isolation),
};
- bool fake_alias = false;
- int err;
if (i915_terminally_wedged(&dev_priv->gpu_error))
return 0;
- /* Install a fake aliasing gtt for exercise */
- if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
- mutex_lock(&dev_priv->drm.struct_mutex);
- err = fake_aliasing_ppgtt_enable(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (err)
- return err;
-
- GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
- fake_alias = true;
- }
-
- err = i915_subtests(tests, dev_priv);
-
- if (fake_alias) {
- mutex_lock(&dev_priv->drm.struct_mutex);
- fake_aliasing_ppgtt_disable(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- }
-
- return err;
+ return i915_subtests(tests, dev_priv);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 128ad1cf0647..4365979d8222 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -351,7 +351,7 @@ static int igt_evict_contexts(void *arg)
* where the GTT space of the request is separate from the GGTT
* allocation required to build the request.
*/
- if (!USES_FULL_PPGTT(i915))
+ if (!HAS_FULL_PPGTT(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 8e2e269db97e..69fe86b30fbb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -153,7 +153,7 @@ static int igt_ppgtt_alloc(void *arg)
 /* Allocate a ppgtt and try to fill the entire range */
- if (!USES_PPGTT(dev_priv))
+ if (!HAS_PPGTT(dev_priv))
return 0;
ppgtt = __hw_ppgtt_create(dev_priv);
@@ -1001,7 +1001,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
IGT_TIMEOUT(end_time);
int err;
- if (!USES_FULL_PPGTT(dev_priv))
+ if (!HAS_FULL_PPGTT(dev_priv))
return 0;
file = mock_file(dev_priv);
@@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (vma->node.start != total ||
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
- pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
vma->node.start, vma->node.size,
total, 2*I915_GTT_PAGE_SIZE);
err = -EINVAL;
@@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (vma->node.start != total ||
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
- pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
vma->node.start, vma->node.size,
total, 2*I915_GTT_PAGE_SIZE);
err = -EINVAL;
@@ -1430,7 +1430,7 @@ static int igt_gtt_reserve(void *arg)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (vma->node.start != offset ||
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
- pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
vma->node.start, vma->node.size,
offset, 2*I915_GTT_PAGE_SIZE);
err = -EINVAL;
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
new file mode 100644
index 000000000000..208a966da8ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_reset.h"
+
+#include "../i915_drv.h"
+#include "../intel_ringbuffer.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ pr_debug("%s: current gpu_error=%08lx\n",
+ __func__, i915->gpu_error.flags);
+
+ while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
+ wait_event(i915->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &i915->gpu_error.flags));
+
+ for_each_engine(engine, i915, id) {
+ while (test_and_set_bit(I915_RESET_ENGINE + id,
+ &i915->gpu_error.flags))
+ wait_on_bit(&i915->gpu_error.flags,
+ I915_RESET_ENGINE + id,
+ TASK_UNINTERRUPTIBLE);
+ }
+}
+
+void igt_global_reset_unlock(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
new file mode 100644
index 000000000000..5f0234d045d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_RESET_H__
+#define __I915_SELFTESTS_IGT_RESET_H__
+
+#include "../i915_drv.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915);
+void igt_global_reset_unlock(struct drm_i915_private *i915);
+
+#endif
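A usage sketch, mirroring the hangcheck conversions below: a selftest that provokes resets brackets itself with the helpers so it neither races other resetters nor leaves the reset bits set (the engine choice is illustrative):

static int example_reset_under_lock(struct drm_i915_private *i915)
{
	int err;

	igt_global_reset_lock(i915);	/* take BACKOFF + every engine bit */
	err = i915_reset_engine(i915->engine[RCS], "selftest");
	igt_global_reset_unlock(i915);	/* wake waiters on reset_queue */

	return err;
}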
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
new file mode 100644
index 000000000000..8cd34f6e6859
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -0,0 +1,199 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_spinner.h"
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+{
+ unsigned int mode;
+ void *vaddr;
+ int err;
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+ memset(spin, 0, sizeof(*spin));
+ spin->i915 = i915;
+
+ spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(spin->hws)) {
+ err = PTR_ERR(spin->hws);
+ goto err;
+ }
+
+ spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(spin->obj)) {
+ err = PTR_ERR(spin->obj);
+ goto err_hws;
+ }
+
+ i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+ vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+ spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+ mode = i915_coherent_map_type(i915);
+ vaddr = i915_gem_object_pin_map(spin->obj, mode);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_unpin_hws;
+ }
+ spin->batch = vaddr;
+
+ return 0;
+
+err_unpin_hws:
+ i915_gem_object_unpin_map(spin->hws);
+err_obj:
+ i915_gem_object_put(spin->obj);
+err_hws:
+ i915_gem_object_put(spin->hws);
+err:
+ return err;
+}
+
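+/* Each fence context claims its own u32 slot in the shared HWS page. */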
+static unsigned int seqno_offset(u64 fence)
+{
+ return offset_in_page(sizeof(u32) * fence);
+}
+
+static u64 hws_address(const struct i915_vma *hws,
+ const struct i915_request *rq)
+{
+ return hws->node.start + seqno_offset(rq->fence.context);
+}
+
+static int emit_recurse_batch(struct igt_spinner *spin,
+ struct i915_request *rq,
+ u32 arbitration_command)
+{
+ struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+ struct i915_vma *hws, *vma;
+ u32 *batch;
+ int err;
+
+ vma = i915_vma_instance(spin->obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ hws = i915_vma_instance(spin->hws, vm, NULL);
+ if (IS_ERR(hws))
+ return PTR_ERR(hws);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+
+ err = i915_vma_pin(hws, 0, 0, PIN_USER);
+ if (err)
+ goto unpin_vma;
+
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto unpin_hws;
+
+ if (!i915_gem_object_has_active_reference(vma->obj)) {
+ i915_gem_object_get(vma->obj);
+ i915_gem_object_set_active_reference(vma->obj);
+ }
+
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+ goto unpin_hws;
+
+ if (!i915_gem_object_has_active_reference(hws->obj)) {
+ i915_gem_object_get(hws->obj);
+ i915_gem_object_set_active_reference(hws->obj);
+ }
+
+ batch = spin->batch;
+
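+ /*
+ * The batch writes the request's seqno to the HWS, optionally yields
+ * at the arbitration point, then branches back to its own start -
+ * spinning until igt_spinner_end() replaces the first dword with
+ * MI_BATCH_BUFFER_END.
+ */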
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+
+ *batch++ = arbitration_command;
+
+ *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = upper_32_bits(vma->node.start);
+ *batch++ = MI_BATCH_BUFFER_END; /* not reached */
+
+ i915_gem_chipset_flush(spin->i915);
+
+ err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+
+unpin_hws:
+ i915_vma_unpin(hws);
+unpin_vma:
+ i915_vma_unpin(vma);
+ return err;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = emit_recurse_batch(spin, rq, arbitration_command);
+ if (err) {
+ i915_request_add(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static u32
+hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
+{
+ u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
+
+ return READ_ONCE(*seqno);
+}
+
+void igt_spinner_end(struct igt_spinner *spin)
+{
+ *spin->batch = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(spin->i915);
+}
+
+void igt_spinner_fini(struct igt_spinner *spin)
+{
+ igt_spinner_end(spin);
+
+ i915_gem_object_unpin_map(spin->obj);
+ i915_gem_object_put(spin->obj);
+
+ i915_gem_object_unpin_map(spin->hws);
+ i915_gem_object_put(spin->hws);
+}
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
+{
+ if (!wait_event_timeout(rq->execute,
+ READ_ONCE(rq->global_seqno),
+ msecs_to_jiffies(10)))
+ return false;
+
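+ /*
+ * Busy-wait briefly for the batch to begin writing its seqno to the
+ * HWS, then fall back to a sleeping wait.
+ */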
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 1000));
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
new file mode 100644
index 000000000000..391777c76dc7
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
+#define __I915_SELFTESTS_IGT_SPINNER_H__
+
+#include "../i915_selftest.h"
+
+#include "../i915_drv.h"
+#include "../i915_request.h"
+#include "../intel_ringbuffer.h"
+#include "../i915_gem_context.h"
+
+struct igt_spinner {
+ struct drm_i915_private *i915;
+ struct drm_i915_gem_object *hws;
+ struct drm_i915_gem_object *obj;
+ u32 *batch;
+ void *seqno;
+};
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+void igt_spinner_fini(struct igt_spinner *spin);
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command);
+void igt_spinner_end(struct igt_spinner *spin);
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);
+
+#endif
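Putting the exported API together, a minimal sketch modelled on the live_sanitycheck conversion below (locking and flush elided):

static int example_spin_once(struct drm_i915_private *i915,
			     struct i915_gem_context *ctx,
			     struct intel_engine_cs *engine)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, i915);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out;
	}

	i915_request_add(rq);
	if (!igt_wait_for_spinner(&spin, rq))
		err = -EIO;	/* batch never started executing */

	igt_spinner_end(&spin);	/* terminate the infinite loop */
out:
	igt_spinner_fini(&spin);
	return err;
}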
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 0c0ab82b6228..32cba4cae31a 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -159,6 +159,7 @@ static int igt_guc_clients(void *args)
* Get rid of clients created during driver load because the test will
* recreate them.
*/
+ guc_clients_disable(guc);
guc_clients_destroy(guc);
if (guc->execbuf_client || guc->preempt_client) {
pr_err("guc_clients_destroy lied!\n");
@@ -197,8 +198,8 @@ static int igt_guc_clients(void *args)
goto out;
}
- /* Now create the doorbells */
- guc_clients_doorbell_init(guc);
+ /* Now enable the clients */
+ guc_clients_enable(guc);
/* each client should now have received a doorbell */
if (!client_doorbell_in_sync(guc->execbuf_client) ||
@@ -212,63 +213,17 @@ static int igt_guc_clients(void *args)
* Basic test - an attempt to reallocate a valid doorbell to the
* client it is currently assigned should not cause a failure.
*/
- err = guc_clients_doorbell_init(guc);
- if (err)
- goto out;
-
- /*
- * Negative test - a client with no doorbell (invalid db id).
- * After destroying the doorbell, the db id is changed to
- * GUC_DOORBELL_INVALID and the firmware will reject any attempt to
- * allocate a doorbell with an invalid id (db has to be reserved before
- * allocation).
- */
- destroy_doorbell(guc->execbuf_client);
- if (client_doorbell_in_sync(guc->execbuf_client)) {
- pr_err("destroy db did not work\n");
- err = -EINVAL;
- goto out;
- }
-
- unreserve_doorbell(guc->execbuf_client);
-
- __create_doorbell(guc->execbuf_client);
- err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id);
- if (err != -EIO) {
- pr_err("unexpected (err = %d)", err);
- goto out_db;
- }
-
- if (!available_dbs(guc, guc->execbuf_client->priority)) {
- pr_err("doorbell not available when it should\n");
- err = -EIO;
- goto out_db;
- }
-
-out_db:
- /* clean after test */
- __destroy_doorbell(guc->execbuf_client);
- err = reserve_doorbell(guc->execbuf_client);
- if (err) {
- pr_err("failed to reserve back the doorbell back\n");
- }
err = create_doorbell(guc->execbuf_client);
- if (err) {
- pr_err("recreate doorbell failed\n");
- goto out;
- }
out:
/*
 * Leave a clean state for the other tests, plus the driver always
 * destroys the clients during unload.
*/
- destroy_doorbell(guc->execbuf_client);
- if (guc->preempt_client)
- destroy_doorbell(guc->preempt_client);
+ guc_clients_disable(guc);
guc_clients_destroy(guc);
guc_clients_create(guc);
- guc_clients_doorbell_init(guc);
+ guc_clients_enable(guc);
unlock:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -352,7 +307,7 @@ static int igt_guc_doorbells(void *arg)
db_id = clients[i]->doorbell_id;
- err = create_doorbell(clients[i]);
+ err = __guc_client_enable(clients[i]);
if (err) {
pr_err("[%d] Failed to create a doorbell\n", i);
goto out;
@@ -378,7 +333,7 @@ static int igt_guc_doorbells(void *arg)
out:
for (i = 0; i < ATTEMPTS; i++)
if (!IS_ERR_OR_NULL(clients[i])) {
- destroy_doorbell(clients[i]);
+ __guc_client_disable(clients[i]);
guc_client_free(clients[i]);
}
unlock:
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index db378226ac10..40efbed611de 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -27,6 +27,7 @@
#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"
+#include "igt_reset.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
@@ -76,7 +77,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr = i915_gem_object_pin_map(h->obj,
- HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC);
+ i915_coherent_map_type(i915));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -234,7 +235,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
return ERR_CAST(obj);
vaddr = i915_gem_object_pin_map(obj,
- HAS_LLC(h->i915) ? I915_MAP_WB : I915_MAP_WC);
+ i915_coherent_map_type(h->i915));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
return ERR_CAST(vaddr);
@@ -308,6 +309,7 @@ static int igt_hang_sanitycheck(void *arg)
goto unlock;
for_each_engine(engine, i915, id) {
+ struct igt_wedge_me w;
long timeout;
if (!intel_engine_can_store_dword(engine))
@@ -328,9 +330,14 @@ static int igt_hang_sanitycheck(void *arg)
i915_request_add(rq);
- timeout = i915_request_wait(rq,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ timeout = 0;
+ igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
+ timeout = i915_request_wait(rq,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (i915_terminally_wedged(&i915->gpu_error))
+ timeout = -EIO;
+
i915_request_put(rq);
if (timeout < 0) {
@@ -348,40 +355,6 @@ unlock:
return err;
}
-static void global_reset_lock(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- pr_debug("%s: current gpu_error=%08lx\n",
- __func__, i915->gpu_error.flags);
-
- while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &i915->gpu_error.flags));
-
- for_each_engine(engine, i915, id) {
- while (test_and_set_bit(I915_RESET_ENGINE + id,
- &i915->gpu_error.flags))
- wait_on_bit(&i915->gpu_error.flags,
- I915_RESET_ENGINE + id,
- TASK_UNINTERRUPTIBLE);
- }
-}
-
-static void global_reset_unlock(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id)
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
-
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
- wake_up_all(&i915->gpu_error.reset_queue);
-}
-
static int igt_global_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -390,7 +363,7 @@ static int igt_global_reset(void *arg)
/* Check that we can issue a global GPU reset */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
mutex_lock(&i915->drm.struct_mutex);
@@ -405,7 +378,7 @@ static int igt_global_reset(void *arg)
mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
@@ -936,7 +909,7 @@ static int igt_reset_wait(void *arg)
/* Check that we detect a stuck waiter and issue a reset */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -988,7 +961,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -1066,7 +1039,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
/* Check that we can recover an unbind stuck on a hanging request */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -1150,6 +1123,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
tsk = NULL;
goto out_reset;
}
+ get_task_struct(tsk);
wait_for_completion(&arg.completion);
@@ -1172,6 +1146,8 @@ out_reset:
/* The reset, even indirectly, should take less than 10ms. */
 igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
err = kthread_stop(tsk);
+
+ put_task_struct(tsk);
}
mutex_lock(&i915->drm.struct_mutex);
@@ -1183,7 +1159,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -1263,7 +1239,7 @@ static int igt_reset_queue(void *arg)
/* Check that we replay pending requests following a hang */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -1394,7 +1370,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1aea7a8f2224..ca461e3a5f27 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -6,215 +6,18 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
+#include "igt_spinner.h"
+#include "i915_random.h"
#include "mock_context.h"
-struct spinner {
- struct drm_i915_private *i915;
- struct drm_i915_gem_object *hws;
- struct drm_i915_gem_object *obj;
- u32 *batch;
- void *seqno;
-};
-
-static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
-{
- unsigned int mode;
- void *vaddr;
- int err;
-
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
- memset(spin, 0, sizeof(*spin));
- spin->i915 = i915;
-
- spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(spin->hws)) {
- err = PTR_ERR(spin->hws);
- goto err;
- }
-
- spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(spin->obj)) {
- err = PTR_ERR(spin->obj);
- goto err_hws;
- }
-
- i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
- vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
- spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
- mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
- vaddr = i915_gem_object_pin_map(spin->obj, mode);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_unpin_hws;
- }
- spin->batch = vaddr;
-
- return 0;
-
-err_unpin_hws:
- i915_gem_object_unpin_map(spin->hws);
-err_obj:
- i915_gem_object_put(spin->obj);
-err_hws:
- i915_gem_object_put(spin->hws);
-err:
- return err;
-}
-
-static unsigned int seqno_offset(u64 fence)
-{
- return offset_in_page(sizeof(u32) * fence);
-}
-
-static u64 hws_address(const struct i915_vma *hws,
- const struct i915_request *rq)
-{
- return hws->node.start + seqno_offset(rq->fence.context);
-}
-
-static int emit_recurse_batch(struct spinner *spin,
- struct i915_request *rq,
- u32 arbitration_command)
-{
- struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
- struct i915_vma *hws, *vma;
- u32 *batch;
- int err;
-
- vma = i915_vma_instance(spin->obj, vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- hws = i915_vma_instance(spin->hws, vm, NULL);
- if (IS_ERR(hws))
- return PTR_ERR(hws);
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
-
- err = i915_vma_pin(hws, 0, 0, PIN_USER);
- if (err)
- goto unpin_vma;
-
- err = i915_vma_move_to_active(vma, rq, 0);
- if (err)
- goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
- }
-
- err = i915_vma_move_to_active(hws, rq, 0);
- if (err)
- goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(hws->obj)) {
- i915_gem_object_get(hws->obj);
- i915_gem_object_set_active_reference(hws->obj);
- }
-
- batch = spin->batch;
-
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(hws_address(hws, rq));
- *batch++ = upper_32_bits(hws_address(hws, rq));
- *batch++ = rq->fence.seqno;
-
- *batch++ = arbitration_command;
-
- *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
- *batch++ = lower_32_bits(vma->node.start);
- *batch++ = upper_32_bits(vma->node.start);
- *batch++ = MI_BATCH_BUFFER_END; /* not reached */
-
- i915_gem_chipset_flush(spin->i915);
-
- err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
-
-unpin_hws:
- i915_vma_unpin(hws);
-unpin_vma:
- i915_vma_unpin(vma);
- return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 arbitration_command)
-{
- struct i915_request *rq;
- int err;
-
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return rq;
-
- err = emit_recurse_batch(spin, rq, arbitration_command);
- if (err) {
- i915_request_add(rq);
- return ERR_PTR(err);
- }
-
- return rq;
-}
-
-static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
-{
- u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
-
- return READ_ONCE(*seqno);
-}
-
-static void spinner_end(struct spinner *spin)
-{
- *spin->batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(spin->i915);
-}
-
-static void spinner_fini(struct spinner *spin)
-{
- spinner_end(spin);
-
- i915_gem_object_unpin_map(spin->obj);
- i915_gem_object_put(spin->obj);
-
- i915_gem_object_unpin_map(spin->hws);
- i915_gem_object_put(spin->hws);
-}
-
-static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
-{
- if (!wait_event_timeout(rq->execute,
- READ_ONCE(rq->global_seqno),
- msecs_to_jiffies(10)))
- return false;
-
- return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
- rq->fence.seqno),
- 10) &&
- wait_for(i915_seqno_passed(hws_seqno(spin, rq),
- rq->fence.seqno),
- 1000));
-}
-
static int live_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
- struct spinner spin;
+ struct igt_spinner spin;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
@@ -223,7 +26,7 @@ static int live_sanitycheck(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin, i915))
+ if (igt_spinner_init(&spin, i915))
goto err_unlock;
ctx = kernel_context(i915);
@@ -233,14 +36,14 @@ static int live_sanitycheck(void *arg)
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin, rq)) {
+ if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -248,7 +51,7 @@ static int live_sanitycheck(void *arg)
goto err_ctx;
}
- spinner_end(&spin);
+ igt_spinner_end(&spin);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx;
@@ -259,7 +62,7 @@ static int live_sanitycheck(void *arg)
err_ctx:
kernel_context_close(ctx);
err_spin:
- spinner_fini(&spin);
+ igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -271,7 +74,7 @@ static int live_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -282,34 +85,36 @@ static int live_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
if (!ctx_hi)
goto err_spin_lo;
- ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
ctx_lo = kernel_context(i915);
if (!ctx_lo)
goto err_ctx_hi;
- ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -317,16 +122,16 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -334,8 +139,8 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -348,9 +153,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -362,7 +167,7 @@ static int live_late_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
@@ -374,10 +179,10 @@ static int live_late_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -391,43 +196,44 @@ static int live_late_preempt(void *arg)
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
pr_err("First context failed to start\n");
goto err_wedged;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (wait_for_spinner(&spin_hi, rq)) {
+ if (igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("Second context overtook first?\n");
goto err_wedged;
}
- attr.priority = I915_PRIORITY_MAX;
+ attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
engine->schedule(rq, &attr);
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("High priority context failed to preempt the low priority context\n");
GEM_TRACE_DUMP();
goto err_wedged;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -440,9 +246,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -450,8 +256,8 @@ err_unlock:
return err;
err_wedged:
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
i915_gem_set_wedged(i915);
err = -EIO;
goto err_ctx_lo;
@@ -461,7 +267,7 @@ static int live_preempt_hang(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -475,10 +281,10 @@ static int live_preempt_hang(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -497,15 +303,15 @@ static int live_preempt_hang(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -513,10 +319,10 @@ static int live_preempt_hang(void *arg)
goto err_ctx_lo;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
@@ -541,7 +347,7 @@ static int live_preempt_hang(void *arg)
engine->execlists.preempt_hang.inject_hang = false;
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -549,8 +355,8 @@ static int live_preempt_hang(void *arg)
goto err_ctx_lo;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -563,9 +369,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -573,6 +379,261 @@ err_unlock:
return err;
}
+static int random_range(struct rnd_state *rnd, int min, int max)
+{
+ return i915_prandom_u32_max_state(max - min, rnd) + min;
+}
+
+static int random_priority(struct rnd_state *rnd)
+{
+ return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
+}
+
+struct preempt_smoke {
+ struct drm_i915_private *i915;
+ struct i915_gem_context **contexts;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *batch;
+ unsigned int ncontext;
+ struct rnd_state prng;
+ unsigned long count;
+};
+
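+/* Pick one of the preallocated contexts at random. */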
+static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+{
+ return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
+ &smoke->prng)];
+}
+
+static int smoke_submit(struct preempt_smoke *smoke,
+ struct i915_gem_context *ctx, int prio,
+ struct drm_i915_gem_object *batch)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma = NULL;
+ int err = 0;
+
+ if (batch) {
+ vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+ }
+
+ ctx->sched.priority = prio;
+
+ rq = i915_request_alloc(smoke->engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ if (vma) {
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ }
+
+ i915_request_add(rq);
+
+unpin:
+ if (vma)
+ i915_vma_unpin(vma);
+
+ return err;
+}
+
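+/* Per-engine worker: submit requests with cyclically increasing priority. */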
+static int smoke_crescendo_thread(void *arg)
+{
+ struct preempt_smoke *smoke = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ mutex_lock(&smoke->i915->drm.struct_mutex);
+ err = smoke_submit(smoke,
+ ctx, count % I915_PRIORITY_MAX,
+ smoke->batch);
+ mutex_unlock(&smoke->i915->drm.struct_mutex);
+ if (err)
+ return err;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ smoke->count = count;
+ return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+ struct task_struct *tsk[I915_NUM_ENGINES] = {};
+ struct preempt_smoke arg[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long count;
+ int err = 0;
+
+ mutex_unlock(&smoke->i915->drm.struct_mutex);
+
+ for_each_engine(engine, smoke->i915, id) {
+ arg[id] = *smoke;
+ arg[id].engine = engine;
+ if (!(flags & BATCH))
+ arg[id].batch = NULL;
+ arg[id].count = 0;
+
+ tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+ "igt/smoke:%d", id);
+ if (IS_ERR(tsk[id])) {
+ err = PTR_ERR(tsk[id]);
+ break;
+ }
+ get_task_struct(tsk[id]);
+ }
+
+ count = 0;
+ for_each_engine(engine, smoke->i915, id) {
+ int status;
+
+ if (IS_ERR_OR_NULL(tsk[id]))
+ continue;
+
+ status = kthread_stop(tsk[id]);
+ if (status && !err)
+ err = status;
+
+ count += arg[id].count;
+
+ put_task_struct(tsk[id]);
+ }
+
+ mutex_lock(&smoke->i915->drm.struct_mutex);
+
+ pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+ count, flags,
+ INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ return err;
+}
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+ enum intel_engine_id id;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ for_each_engine(smoke->engine, smoke->i915, id) {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ err = smoke_submit(smoke,
+ ctx, random_priority(&smoke->prng),
+ flags & BATCH ? smoke->batch : NULL);
+ if (err)
+ return err;
+
+ count++;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+ count, flags,
+ INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ return 0;
+}
+
+static int live_preempt_smoke(void *arg)
+{
+ struct preempt_smoke smoke = {
+ .i915 = arg,
+ .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
+ .ncontext = 1024,
+ };
+ const unsigned int phase[] = { 0, BATCH };
+ int err = -ENOMEM;
+ u32 *cs;
+ int n;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
+ return 0;
+
+ smoke.contexts = kmalloc_array(smoke.ncontext,
+ sizeof(*smoke.contexts),
+ GFP_KERNEL);
+ if (!smoke.contexts)
+ return -ENOMEM;
+
+ mutex_lock(&smoke.i915->drm.struct_mutex);
+ intel_runtime_pm_get(smoke.i915);
+
+ smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+ if (IS_ERR(smoke.batch)) {
+ err = PTR_ERR(smoke.batch);
+ goto err_unlock;
+ }
+
+ cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+ for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+ cs[n] = MI_ARB_CHECK;
+ cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(smoke.batch);
+
+ err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
+ if (err)
+ goto err_batch;
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ smoke.contexts[n] = kernel_context(smoke.i915);
+ if (!smoke.contexts[n]) {
+ err = -ENOMEM;
+ goto err_ctx;
+ }
+ }
+
+ for (n = 0; n < ARRAY_SIZE(phase); n++) {
+ err = smoke_crescendo(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+
+ err = smoke_random(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+ }
+
+err_ctx:
+ if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ if (!smoke.contexts[n])
+ break;
+ kernel_context_close(smoke.contexts[n]);
+ }
+
+err_batch:
+ i915_gem_object_put(smoke.batch);
+err_unlock:
+ intel_runtime_pm_put(smoke.i915);
+ mutex_unlock(&smoke.i915->drm.struct_mutex);
+ kfree(smoke.contexts);
+
+ return err;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
@@ -580,6 +641,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_preempt_hang),
+ SUBTEST(live_preempt_smoke),
};
if (!HAS_EXECLISTS(i915))
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index d1a0923d2f38..67017d5175b8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -6,6 +6,9 @@
#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+#include "igt_reset.h"
+#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
@@ -91,17 +94,23 @@ err_obj:
return ERR_PTR(err);
}
-static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i)
+static u32
+get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
- return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid;
+ i915_reg_t reg = i < engine->whitelist.count ?
+ engine->whitelist.list[i].reg :
+ RING_NOPID(engine->mmio_base);
+
+ return i915_mmio_reg_offset(reg);
}
-static void print_results(const struct whitelist *w, const u32 *results)
+static void
+print_results(const struct intel_engine_cs *engine, const u32 *results)
{
unsigned int i;
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
- u32 expected = get_whitelist_reg(w, i);
+ u32 expected = get_whitelist_reg(engine, i);
u32 actual = results[i];
pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
@@ -109,8 +118,7 @@ static void print_results(const struct whitelist *w, const u32 *results)
}
}
-static int check_whitelist(const struct whitelist *w,
- struct i915_gem_context *ctx,
+static int check_whitelist(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *results;
@@ -138,11 +146,11 @@ static int check_whitelist(const struct whitelist *w,
}
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
- u32 expected = get_whitelist_reg(w, i);
+ u32 expected = get_whitelist_reg(engine, i);
u32 actual = vaddr[i];
if (expected != actual) {
- print_results(w, vaddr);
+ print_results(engine, vaddr);
pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
i, expected, actual);
@@ -159,66 +167,107 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL);
+ set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
+ i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
return 0;
}
static int do_engine_reset(struct intel_engine_cs *engine)
{
- return i915_reset_engine(engine, NULL);
+ return i915_reset_engine(engine, "live_workarounds");
}
-static int switch_to_scratch_context(struct intel_engine_cs *engine)
+static int
+switch_to_scratch_context(struct intel_engine_cs *engine,
+ struct igt_spinner *spin)
{
struct i915_gem_context *ctx;
struct i915_request *rq;
+ int err = 0;
ctx = kernel_context(engine->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
intel_runtime_pm_get(engine->i915);
- rq = i915_request_alloc(engine, ctx);
+
+ if (spin)
+ rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+ else
+ rq = i915_request_alloc(engine, ctx);
+
intel_runtime_pm_put(engine->i915);
kernel_context_close(ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+
+ if (IS_ERR(rq)) {
+ spin = NULL;
+ err = PTR_ERR(rq);
+ goto err;
+ }
i915_request_add(rq);
- return 0;
+ if (spin && !igt_wait_for_spinner(spin, rq)) {
+ pr_err("Spinner failed to start\n");
+ err = -ETIMEDOUT;
+ }
+
+err:
+ if (err && spin)
+ igt_spinner_end(spin);
+
+ return err;
}
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
int (*reset)(struct intel_engine_cs *),
- const struct whitelist *w,
const char *name)
{
+ struct drm_i915_private *i915 = engine->i915;
+ bool want_spin = reset == do_engine_reset;
struct i915_gem_context *ctx;
+ struct igt_spinner spin;
int err;
- ctx = kernel_context(engine->i915);
+ pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
+ engine->whitelist.count, name);
+
+ if (want_spin) {
+ err = igt_spinner_init(&spin, i915);
+ if (err)
+ return err;
+ }
+
+ ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *before* %s reset!\n", name);
goto out;
}
- err = switch_to_scratch_context(engine);
+ err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
if (err)
goto out;
+ intel_runtime_pm_get(i915);
err = reset(engine);
+ intel_runtime_pm_put(i915);
+
+ if (want_spin) {
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+ }
+
if (err) {
pr_err("%s reset failed\n", name);
goto out;
}
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Whitelist not preserved in context across %s reset!\n",
name);
@@ -227,11 +276,11 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
kernel_context_close(ctx);
- ctx = kernel_context(engine->i915);
+ ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
name);
@@ -247,26 +296,18 @@ static int live_reset_whitelist(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine = i915->engine[RCS];
- struct i915_gpu_error *error = &i915->gpu_error;
- struct whitelist w;
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
- if (!engine)
- return 0;
-
- if (!whitelist_build(engine, &w))
+ if (!engine || engine->whitelist.count == 0)
return 0;
- pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count);
-
- set_bit(I915_RESET_BACKOFF, &error->flags);
- set_bit(I915_RESET_ENGINE + engine->id, &error->flags);
+ igt_global_reset_lock(i915);
if (intel_has_reset_engine(i915)) {
err = check_whitelist_across_reset(engine,
- do_engine_reset, &w,
+ do_engine_reset,
"engine");
if (err)
goto out;
@@ -274,22 +315,156 @@ static int live_reset_whitelist(void *arg)
if (intel_has_gpu_reset(i915)) {
err = check_whitelist_across_reset(engine,
- do_device_reset, &w,
+ do_device_reset,
"device");
if (err)
goto out;
}
out:
- clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
- clear_bit(I915_RESET_BACKOFF, &error->flags);
+ igt_global_reset_unlock(i915);
return err;
}
+static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ bool ok = true;
+
+ ok &= intel_gt_verify_workarounds(i915, str);
+
+ for_each_engine(engine, i915, id)
+ ok &= intel_engine_verify_workarounds(engine, str);
+
+ return ok;
+}
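verify_gt_engine_wa() accumulates results with the bitwise ok &= ..., not logical &&: &= always evaluates its right-hand side, so every engine is verified and logged even after an earlier failure. A standalone demonstration of the difference:

    #include <stdbool.h>
    #include <stdio.h>

    static bool check(int id)
    {
        printf("checking engine %d\n", id);    /* runs for every id */
        return id != 1;                        /* pretend engine 1 fails */
    }

    int main(void)
    {
        bool ok = true;
        int id;

        for (id = 0; id < 3; id++)
            ok &= check(id);    /* bitwise: no short-circuit */

        printf("ok = %d\n", ok);    /* 0, but all three engines printed */
        return ok ? 0 : 1;
    }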
+
+static int
+live_gpu_reset_gt_engine_workarounds(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gpu_error *error = &i915->gpu_error;
+ bool ok;
+
+ if (!intel_has_gpu_reset(i915))
+ return 0;
+
+ pr_info("Verifying after GPU reset...\n");
+
+ igt_global_reset_lock(i915);
+
+ ok = verify_gt_engine_wa(i915, "before reset");
+ if (!ok)
+ goto out;
+
+ intel_runtime_pm_get(i915);
+ set_bit(I915_RESET_HANDOFF, &error->flags);
+ i915_reset(i915, ALL_ENGINES, "live_workarounds");
+ intel_runtime_pm_put(i915);
+
+ ok = verify_gt_engine_wa(i915, "after reset");
+
+out:
+ igt_global_reset_unlock(i915);
+
+ return ok ? 0 : -ESRCH;
+}
+
+static int
+live_engine_reset_gt_engine_workarounds(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ struct i915_request *rq;
+ int ret = 0;
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ igt_global_reset_lock(i915);
+
+ for_each_engine(engine, i915, id) {
+ bool ok;
+
+ pr_info("Verifying after %s reset...\n", engine->name);
+
+ ok = verify_gt_engine_wa(i915, "before reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+
+ intel_runtime_pm_get(i915);
+ i915_reset_engine(engine, "live_workarounds");
+ intel_runtime_pm_put(i915);
+
+ ok = verify_gt_engine_wa(i915, "after idle reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+
+ ret = igt_spinner_init(&spin, i915);
+ if (ret)
+ goto err;
+
+ intel_runtime_pm_get(i915);
+
+ rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ igt_spinner_fini(&spin);
+ intel_runtime_pm_put(i915);
+ goto err;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("Spinner failed to start\n");
+ igt_spinner_fini(&spin);
+ intel_runtime_pm_put(i915);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ i915_reset_engine(engine, "live_workarounds");
+
+ intel_runtime_pm_put(i915);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ ok = verify_gt_engine_wa(i915, "after busy reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+ }
+
+err:
+ igt_global_reset_unlock(i915);
+ kernel_context_close(ctx);
+
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+
+ return ret;
+}
+
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_reset_whitelist),
+ SUBTEST(live_gpu_reset_gt_engine_workarounds),
+ SUBTEST(live_engine_reset_gt_engine_workarounds),
};
int err;
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 22a73da45ad5..d0c44c18db42 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -200,7 +200,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.submit_request = mock_submit_request;
i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
- lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE);
+ i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
intel_engine_init_breadcrumbs(&engine->base);
engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 435a2c35ee8c..361e962a7969 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -206,39 +206,6 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
.transfer = intel_dsi_host_transfer,
};
-static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
- enum port port)
-{
- struct intel_dsi_host *host;
- struct mipi_dsi_device *device;
-
- host = kzalloc(sizeof(*host), GFP_KERNEL);
- if (!host)
- return NULL;
-
- host->base.ops = &intel_dsi_host_ops;
- host->intel_dsi = intel_dsi;
- host->port = port;
-
- /*
- * We should call mipi_dsi_host_register(&host->base) here, but we don't
- * have a host->dev, and we don't have OF stuff either. So just use the
- * dsi framework as a library and hope for the best. Create the dsi
- * devices by ourselves here too. Need to be careful though, because we
- * don't initialize any of the driver model devices here.
- */
- device = kzalloc(sizeof(*device), GFP_KERNEL);
- if (!device) {
- kfree(host);
- return NULL;
- }
-
- device->host = &host->base;
- host->device = device;
-
- return host;
-}
-
/*
* send a video mode command
*
@@ -290,16 +257,6 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->sb_lock);
}
-static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
-{
- return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
-}
-
-static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
-{
- return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
-}
-
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -314,6 +271,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
int ret;
DRM_DEBUG_KMS("\n");
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
if (fixed_mode) {
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -745,17 +703,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
const struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
-static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
-
- /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
- if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
- return;
-
- msleep(msec);
-}
-
/*
* Panel enable/disable sequences from the VBT spec.
*
@@ -793,6 +740,10 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
* - wait t4 - wait t4
*/
+/*
+ * DSI port enable has to be done before pipe and plane enable, so we do it in
+ * the pre_enable hook instead of the enable hook.
+ */
static void intel_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
@@ -895,17 +846,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
}
/*
- * DSI port enable has to be done before pipe and plane enable, so we do it in
- * the pre_enable hook.
- */
-static void intel_dsi_enable_nop(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- DRM_DEBUG_KMS("\n");
-}
-
-/*
* DSI port disable has to be done after pipe and plane disable, so we do it in
* the post_disable hook.
*/
@@ -1272,31 +1212,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
}
}
-static enum drm_mode_status
-intel_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
-
- DRM_DEBUG_KMS("\n");
-
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
- if (fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
- if (fixed_mode->clock > max_dotclk)
- return MODE_CLOCK_HIGH;
- }
-
- return MODE_OK;
-}
-
/* return txclkesc cycles in terms of divider and duration in us */
static u16 txclkesc(u32 divider, unsigned int us)
{
@@ -1619,39 +1534,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
}
}
-static int intel_dsi_get_modes(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_display_mode *mode;
-
- DRM_DEBUG_KMS("\n");
-
- if (!intel_connector->panel.fixed_mode) {
- DRM_DEBUG_KMS("no fixed mode\n");
- return 0;
- }
-
- mode = drm_mode_duplicate(connector->dev,
- intel_connector->panel.fixed_mode);
- if (!mode) {
- DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
- return 0;
- }
-
- drm_mode_probed_add(connector, mode);
- return 1;
-}
-
-static void intel_dsi_connector_destroy(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- DRM_DEBUG_KMS("\n");
- intel_panel_fini(&intel_connector->panel);
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -1676,7 +1558,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_dsi_connector_destroy,
+ .destroy = intel_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
@@ -1684,27 +1566,57 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
+static enum drm_panel_orientation
+vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
- enum i9xx_plane_id i9xx_plane;
+ struct intel_encoder *encoder = connector->encoder;
+ enum intel_display_power_domain power_domain;
+ enum drm_panel_orientation orientation;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ enum pipe pipe;
u32 val;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- if (connector->encoder->crtc_mask == BIT(PIPE_B))
- i9xx_plane = PLANE_B;
- else
- i9xx_plane = PLANE_A;
+ if (!encoder->get_hw_state(encoder, &pipe))
+ return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
- val = I915_READ(DSPCNTR(i9xx_plane));
- if (val & DISPPLANE_ROTATE_180)
- orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
- }
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ plane = to_intel_plane(crtc->base.primary);
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+
+ val = I915_READ(DSPCNTR(plane->i9xx_plane));
+
+ if (!(val & DISPLAY_PLANE_ENABLE))
+ orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ else if (val & DISPPLANE_ROTATE_180)
+ orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ else
+ orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+
+ intel_display_power_put(dev_priv, power_domain);
return orientation;
}
+static enum drm_panel_orientation
+vlv_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum drm_panel_orientation orientation;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ orientation = vlv_dsi_get_hw_panel_orientation(connector);
+ if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return orientation;
+ }
+
+ return intel_dsi_get_panel_orientation(connector);
+}
+
static void intel_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1722,7 +1634,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
connector->base.display_info.panel_orientation =
- intel_dsi_get_panel_orientation(connector);
+ vlv_dsi_get_panel_orientation(connector);
drm_connector_init_panel_orientation_property(
&connector->base,
connector->panel.fixed_mode->hdisplay,
@@ -1773,7 +1685,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_enable = intel_dsi_pre_enable;
- intel_encoder->enable = intel_dsi_enable_nop;
intel_encoder->disable = intel_dsi_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -1806,7 +1717,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
for_each_dsi_port(port, intel_dsi->ports) {
struct intel_dsi_host *host;
- host = intel_dsi_host_init(intel_dsi, port);
+ host = intel_dsi_host_init(intel_dsi, &intel_dsi_host_ops,
+ port);
if (!host)
goto err;
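The new call site passes the ops table into a shared intel_dsi_host_init(). A plausible shape for that helper, assuming it is simply the function removed above with the ops table parameterized so the VLV and ICL DSI paths can share it (the actual definition lives in common intel_dsi code):

    struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
                                               const struct mipi_dsi_host_ops *funcs,
                                               enum port port)
    {
        struct intel_dsi_host *host;
        struct mipi_dsi_device *device;

        host = kzalloc(sizeof(*host), GFP_KERNEL);
        if (!host)
            return NULL;

        host->base.ops = funcs;    /* parameterized instead of hard-coded */
        host->intel_dsi = intel_dsi;
        host->port = port;

        /* As before: no host->dev, so the DSI framework is used as a
         * library and the device is created by hand. */
        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device) {
            kfree(host);
            return NULL;
        }

        device->host = &host->base;
        host->device = device;

        return host;
    }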
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index fe6becdcc29e..77a26fd3a44a 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
*
* derived from imx-hdmi.c(renamed to bridge/dw_hdmi.c now)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 0e6942f21a4e..820c7e3878f0 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale i.MX drm driver
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/component.h>
#include <linux/device.h>
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 3bd0f8a18e74..2c5bbe317353 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX drm driver - LVDS display bridge
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index cffd3310240e..293dd5752583 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX drm driver - Television Encoder (TVEv2)
*
* Copyright (C) 2013 Philipp Zabel, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -442,7 +434,7 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static struct clk_ops clk_tve_di_ops = {
+static const struct clk_ops clk_tve_di_ops = {
.round_rate = clk_tve_di_round_rate,
.set_rate = clk_tve_di_set_rate,
.recalc_rate = clk_tve_di_recalc_rate,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7d4b710b837a..058b53c0aa7e 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX IPUv3 Graphics driver
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/component.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 40605fdf0e33..c390924de93d 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX IPUv3 DP Overlay Planes
*
* Copyright (C) 2013 Philipp Zabel, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <drm/drmP.h>
@@ -236,9 +228,15 @@ static void ipu_plane_enable(struct ipu_plane *ipu_plane)
void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel)
{
+ int ret;
+
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
+ ret = ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
+ if (ret == -ETIMEDOUT) {
+ DRM_ERROR("[PLANE:%d] IDMAC timeout\n",
+ ipu_plane->base.base.id);
+ }
if (ipu_plane->dp && disable_dp_channel)
ipu_dp_disable_channel(ipu_plane->dp, false);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index aefd04e18f93..f3ce51121dd6 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX drm driver - parallel display implementation
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/component.h>
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 3ce51d8dfe1c..c28b69f48555 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -7,6 +7,7 @@ config DRM_MESON
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
select REGMAP_MMIO
+ select MESON_CANVAS
config DRM_MESON_DW_HDMI
tristate "HDMI Synopsys Controller support for Amlogic Meson Display"
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index c5c4cc362f02..7709f2fbb9f7 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,5 +1,5 @@
meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
-meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
+meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o meson_overlay.o
obj-$(CONFIG_DRM_MESON) += meson-drm.o
obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c
index 08f6073d967e..5de11aa7c775 100644
--- a/drivers/gpu/drm/meson/meson_canvas.c
+++ b/drivers/gpu/drm/meson/meson_canvas.c
@@ -39,6 +39,7 @@
#define CANVAS_WIDTH_HBIT 0
#define CANVAS_HEIGHT_BIT 9
#define CANVAS_BLKMODE_BIT 24
+#define CANVAS_ENDIAN_BIT 26
#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */
#define CANVAS_LUT_WR_EN (0x2 << 8)
#define CANVAS_LUT_RD_EN (0x1 << 8)
@@ -47,7 +48,8 @@ void meson_canvas_setup(struct meson_drm *priv,
uint32_t canvas_index, uint32_t addr,
uint32_t stride, uint32_t height,
unsigned int wrap,
- unsigned int blkmode)
+ unsigned int blkmode,
+ unsigned int endian)
{
unsigned int val;
@@ -60,7 +62,8 @@ void meson_canvas_setup(struct meson_drm *priv,
CANVAS_WIDTH_HBIT) |
(height << CANVAS_HEIGHT_BIT) |
(wrap << 22) |
- (blkmode << CANVAS_BLKMODE_BIT));
+ (blkmode << CANVAS_BLKMODE_BIT) |
+ (endian << CANVAS_ENDIAN_BIT));
regmap_write(priv->dmc, DMC_CAV_LUT_ADDR,
CANVAS_LUT_WR_EN | canvas_index);
diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h
index af1759da4b27..85dbf26e2826 100644
--- a/drivers/gpu/drm/meson/meson_canvas.h
+++ b/drivers/gpu/drm/meson/meson_canvas.h
@@ -23,6 +23,9 @@
#define __MESON_CANVAS_H
#define MESON_CANVAS_ID_OSD1 0x4e
+#define MESON_CANVAS_ID_VD1_0 0x60
+#define MESON_CANVAS_ID_VD1_1 0x61
+#define MESON_CANVAS_ID_VD1_2 0x62
/* Canvas configuration. */
#define MESON_CANVAS_WRAP_NONE 0x00
@@ -33,10 +36,16 @@
#define MESON_CANVAS_BLKMODE_32x32 0x01
#define MESON_CANVAS_BLKMODE_64x64 0x02
+#define MESON_CANVAS_ENDIAN_SWAP16 0x1
+#define MESON_CANVAS_ENDIAN_SWAP32 0x3
+#define MESON_CANVAS_ENDIAN_SWAP64 0x7
+#define MESON_CANVAS_ENDIAN_SWAP128 0xf
+
void meson_canvas_setup(struct meson_drm *priv,
uint32_t canvas_index, uint32_t addr,
uint32_t stride, uint32_t height,
unsigned int wrap,
- unsigned int blkmode);
+ unsigned int blkmode,
+ unsigned int endian);
#endif /* __MESON_CANVAS_H */
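The extra argument threads an endianness swap code into the canvas descriptor word (shifted to CANVAS_ENDIAN_BIT in meson_canvas.c). A call mirroring the VD1 usage added in meson_crtc.c; the address, stride, and height values come from the plane state and are illustrative:

    meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0,
                       priv->viu.vd1_addr0,         /* DMA address */
                       priv->viu.vd1_stride0,       /* bytes per line */
                       priv->viu.vd1_height0,       /* lines */
                       MESON_CANVAS_WRAP_NONE,
                       MESON_CANVAS_BLKMODE_LINEAR,
                       MESON_CANVAS_ENDIAN_SWAP64); /* 0x7 lands in bits 26..28 */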
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 05520202c967..d78168f979db 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/bitfield.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -98,6 +99,10 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
writel(crtc_state->mode.hdisplay,
priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
+ /* VD1 Preblend vertical start/end */
+ writel(FIELD_PREP(GENMASK(11, 0), 2303),
+ priv->io_base + _REG(VPP_PREBLEND_VD1_V_START_END));
+
writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
priv->io_base + _REG(VPP_MISC));
@@ -110,11 +115,17 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
+ DRM_DEBUG_DRIVER("\n");
+
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
+ priv->viu.vd1_enabled = false;
+ priv->viu.vd1_commit = false;
+
/* Disable VPP Postblend */
- writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_VD1_POSTBLEND |
+ VPP_VD1_PREBLEND | VPP_POSTBLEND_ENABLE, 0,
priv->io_base + _REG(VPP_MISC));
if (crtc->state->event && !crtc->state->active) {
@@ -149,6 +160,7 @@ static void meson_crtc_atomic_flush(struct drm_crtc *crtc,
struct meson_drm *priv = meson_crtc->priv;
priv->viu.osd1_commit = true;
+ priv->viu.vd1_commit = true;
}
static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
@@ -177,26 +189,37 @@ void meson_crtc_irq(struct meson_drm *priv)
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3));
writel_relaxed(priv->viu.osd1_blk0_cfg[4],
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4));
-
- /* If output is interlace, make use of the Scaler */
- if (priv->viu.osd1_interlace) {
- struct drm_plane *plane = priv->primary_plane;
- struct drm_plane_state *state = plane->state;
- struct drm_rect dest = {
- .x1 = state->crtc_x,
- .y1 = state->crtc_y,
- .x2 = state->crtc_x + state->crtc_w,
- .y2 = state->crtc_y + state->crtc_h,
- };
-
- meson_vpp_setup_interlace_vscaler_osd1(priv, &dest);
- } else
- meson_vpp_disable_interlace_vscaler_osd1(priv);
-
- meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
- priv->viu.osd1_addr, priv->viu.osd1_stride,
- priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR);
+ writel_relaxed(priv->viu.osd_sc_ctrl0,
+ priv->io_base + _REG(VPP_OSD_SC_CTRL0));
+ writel_relaxed(priv->viu.osd_sc_i_wh_m1,
+ priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
+ writel_relaxed(priv->viu.osd_sc_o_h_start_end,
+ priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
+ writel_relaxed(priv->viu.osd_sc_o_v_start_end,
+ priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
+ writel_relaxed(priv->viu.osd_sc_v_ini_phase,
+ priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
+ writel_relaxed(priv->viu.osd_sc_v_phase_step,
+ priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
+ writel_relaxed(priv->viu.osd_sc_h_ini_phase,
+ priv->io_base + _REG(VPP_OSD_HSC_INI_PHASE));
+ writel_relaxed(priv->viu.osd_sc_h_phase_step,
+ priv->io_base + _REG(VPP_OSD_HSC_PHASE_STEP));
+ writel_relaxed(priv->viu.osd_sc_h_ctrl0,
+ priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+ writel_relaxed(priv->viu.osd_sc_v_ctrl0,
+ priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
+
+ if (priv->canvas)
+ meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
+ priv->viu.osd1_addr, priv->viu.osd1_stride,
+ priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR, 0);
+ else
+ meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+ priv->viu.osd1_addr, priv->viu.osd1_stride,
+ priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR, 0);
/* Enable OSD1 */
writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
@@ -205,6 +228,206 @@ void meson_crtc_irq(struct meson_drm *priv)
priv->viu.osd1_commit = false;
}
+ /* Update the VD1 registers */
+ if (priv->viu.vd1_enabled && priv->viu.vd1_commit) {
+
+ switch (priv->viu.vd1_planes) {
+ case 3:
+ if (priv->canvas)
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_2,
+ priv->viu.vd1_addr2,
+ priv->viu.vd1_stride2,
+ priv->viu.vd1_height2,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ else
+ meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_2,
+ priv->viu.vd1_addr2,
+ priv->viu.vd1_stride2,
+ priv->viu.vd1_height2,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ /* fallthrough */
+ case 2:
+ if (priv->canvas)
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_1,
+ priv->viu.vd1_addr1,
+ priv->viu.vd1_stride1,
+ priv->viu.vd1_height1,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ else
+ meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_1,
+ priv->viu.vd1_addr1,
+ priv->viu.vd1_stride1,
+ priv->viu.vd1_height1,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ /* fallthrough */
+ case 1:
+ if (priv->canvas)
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_0,
+ priv->viu.vd1_addr0,
+ priv->viu.vd1_stride0,
+ priv->viu.vd1_height0,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ else
+ meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0,
+ priv->viu.vd1_addr0,
+ priv->viu.vd1_stride0,
+ priv->viu.vd1_height0,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ }
+
+ writel_relaxed(priv->viu.vd1_if0_gen_reg,
+ priv->io_base + _REG(VD1_IF0_GEN_REG));
+ writel_relaxed(priv->viu.vd1_if0_gen_reg,
+ priv->io_base + _REG(VD2_IF0_GEN_REG));
+ writel_relaxed(priv->viu.vd1_if0_gen_reg2,
+ priv->io_base + _REG(VD1_IF0_GEN_REG2));
+ writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
+ priv->io_base + _REG(VIU_VD1_FMT_CTRL));
+ writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
+ priv->io_base + _REG(VIU_VD2_FMT_CTRL));
+ writel_relaxed(priv->viu.viu_vd1_fmt_w,
+ priv->io_base + _REG(VIU_VD1_FMT_W));
+ writel_relaxed(priv->viu.viu_vd1_fmt_w,
+ priv->io_base + _REG(VIU_VD2_FMT_W));
+ writel_relaxed(priv->viu.vd1_if0_canvas0,
+ priv->io_base + _REG(VD1_IF0_CANVAS0));
+ writel_relaxed(priv->viu.vd1_if0_canvas0,
+ priv->io_base + _REG(VD1_IF0_CANVAS1));
+ writel_relaxed(priv->viu.vd1_if0_canvas0,
+ priv->io_base + _REG(VD2_IF0_CANVAS0));
+ writel_relaxed(priv->viu.vd1_if0_canvas0,
+ priv->io_base + _REG(VD2_IF0_CANVAS1));
+ writel_relaxed(priv->viu.vd1_if0_luma_x0,
+ priv->io_base + _REG(VD1_IF0_LUMA_X0));
+ writel_relaxed(priv->viu.vd1_if0_luma_x0,
+ priv->io_base + _REG(VD1_IF0_LUMA_X1));
+ writel_relaxed(priv->viu.vd1_if0_luma_x0,
+ priv->io_base + _REG(VD2_IF0_LUMA_X0));
+ writel_relaxed(priv->viu.vd1_if0_luma_x0,
+ priv->io_base + _REG(VD2_IF0_LUMA_X1));
+ writel_relaxed(priv->viu.vd1_if0_luma_y0,
+ priv->io_base + _REG(VD1_IF0_LUMA_Y0));
+ writel_relaxed(priv->viu.vd1_if0_luma_y0,
+ priv->io_base + _REG(VD1_IF0_LUMA_Y1));
+ writel_relaxed(priv->viu.vd1_if0_luma_y0,
+ priv->io_base + _REG(VD2_IF0_LUMA_Y0));
+ writel_relaxed(priv->viu.vd1_if0_luma_y0,
+ priv->io_base + _REG(VD2_IF0_LUMA_Y1));
+ writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+ priv->io_base + _REG(VD1_IF0_CHROMA_X0));
+ writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+ priv->io_base + _REG(VD1_IF0_CHROMA_X1));
+ writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+ priv->io_base + _REG(VD2_IF0_CHROMA_X0));
+ writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+ priv->io_base + _REG(VD2_IF0_CHROMA_X1));
+ writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+ priv->io_base + _REG(VD1_IF0_CHROMA_Y0));
+ writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+ priv->io_base + _REG(VD1_IF0_CHROMA_Y1));
+ writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+ priv->io_base + _REG(VD2_IF0_CHROMA_Y0));
+ writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+ priv->io_base + _REG(VD2_IF0_CHROMA_Y1));
+ writel_relaxed(priv->viu.vd1_if0_repeat_loop,
+ priv->io_base + _REG(VD1_IF0_RPT_LOOP));
+ writel_relaxed(priv->viu.vd1_if0_repeat_loop,
+ priv->io_base + _REG(VD2_IF0_RPT_LOOP));
+ writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+ priv->io_base + _REG(VD1_IF0_LUMA0_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+ priv->io_base + _REG(VD2_IF0_LUMA0_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+ priv->io_base + _REG(VD1_IF0_LUMA1_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+ priv->io_base + _REG(VD2_IF0_LUMA1_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+ priv->io_base + _REG(VD1_IF0_CHROMA0_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+ priv->io_base + _REG(VD2_IF0_CHROMA0_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+ priv->io_base + _REG(VD1_IF0_CHROMA1_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+ priv->io_base + _REG(VD2_IF0_CHROMA1_RPT_PAT));
+ writel_relaxed(0, priv->io_base + _REG(VD1_IF0_LUMA_PSEL));
+ writel_relaxed(0, priv->io_base + _REG(VD1_IF0_CHROMA_PSEL));
+ writel_relaxed(0, priv->io_base + _REG(VD2_IF0_LUMA_PSEL));
+ writel_relaxed(0, priv->io_base + _REG(VD2_IF0_CHROMA_PSEL));
+ writel_relaxed(priv->viu.vd1_range_map_y,
+ priv->io_base + _REG(VD1_IF0_RANGE_MAP_Y));
+ writel_relaxed(priv->viu.vd1_range_map_cb,
+ priv->io_base + _REG(VD1_IF0_RANGE_MAP_CB));
+ writel_relaxed(priv->viu.vd1_range_map_cr,
+ priv->io_base + _REG(VD1_IF0_RANGE_MAP_CR));
+ writel_relaxed(0x78404,
+ priv->io_base + _REG(VPP_SC_MISC));
+ writel_relaxed(priv->viu.vpp_pic_in_height,
+ priv->io_base + _REG(VPP_PIC_IN_HEIGHT));
+ writel_relaxed(priv->viu.vpp_postblend_vd1_h_start_end,
+ priv->io_base + _REG(VPP_POSTBLEND_VD1_H_START_END));
+ writel_relaxed(priv->viu.vpp_blend_vd2_h_start_end,
+ priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+ writel_relaxed(priv->viu.vpp_postblend_vd1_v_start_end,
+ priv->io_base + _REG(VPP_POSTBLEND_VD1_V_START_END));
+ writel_relaxed(priv->viu.vpp_blend_vd2_v_start_end,
+ priv->io_base + _REG(VPP_BLEND_VD2_V_START_END));
+ writel_relaxed(priv->viu.vpp_hsc_region12_startp,
+ priv->io_base + _REG(VPP_HSC_REGION12_STARTP));
+ writel_relaxed(priv->viu.vpp_hsc_region34_startp,
+ priv->io_base + _REG(VPP_HSC_REGION34_STARTP));
+ writel_relaxed(priv->viu.vpp_hsc_region4_endp,
+ priv->io_base + _REG(VPP_HSC_REGION4_ENDP));
+ writel_relaxed(priv->viu.vpp_hsc_start_phase_step,
+ priv->io_base + _REG(VPP_HSC_START_PHASE_STEP));
+ writel_relaxed(priv->viu.vpp_hsc_region1_phase_slope,
+ priv->io_base + _REG(VPP_HSC_REGION1_PHASE_SLOPE));
+ writel_relaxed(priv->viu.vpp_hsc_region3_phase_slope,
+ priv->io_base + _REG(VPP_HSC_REGION3_PHASE_SLOPE));
+ writel_relaxed(priv->viu.vpp_line_in_length,
+ priv->io_base + _REG(VPP_LINE_IN_LENGTH));
+ writel_relaxed(priv->viu.vpp_preblend_h_size,
+ priv->io_base + _REG(VPP_PREBLEND_H_SIZE));
+ writel_relaxed(priv->viu.vpp_vsc_region12_startp,
+ priv->io_base + _REG(VPP_VSC_REGION12_STARTP));
+ writel_relaxed(priv->viu.vpp_vsc_region34_startp,
+ priv->io_base + _REG(VPP_VSC_REGION34_STARTP));
+ writel_relaxed(priv->viu.vpp_vsc_region4_endp,
+ priv->io_base + _REG(VPP_VSC_REGION4_ENDP));
+ writel_relaxed(priv->viu.vpp_vsc_start_phase_step,
+ priv->io_base + _REG(VPP_VSC_START_PHASE_STEP));
+ writel_relaxed(priv->viu.vpp_vsc_ini_phase,
+ priv->io_base + _REG(VPP_VSC_INI_PHASE));
+ writel_relaxed(priv->viu.vpp_vsc_phase_ctrl,
+ priv->io_base + _REG(VPP_VSC_PHASE_CTRL));
+ writel_relaxed(priv->viu.vpp_hsc_phase_ctrl,
+ priv->io_base + _REG(VPP_HSC_PHASE_CTRL));
+ writel_relaxed(0x42, priv->io_base + _REG(VPP_SCALE_COEF_IDX));
+
+ /* Enable VD1 */
+ writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+ VPP_COLOR_MNG_ENABLE,
+ VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+ VPP_COLOR_MNG_ENABLE,
+ priv->io_base + _REG(VPP_MISC));
+
+ priv->viu.vd1_commit = false;
+ }
+
drm_crtc_handle_vblank(priv->crtc);
spin_lock_irqsave(&priv->drm->event_lock, flags);
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index d3443125e661..3ee4d4a4ecba 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -41,6 +41,7 @@
#include "meson_drv.h"
#include "meson_plane.h"
+#include "meson_overlay.h"
#include "meson_crtc.h"
#include "meson_venc_cvbs.h"
@@ -68,15 +69,7 @@
* - Powering Up HDMI controller and PHY
*/
-static void meson_fb_output_poll_changed(struct drm_device *dev)
-{
- struct meson_drm *priv = dev->dev_private;
-
- drm_fbdev_cma_hotplug_event(priv->fbdev);
-}
-
static const struct drm_mode_config_funcs meson_mode_config_funcs = {
- .output_poll_changed = meson_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
@@ -216,24 +209,51 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
goto free_drm;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
- if (!res) {
- ret = -EINVAL;
- goto free_drm;
- }
- /* Simply ioremap since it may be a shared register zone */
- regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!regs) {
- ret = -EADDRNOTAVAIL;
- goto free_drm;
- }
+ priv->canvas = meson_canvas_get(dev);
+ if (!IS_ERR(priv->canvas)) {
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
+ if (ret)
+ goto free_drm;
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ goto free_drm;
+ }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ goto free_drm;
+ }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+ goto free_drm;
+ }
+ } else {
+ priv->canvas = NULL;
- priv->dmc = devm_regmap_init_mmio(dev, regs,
- &meson_regmap_config);
- if (IS_ERR(priv->dmc)) {
- dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
- ret = PTR_ERR(priv->dmc);
- goto free_drm;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
+ if (!res) {
+ ret = -EINVAL;
+ goto free_drm;
+ }
+ /* Simply ioremap since it may be a shared register zone */
+ regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!regs) {
+ ret = -EADDRNOTAVAIL;
+ goto free_drm;
+ }
+
+ priv->dmc = devm_regmap_init_mmio(dev, regs,
+ &meson_regmap_config);
+ if (IS_ERR(priv->dmc)) {
+ dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
+ ret = PTR_ERR(priv->dmc);
+ goto free_drm;
+ }
}
priv->vsync_irq = platform_get_irq(pdev, 0);
@@ -272,6 +292,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (ret)
goto free_drm;
+ ret = meson_overlay_create(priv);
+ if (ret)
+ goto free_drm;
+
ret = meson_crtc_create(priv);
if (ret)
goto free_drm;
@@ -282,13 +306,6 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
drm_mode_config_reset(drm);
- priv->fbdev = drm_fbdev_cma_init(drm, 32,
- drm->mode_config.num_connector);
- if (IS_ERR(priv->fbdev)) {
- ret = PTR_ERR(priv->fbdev);
- goto free_drm;
- }
-
drm_kms_helper_poll_init(drm);
platform_set_drvdata(pdev, priv);
@@ -297,6 +314,8 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (ret)
goto free_drm;
+ drm_fbdev_generic_setup(drm, 32);
+
return 0;
free_drm:
@@ -315,9 +334,15 @@ static void meson_drv_unbind(struct device *dev)
struct drm_device *drm = dev_get_drvdata(dev);
struct meson_drm *priv = drm->dev_private;
+ if (priv->canvas) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
+ }
+
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
- drm_fbdev_cma_fini(priv->fbdev);
drm_mode_config_cleanup(drm);
drm_dev_put(drm);
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 8450d6ac8c9b..4dccf4cd042a 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/soc/amlogic/meson-canvas.h>
#include <drm/drmP.h>
struct meson_drm {
@@ -31,10 +32,16 @@ struct meson_drm {
struct regmap *dmc;
int vsync_irq;
+ struct meson_canvas *canvas;
+ u8 canvas_id_osd1;
+ u8 canvas_id_vd1_0;
+ u8 canvas_id_vd1_1;
+ u8 canvas_id_vd1_2;
+
struct drm_device *drm;
struct drm_crtc *crtc;
- struct drm_fbdev_cma *fbdev;
struct drm_plane *primary_plane;
+ struct drm_plane *overlay_plane;
/* Components Data */
struct {
@@ -46,6 +53,64 @@ struct meson_drm {
uint32_t osd1_addr;
uint32_t osd1_stride;
uint32_t osd1_height;
+ uint32_t osd_sc_ctrl0;
+ uint32_t osd_sc_i_wh_m1;
+ uint32_t osd_sc_o_h_start_end;
+ uint32_t osd_sc_o_v_start_end;
+ uint32_t osd_sc_v_ini_phase;
+ uint32_t osd_sc_v_phase_step;
+ uint32_t osd_sc_h_ini_phase;
+ uint32_t osd_sc_h_phase_step;
+ uint32_t osd_sc_h_ctrl0;
+ uint32_t osd_sc_v_ctrl0;
+
+ bool vd1_enabled;
+ bool vd1_commit;
+ unsigned int vd1_planes;
+ uint32_t vd1_if0_gen_reg;
+ uint32_t vd1_if0_luma_x0;
+ uint32_t vd1_if0_luma_y0;
+ uint32_t vd1_if0_chroma_x0;
+ uint32_t vd1_if0_chroma_y0;
+ uint32_t vd1_if0_repeat_loop;
+ uint32_t vd1_if0_luma0_rpt_pat;
+ uint32_t vd1_if0_chroma0_rpt_pat;
+ uint32_t vd1_range_map_y;
+ uint32_t vd1_range_map_cb;
+ uint32_t vd1_range_map_cr;
+ uint32_t viu_vd1_fmt_w;
+ uint32_t vd1_if0_canvas0;
+ uint32_t vd1_if0_gen_reg2;
+ uint32_t viu_vd1_fmt_ctrl;
+ uint32_t vd1_addr0;
+ uint32_t vd1_addr1;
+ uint32_t vd1_addr2;
+ uint32_t vd1_stride0;
+ uint32_t vd1_stride1;
+ uint32_t vd1_stride2;
+ uint32_t vd1_height0;
+ uint32_t vd1_height1;
+ uint32_t vd1_height2;
+ uint32_t vpp_pic_in_height;
+ uint32_t vpp_postblend_vd1_h_start_end;
+ uint32_t vpp_postblend_vd1_v_start_end;
+ uint32_t vpp_hsc_region12_startp;
+ uint32_t vpp_hsc_region34_startp;
+ uint32_t vpp_hsc_region4_endp;
+ uint32_t vpp_hsc_start_phase_step;
+ uint32_t vpp_hsc_region1_phase_slope;
+ uint32_t vpp_hsc_region3_phase_slope;
+ uint32_t vpp_line_in_length;
+ uint32_t vpp_preblend_h_size;
+ uint32_t vpp_vsc_region12_startp;
+ uint32_t vpp_vsc_region34_startp;
+ uint32_t vpp_vsc_region4_endp;
+ uint32_t vpp_vsc_start_phase_step;
+ uint32_t vpp_vsc_ini_phase;
+ uint32_t vpp_vsc_phase_ctrl;
+ uint32_t vpp_hsc_phase_ctrl;
+ uint32_t vpp_blend_vd2_h_start_end;
+ uint32_t vpp_blend_vd2_v_start_end;
} viu;
struct {
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index df7247cd93f9..d8c5cc34e22e 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -594,17 +594,7 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
vclk_freq, venc_freq, hdmi_freq);
- /* Finally filter by configurable vclk frequencies for VIC modes */
- switch (vclk_freq) {
- case 54000:
- case 74250:
- case 148500:
- case 297000:
- case 594000:
- return MODE_OK;
- }
-
- return MODE_CLOCK_RANGE;
+ return meson_vclk_vic_supported_freq(vclk_freq);
}
/* Encoder */
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
new file mode 100644
index 000000000000..691a9fd16b36
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/bitfield.h>
+#include <linux/platform_device.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_rect.h>
+
+#include "meson_overlay.h"
+#include "meson_vpp.h"
+#include "meson_viu.h"
+#include "meson_canvas.h"
+#include "meson_registers.h"
+
+/* VD1_IF0_GEN_REG */
+#define VD_URGENT_CHROMA BIT(28)
+#define VD_URGENT_LUMA BIT(27)
+#define VD_HOLD_LINES(lines) FIELD_PREP(GENMASK(24, 19), lines)
+#define VD_DEMUX_MODE_RGB BIT(16)
+#define VD_BYTES_PER_PIXEL(val) FIELD_PREP(GENMASK(15, 14), val)
+#define VD_CHRO_RPT_LASTL_CTRL BIT(6)
+#define VD_LITTLE_ENDIAN BIT(4)
+#define VD_SEPARATE_EN BIT(1)
+#define VD_ENABLE BIT(0)
+
+/* VD1_IF0_CANVAS0 */
+#define CANVAS_ADDR2(addr) FIELD_PREP(GENMASK(23, 16), addr)
+#define CANVAS_ADDR1(addr) FIELD_PREP(GENMASK(15, 8), addr)
+#define CANVAS_ADDR0(addr) FIELD_PREP(GENMASK(7, 0), addr)
+
+/* VD1_IF0_LUMA_X0 VD1_IF0_CHROMA_X0 */
+#define VD_X_START(value) FIELD_PREP(GENMASK(14, 0), value)
+#define VD_X_END(value) FIELD_PREP(GENMASK(30, 16), value)
+
+/* VD1_IF0_LUMA_Y0 VD1_IF0_CHROMA_Y0 */
+#define VD_Y_START(value) FIELD_PREP(GENMASK(12, 0), value)
+#define VD_Y_END(value) FIELD_PREP(GENMASK(28, 16), value)
+
+/* VD1_IF0_GEN_REG2 */
+#define VD_COLOR_MAP(value) FIELD_PREP(GENMASK(1, 0), value)
+
+/* VIU_VD1_FMT_CTRL */
+#define VD_HORZ_Y_C_RATIO(value) FIELD_PREP(GENMASK(22, 21), value)
+#define VD_HORZ_FMT_EN BIT(20)
+#define VD_VERT_RPT_LINE0 BIT(16)
+#define VD_VERT_INITIAL_PHASE(value) FIELD_PREP(GENMASK(11, 8), value)
+#define VD_VERT_PHASE_STEP(value) FIELD_PREP(GENMASK(7, 1), value)
+#define VD_VERT_FMT_EN BIT(0)
+
+/* VPP_POSTBLEND_VD1_H_START_END */
+#define VD_H_END(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD_H_START(value) FIELD_PREP(GENMASK(27, 16), value)
+
+/* VPP_POSTBLEND_VD1_V_START_END */
+#define VD_V_END(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD_V_START(value) FIELD_PREP(GENMASK(27, 16), value)
+
+/* VPP_BLEND_VD2_V_START_END */
+#define VD2_V_END(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD2_V_START(value) FIELD_PREP(GENMASK(27, 16), value)
+
+/* VIU_VD1_FMT_W */
+#define VD_V_WIDTH(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD_H_WIDTH(value) FIELD_PREP(GENMASK(27, 16), value)
+
+/* VPP_HSC_REGION12_STARTP VPP_HSC_REGION34_STARTP */
+#define VD_REGION24_START(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD_REGION13_END(value) FIELD_PREP(GENMASK(27, 16), value)
+
+struct meson_overlay {
+ struct drm_plane base;
+ struct meson_drm *priv;
+};
+#define to_meson_overlay(x) container_of(x, struct meson_overlay, base)
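to_meson_overlay() is the standard container_of() downcast: DRM callbacks receive a struct drm_plane pointer, and the driver recovers its enclosing struct by subtracting the member offset. A standalone illustration, with container_of defined locally so the example compiles outside the kernel:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct drm_plane_like { int dummy; };

    struct overlay {
        int priv_data;
        struct drm_plane_like base;    /* embedded, as in meson_overlay */
    };

    int main(void)
    {
        struct overlay ov = { .priv_data = 42 };
        struct drm_plane_like *plane = &ov.base;    /* what a callback gets */
        struct overlay *back = container_of(plane, struct overlay, base);

        printf("%d\n", back->priv_data);    /* prints 42 */
        return 0;
    }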
+
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
+static int meson_overlay_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (!state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ FRAC_16_16(1, 5),
+ FRAC_16_16(5, 1),
+ true, true);
+}
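The two FRAC_16_16() arguments passed to drm_atomic_helper_check_plane_state() are 16.16 fixed-point scaling bounds: the overlay may be shrunk to roughly a fifth of the source size or enlarged five-fold. A quick standalone check of the values:

    #include <stdio.h>

    #define FRAC_16_16(mult, div) (((mult) << 16) / (div))

    int main(void)
    {
        /* 1 << 16 represents 1.0 in the 16.16 format used by DRM. */
        printf("min scale %#x (~%.2f)\n",
               FRAC_16_16(1, 5), FRAC_16_16(1, 5) / 65536.0);    /* 0x3333 */
        printf("max scale %#x (%.2f)\n",
               FRAC_16_16(5, 1), FRAC_16_16(5, 1) / 65536.0);    /* 0x50000 */
        return 0;
    }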
+
+/* Takes a fixed 16.16 number and converts it to integer. */
+static inline int64_t fixed16_to_int(int64_t value)
+{
+ return value >> 16;
+}
+
+static const uint8_t skip_tab[6] = {
+ 0x24, 0x04, 0x68, 0x48, 0x28, 0x08,
+};
+
+static void meson_overlay_get_vertical_phase(unsigned int ratio_y, int *phase,
+ int *repeat, bool interlace)
+{
+ int offset_in = 0;
+ int offset_out = 0;
+ int repeat_skip = 0;
+
+ if (!interlace && ratio_y > (1 << 18))
+ offset_out = (1 * ratio_y) >> 10;
+
+ while ((offset_in + (4 << 8)) <= offset_out) {
+ repeat_skip++;
+ offset_in += 4 << 8;
+ }
+
+ *phase = (offset_out - offset_in) >> 2;
+
+ if (*phase > 0x100)
+ repeat_skip++;
+
+ *phase = *phase & 0xff;
+
+ if (repeat_skip > 5)
+ repeat_skip = 5;
+
+ *repeat = skip_tab[repeat_skip];
+}
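A worked instance of the helper above, assuming a progressive 2x vertical downscale. ratio_y uses the driver's <<18 fixed point, so 2.0 is 2 << 18; offset_out becomes 0x200, no whole 4-line skip fits below it, and the result is phase 0x80 with skip_tab[0]. The same steps as a standalone program:

    #include <stdio.h>

    static const unsigned char skip_tab[6] = {
        0x24, 0x04, 0x68, 0x48, 0x28, 0x08,
    };

    int main(void)
    {
        unsigned int ratio_y = 2 << 18;    /* h_in / video_height = 2.0 */
        int offset_in = 0, offset_out = 0, repeat_skip = 0, phase;

        if (ratio_y > (1 << 18))           /* progressive path */
            offset_out = ratio_y >> 10;    /* = 0x200 */

        while (offset_in + (4 << 8) <= offset_out) {
            repeat_skip++;
            offset_in += 4 << 8;
        }

        phase = (offset_out - offset_in) >> 2;    /* = 0x80 */
        if (phase > 0x100)
            repeat_skip++;
        phase &= 0xff;
        if (repeat_skip > 5)
            repeat_skip = 5;

        printf("phase=%#x repeat=%#x\n", phase, skip_tab[repeat_skip]);
        return 0;
    }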
+
+static void meson_overlay_setup_scaler_params(struct meson_drm *priv,
+ struct drm_plane *plane,
+ bool interlace_mode)
+{
+ struct drm_crtc_state *crtc_state = priv->crtc->state;
+ int video_top, video_left, video_width, video_height;
+ struct drm_plane_state *state = plane->state;
+ unsigned int vd_start_lines, vd_end_lines;
+ unsigned int hd_start_lines, hd_end_lines;
+ unsigned int crtc_height, crtc_width;
+ unsigned int vsc_startp, vsc_endp;
+ unsigned int hsc_startp, hsc_endp;
+ unsigned int crop_top, crop_left;
+ int vphase, vphase_repeat_skip;
+ unsigned int ratio_x, ratio_y;
+ int temp_height, temp_width;
+ unsigned int w_in, h_in;
+ int temp, start, end;
+
+ if (!crtc_state) {
+ DRM_ERROR("Invalid crtc_state\n");
+ return;
+ }
+
+ crtc_height = crtc_state->mode.vdisplay;
+ crtc_width = crtc_state->mode.hdisplay;
+
+ w_in = fixed16_to_int(state->src_w);
+ h_in = fixed16_to_int(state->src_h);
+ crop_top = fixed16_to_int(state->src_y);
+ crop_left = fixed16_to_int(state->src_x);
+
+ video_top = state->crtc_y;
+ video_left = state->crtc_x;
+ video_width = state->crtc_w;
+ video_height = state->crtc_h;
+
+ DRM_DEBUG("crtc_width %d crtc_height %d interlace %d\n",
+ crtc_width, crtc_height, interlace_mode);
+ DRM_DEBUG("w_in %d h_in %d crop_top %d crop_left %d\n",
+ w_in, h_in, crop_top, crop_left);
+ DRM_DEBUG("video top %d left %d width %d height %d\n",
+ video_top, video_left, video_width, video_height);
+
+ ratio_x = (w_in << 18) / video_width;
+ ratio_y = (h_in << 18) / video_height;
+
+ if (ratio_x * video_width < (w_in << 18))
+ ratio_x++;
+
+ DRM_DEBUG("ratio x 0x%x y 0x%x\n", ratio_x, ratio_y);
+
+ meson_overlay_get_vertical_phase(ratio_y, &vphase, &vphase_repeat_skip,
+ interlace_mode);
+
+ DRM_DEBUG("vphase 0x%x skip %d\n", vphase, vphase_repeat_skip);
+
+ /* Vertical */
+
+ start = video_top + video_height / 2 - ((h_in << 17) / ratio_y);
+ end = (h_in << 18) / ratio_y + start - 1;
+
+ if (video_top < 0 && start < 0)
+ vd_start_lines = (-(start) * ratio_y) >> 18;
+ else if (start < video_top)
+ vd_start_lines = ((video_top - start) * ratio_y) >> 18;
+ else
+ vd_start_lines = 0;
+
+ if (video_top < 0)
+ temp_height = min_t(unsigned int,
+ video_top + video_height - 1,
+ crtc_height - 1);
+ else
+ temp_height = min_t(unsigned int,
+ video_top + video_height - 1,
+ crtc_height - 1) - video_top + 1;
+
+ temp = vd_start_lines + (temp_height * ratio_y >> 18);
+ vd_end_lines = (temp <= (h_in - 1)) ? temp : (h_in - 1);
+
+ vd_start_lines += crop_top;
+ vd_end_lines += crop_top;
+
+ /*
+ * TOFIX: Input frames are handled and scaled like progressive frames,
+ * proper handling of interlaced field input frames needs to be figured
+ * out using the proper framebuffer flags set by userspace.
+ */
+ if (interlace_mode) {
+ start >>= 1;
+ end >>= 1;
+ }
+
+ vsc_startp = max_t(int, start,
+ max_t(int, 0, video_top));
+ vsc_endp = min_t(int, end,
+ min_t(int, crtc_height - 1,
+ video_top + video_height - 1));
+
+ DRM_DEBUG("vsc startp %d endp %d start_lines %d end_lines %d\n",
+ vsc_startp, vsc_endp, vd_start_lines, vd_end_lines);
+
+ /* Horizontal */
+
+ start = video_left + video_width / 2 - ((w_in << 17) / ratio_x);
+ end = (w_in << 18) / ratio_x + start - 1;
+
+ if (video_left < 0 && start < 0)
+ hd_start_lines = (-(start) * ratio_x) >> 18;
+ else if (start < video_left)
+ hd_start_lines = ((video_left - start) * ratio_x) >> 18;
+ else
+ hd_start_lines = 0;
+
+ if (video_left < 0)
+ temp_width = min_t(unsigned int,
+ video_left + video_width - 1,
+ crtc_width - 1);
+ else
+ temp_width = min_t(unsigned int,
+ video_left + video_width - 1,
+ crtc_width - 1) - video_left + 1;
+
+ temp = hd_start_lines + (temp_width * ratio_x >> 18);
+ hd_end_lines = (temp <= (w_in - 1)) ? temp : (w_in - 1);
+
+ priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1;
+ hsc_startp = max_t(int, start, max_t(int, 0, video_left));
+ hsc_endp = min_t(int, end, min_t(int, crtc_width - 1,
+ video_left + video_width - 1));
+
+ hd_start_lines += crop_left;
+ hd_end_lines += crop_left;
+
+ DRM_DEBUG("hsc startp %d endp %d start_lines %d end_lines %d\n",
+ hsc_startp, hsc_endp, hd_start_lines, hd_end_lines);
+
+ priv->viu.vpp_vsc_start_phase_step = ratio_y << 6;
+
+ priv->viu.vpp_vsc_ini_phase = vphase << 8;
+ priv->viu.vpp_vsc_phase_ctrl = (1 << 13) | (4 << 8) |
+ vphase_repeat_skip;
+
+ priv->viu.vd1_if0_luma_x0 = VD_X_START(hd_start_lines) |
+ VD_X_END(hd_end_lines);
+ priv->viu.vd1_if0_chroma_x0 = VD_X_START(hd_start_lines >> 1) |
+ VD_X_END(hd_end_lines >> 1);
+
+ priv->viu.viu_vd1_fmt_w =
+ VD_H_WIDTH(hd_end_lines - hd_start_lines + 1) |
+ VD_V_WIDTH(hd_end_lines/2 - hd_start_lines/2 + 1);
+
+ priv->viu.vd1_if0_luma_y0 = VD_Y_START(vd_start_lines) |
+ VD_Y_END(vd_end_lines);
+
+ priv->viu.vd1_if0_chroma_y0 = VD_Y_START(vd_start_lines >> 1) |
+ VD_Y_END(vd_end_lines >> 1);
+
+ priv->viu.vpp_pic_in_height = h_in;
+
+ priv->viu.vpp_postblend_vd1_h_start_end = VD_H_START(hsc_startp) |
+ VD_H_END(hsc_endp);
+ priv->viu.vpp_blend_vd2_h_start_end = VD_H_START(hd_start_lines) |
+ VD_H_END(hd_end_lines);
+ priv->viu.vpp_hsc_region12_startp = VD_REGION13_END(0) |
+ VD_REGION24_START(hsc_startp);
+ priv->viu.vpp_hsc_region34_startp =
+ VD_REGION13_END(hsc_startp) |
+ VD_REGION24_START(hsc_endp - hsc_startp);
+ priv->viu.vpp_hsc_region4_endp = hsc_endp - hsc_startp;
+ priv->viu.vpp_hsc_start_phase_step = ratio_x << 6;
+ priv->viu.vpp_hsc_region1_phase_slope = 0;
+ priv->viu.vpp_hsc_region3_phase_slope = 0;
+ priv->viu.vpp_hsc_phase_ctrl = (1 << 21) | (4 << 16);
+
+ priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1;
+ priv->viu.vpp_preblend_h_size = hd_end_lines - hd_start_lines + 1;
+
+ priv->viu.vpp_postblend_vd1_v_start_end = VD_V_START(vsc_startp) |
+ VD_V_END(vsc_endp);
+ priv->viu.vpp_blend_vd2_v_start_end =
+ VD2_V_START((vd_end_lines + 1) >> 1) |
+ VD2_V_END(vd_end_lines);
+
+ priv->viu.vpp_vsc_region12_startp = 0;
+ priv->viu.vpp_vsc_region34_startp =
+ VD_REGION13_END(vsc_endp - vsc_startp) |
+ VD_REGION24_START(vsc_endp - vsc_startp);
+ priv->viu.vpp_vsc_region4_endp = vsc_endp - vsc_startp;
+ priv->viu.vpp_vsc_start_phase_step = ratio_y << 6;
+}
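The ratios above are .18 fixed point, rounded up so the final output pixel never reads past the source, and the phase-step registers receive ratio << 6. That shift suggests a 24-bit fractional step format (an assumption here, not stated in the driver). A standalone computation for scaling 1280 source pixels onto 1920 display pixels:

    #include <stdio.h>

    int main(void)
    {
        unsigned int w_in = 1280, video_width = 1920;
        unsigned int ratio_x = (w_in << 18) / video_width;

        /* Round up exactly as the driver does. */
        if ((unsigned long long)ratio_x * video_width <
            ((unsigned long long)w_in << 18))
            ratio_x++;

        printf("ratio_x = %#x (.18)\n", ratio_x);    /* 0x2aaab, ~0.667 */
        printf("phase step = %#x\n", ratio_x << 6);  /* .24 register value */
        return 0;
    }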
+
+static void meson_overlay_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct meson_overlay *meson_overlay = to_meson_overlay(plane);
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ struct meson_drm *priv = meson_overlay->priv;
+ struct drm_gem_cma_object *gem;
+ unsigned long flags;
+ bool interlace_mode;
+
+ DRM_DEBUG_DRIVER("\n");
+
+	/* Fall back if the canvas provider is not available */
+ if (!priv->canvas) {
+ priv->canvas_id_vd1_0 = MESON_CANVAS_ID_VD1_0;
+ priv->canvas_id_vd1_1 = MESON_CANVAS_ID_VD1_1;
+ priv->canvas_id_vd1_2 = MESON_CANVAS_ID_VD1_2;
+ }
+
+ interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
+
+ spin_lock_irqsave(&priv->drm->event_lock, flags);
+
+ priv->viu.vd1_if0_gen_reg = VD_URGENT_CHROMA |
+ VD_URGENT_LUMA |
+ VD_HOLD_LINES(9) |
+ VD_CHRO_RPT_LASTL_CTRL |
+ VD_ENABLE;
+
+ /* Setup scaler params */
+ meson_overlay_setup_scaler_params(priv, plane, interlace_mode);
+
+ priv->viu.vd1_if0_repeat_loop = 0;
+ priv->viu.vd1_if0_luma0_rpt_pat = interlace_mode ? 8 : 0;
+ priv->viu.vd1_if0_chroma0_rpt_pat = interlace_mode ? 8 : 0;
+ priv->viu.vd1_range_map_y = 0;
+ priv->viu.vd1_range_map_cb = 0;
+ priv->viu.vd1_range_map_cr = 0;
+
+ /* Default values for RGB888/YUV444 */
+ priv->viu.vd1_if0_gen_reg2 = 0;
+ priv->viu.viu_vd1_fmt_ctrl = 0;
+
+ switch (fb->format->format) {
+ /* TOFIX DRM_FORMAT_RGB888 should be supported */
+ case DRM_FORMAT_YUYV:
+ priv->viu.vd1_if0_gen_reg |= VD_BYTES_PER_PIXEL(1);
+ priv->viu.vd1_if0_canvas0 =
+ CANVAS_ADDR2(priv->canvas_id_vd1_0) |
+ CANVAS_ADDR1(priv->canvas_id_vd1_0) |
+ CANVAS_ADDR0(priv->canvas_id_vd1_0);
+ priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(16) | /* /2 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN;
+ priv->viu.vd1_if0_canvas0 =
+ CANVAS_ADDR2(priv->canvas_id_vd1_1) |
+ CANVAS_ADDR1(priv->canvas_id_vd1_1) |
+ CANVAS_ADDR0(priv->canvas_id_vd1_0);
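+		/* NV12 stores CbCr pairs, NV21 stores CrCb: pick the matching map */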
+ if (fb->format->format == DRM_FORMAT_NV12)
+ priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(1);
+ else
+ priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(2);
+ priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(8) | /* /4 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YUV410:
+ priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN;
+ priv->viu.vd1_if0_canvas0 =
+ CANVAS_ADDR2(priv->canvas_id_vd1_2) |
+ CANVAS_ADDR1(priv->canvas_id_vd1_1) |
+ CANVAS_ADDR0(priv->canvas_id_vd1_0);
+ switch (fb->format->format) {
+ case DRM_FORMAT_YUV422:
+ priv->viu.viu_vd1_fmt_ctrl =
+ VD_HORZ_Y_C_RATIO(1) | /* /2 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(16) | /* /2 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_YUV420:
+ priv->viu.viu_vd1_fmt_ctrl =
+ VD_HORZ_Y_C_RATIO(1) | /* /2 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(8) | /* /4 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_YUV411:
+ priv->viu.viu_vd1_fmt_ctrl =
+ VD_HORZ_Y_C_RATIO(2) | /* /4 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(16) | /* /2 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_YUV410:
+ priv->viu.viu_vd1_fmt_ctrl =
+ VD_HORZ_Y_C_RATIO(2) | /* /4 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(8) | /* /4 */
+ VD_VERT_FMT_EN;
+ break;
+ }
+ break;
+ }
+
+ /* Update Canvas with buffer address */
+ priv->viu.vd1_planes = drm_format_num_planes(fb->format->format);
+
+ switch (priv->viu.vd1_planes) {
+ case 3:
+ gem = drm_fb_cma_get_gem_obj(fb, 2);
+ priv->viu.vd1_addr2 = gem->paddr + fb->offsets[2];
+ priv->viu.vd1_stride2 = fb->pitches[2];
+ priv->viu.vd1_height2 =
+ drm_format_plane_height(fb->height,
+ fb->format->format, 2);
+ DRM_DEBUG("plane 2 addr 0x%x stride %d height %d\n",
+ priv->viu.vd1_addr2,
+ priv->viu.vd1_stride2,
+ priv->viu.vd1_height2);
+ /* fallthrough */
+ case 2:
+ gem = drm_fb_cma_get_gem_obj(fb, 1);
+ priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1];
+ priv->viu.vd1_stride1 = fb->pitches[1];
+ priv->viu.vd1_height1 =
+ drm_format_plane_height(fb->height,
+ fb->format->format, 1);
+ DRM_DEBUG("plane 1 addr 0x%x stride %d height %d\n",
+ priv->viu.vd1_addr1,
+ priv->viu.vd1_stride1,
+ priv->viu.vd1_height1);
+ /* fallthrough */
+ case 1:
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0];
+ priv->viu.vd1_stride0 = fb->pitches[0];
+ priv->viu.vd1_height0 =
+ drm_format_plane_height(fb->height,
+ fb->format->format, 0);
+ DRM_DEBUG("plane 0 addr 0x%x stride %d height %d\n",
+ priv->viu.vd1_addr0,
+ priv->viu.vd1_stride0,
+ priv->viu.vd1_height0);
+ }
+
+ priv->viu.vd1_enabled = true;
+
+ spin_unlock_irqrestore(&priv->drm->event_lock, flags);
+
+ DRM_DEBUG_DRIVER("\n");
+}
+
+static void meson_overlay_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct meson_overlay *meson_overlay = to_meson_overlay(plane);
+ struct meson_drm *priv = meson_overlay->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ priv->viu.vd1_enabled = false;
+
+ /* Disable VD1 */
+ writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
+}
+
+static const struct drm_plane_helper_funcs meson_overlay_helper_funcs = {
+ .atomic_check = meson_overlay_atomic_check,
+ .atomic_disable = meson_overlay_atomic_disable,
+ .atomic_update = meson_overlay_atomic_update,
+ .prepare_fb = drm_gem_fb_prepare_fb,
+};
+
+static const struct drm_plane_funcs meson_overlay_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const uint32_t supported_drm_formats[] = {
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV411,
+ DRM_FORMAT_YUV410,
+};
+
+int meson_overlay_create(struct meson_drm *priv)
+{
+ struct meson_overlay *meson_overlay;
+ struct drm_plane *plane;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ meson_overlay = devm_kzalloc(priv->drm->dev, sizeof(*meson_overlay),
+ GFP_KERNEL);
+ if (!meson_overlay)
+ return -ENOMEM;
+
+ meson_overlay->priv = priv;
+ plane = &meson_overlay->base;
+
+ drm_universal_plane_init(priv->drm, plane, 0xFF,
+ &meson_overlay_funcs,
+ supported_drm_formats,
+ ARRAY_SIZE(supported_drm_formats),
+ NULL,
+ DRM_PLANE_TYPE_OVERLAY, "meson_overlay_plane");
+
+ drm_plane_helper_add(plane, &meson_overlay_helper_funcs);
+
+ priv->overlay_plane = plane;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/meson/meson_overlay.h b/drivers/gpu/drm/meson/meson_overlay.h
new file mode 100644
index 000000000000..dae24f5ac63d
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_overlay.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __MESON_OVERLAY_H
+#define __MESON_OVERLAY_H
+
+#include "meson_drv.h"
+
+int meson_overlay_create(struct meson_drm *priv);
+
+#endif /* __MESON_OVERLAY_H */
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 12c80dfcff59..6119a0224278 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/bitfield.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -31,6 +32,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_rect.h>
#include "meson_plane.h"
@@ -39,12 +41,51 @@
#include "meson_canvas.h"
#include "meson_registers.h"
+/* OSD_SCI_WH_M1 */
+#define SCI_WH_M1_W(w) FIELD_PREP(GENMASK(28, 16), w)
+#define SCI_WH_M1_H(h) FIELD_PREP(GENMASK(12, 0), h)
+
+/* OSD_SCO_H_START_END */
+/* OSD_SCO_V_START_END */
+#define SCO_HV_START(start) FIELD_PREP(GENMASK(27, 16), start)
+#define SCO_HV_END(end) FIELD_PREP(GENMASK(11, 0), end)
+
+/* OSD_SC_CTRL0 */
+#define SC_CTRL0_PATH_EN BIT(3)
+#define SC_CTRL0_SEL_OSD1 BIT(2)
+
+/* OSD_VSC_CTRL0 */
+#define VSC_BANK_LEN(value) FIELD_PREP(GENMASK(2, 0), value)
+#define VSC_TOP_INI_RCV_NUM(value) FIELD_PREP(GENMASK(6, 3), value)
+#define VSC_TOP_RPT_L0_NUM(value) FIELD_PREP(GENMASK(9, 8), value)
+#define VSC_BOT_INI_RCV_NUM(value) FIELD_PREP(GENMASK(14, 11), value)
+#define VSC_BOT_RPT_L0_NUM(value) FIELD_PREP(GENMASK(17, 16), value)
+#define VSC_PROG_INTERLACE BIT(23)
+#define VSC_VERTICAL_SCALER_EN BIT(24)
+
+/* OSD_VSC_INI_PHASE */
+#define VSC_INI_PHASE_BOT(bottom) FIELD_PREP(GENMASK(31, 16), bottom)
+#define VSC_INI_PHASE_TOP(top) FIELD_PREP(GENMASK(15, 0), top)
+
+/* OSD_HSC_CTRL0 */
+#define HSC_BANK_LENGTH(value) FIELD_PREP(GENMASK(2, 0), value)
+#define HSC_INI_RCV_NUM0(value) FIELD_PREP(GENMASK(6, 3), value)
+#define HSC_RPT_P0_NUM0(value) FIELD_PREP(GENMASK(9, 8), value)
+#define HSC_HORIZ_SCALER_EN BIT(22)
+
+/* VPP_OSD_VSC_PHASE_STEP */
+/* VPP_OSD_HSC_PHASE_STEP */
+#define SC_PHASE_STEP(value) FIELD_PREP(GENMASK(27, 0), value)
+
struct meson_plane {
struct drm_plane base;
struct meson_drm *priv;
+ bool enabled;
};
#define to_meson_plane(x) container_of(x, struct meson_plane, base)
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
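+/* e.g. FRAC_16_16(1, 5) = 13107, ~0.2 in 16.16: the 5x upscale limit below */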
+
static int meson_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -57,10 +98,15 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
+ /*
+	 * Only allow:
+ * - Upscaling up to 5x, vertical and horizontal
+ * - Final coordinates must match crtc size
+ */
return drm_atomic_helper_check_plane_state(state, crtc_state,
+ FRAC_16_16(1, 5),
DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
+ false, true);
}
/* Takes a fixed 16.16 number and converts it to integer. */
@@ -74,22 +120,20 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
{
struct meson_plane *meson_plane = to_meson_plane(plane);
struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_rect dest = drm_plane_state_dest(state);
struct meson_drm *priv = meson_plane->priv;
+ struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *gem;
- struct drm_rect src = {
- .x1 = (state->src_x),
- .y1 = (state->src_y),
- .x2 = (state->src_x + state->src_w),
- .y2 = (state->src_y + state->src_h),
- };
- struct drm_rect dest = {
- .x1 = state->crtc_x,
- .y1 = state->crtc_y,
- .x2 = state->crtc_x + state->crtc_w,
- .y2 = state->crtc_y + state->crtc_h,
- };
unsigned long flags;
+ int vsc_ini_rcv_num, vsc_ini_rpt_p0_num;
+ int vsc_bot_rcv_num, vsc_bot_rpt_p0_num;
+ int hsc_ini_rcv_num, hsc_ini_rpt_p0_num;
+ int hf_phase_step, vf_phase_step;
+ int src_w, src_h, dst_w, dst_h;
+ int bot_ini_phase;
+ int hf_bank_len;
+ int vf_bank_len;
+ u8 canvas_id_osd1;
/*
* Update Coordinates
@@ -104,8 +148,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
(0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
+ if (priv->canvas)
+ canvas_id_osd1 = priv->canvas_id_osd1;
+ else
+ canvas_id_osd1 = MESON_CANVAS_ID_OSD1;
+
/* Set up BLK0 to point to the right canvas */
- priv->viu.osd1_blk0_cfg[0] = ((MESON_CANVAS_ID_OSD1 << OSD_CANVAS_SEL) |
+ priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) |
OSD_ENDIANNESS_LE);
/* On GXBB, Use the old non-HDR RGB2YUV converter */
@@ -137,23 +186,115 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
break;
};
+ /* Default scaler parameters */
+ vsc_bot_rcv_num = 0;
+ vsc_bot_rpt_p0_num = 0;
+ hf_bank_len = 4;
+ vf_bank_len = 4;
+
if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
- priv->viu.osd1_interlace = true;
+ vsc_bot_rcv_num = 6;
+ vsc_bot_rpt_p0_num = 2;
+ }
+
+ hsc_ini_rcv_num = hf_bank_len;
+ vsc_ini_rcv_num = vf_bank_len;
+ hsc_ini_rpt_p0_num = (hf_bank_len / 2) - 1;
+ vsc_ini_rpt_p0_num = (vf_bank_len / 2) - 1;
+
+ src_w = fixed16_to_int(state->src_w);
+ src_h = fixed16_to_int(state->src_h);
+ dst_w = state->crtc_w;
+ dst_h = state->crtc_h;
+ /*
+ * When the output is interlaced, the OSD must switch between
+ * each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0
+ * at each vsync.
+	 * But the vertical scaler can provide such functionality if it
+	 * is configured for 2:1 scaling with interlace options enabled.
+ */
+ if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
dest.y1 /= 2;
dest.y2 /= 2;
- } else
- priv->viu.osd1_interlace = false;
+ dst_h /= 2;
+ }
+
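+	/*
+	 * The scaler phase steps are src/dst ratios in effectively 4.24
+	 * fixed-point: horizontal after the << 6, vertical after the
+	 * final << 4 below.
+	 */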
+ hf_phase_step = ((src_w << 18) / dst_w) << 6;
+ vf_phase_step = (src_h << 20) / dst_h;
+
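+	/*
+	 * In interlaced mode the bottom field starts half a source line
+	 * later, so it gets an initial phase of half a vertical step.
+	 */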
+ if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ bot_ini_phase = ((vf_phase_step / 2) >> 4);
+ else
+ bot_ini_phase = 0;
+
+ vf_phase_step = (vf_phase_step << 4);
+
+ /* In interlaced mode, scaler is always active */
+ if (src_h != dst_h || src_w != dst_w) {
+ priv->viu.osd_sc_i_wh_m1 = SCI_WH_M1_W(src_w - 1) |
+ SCI_WH_M1_H(src_h - 1);
+ priv->viu.osd_sc_o_h_start_end = SCO_HV_START(dest.x1) |
+ SCO_HV_END(dest.x2 - 1);
+ priv->viu.osd_sc_o_v_start_end = SCO_HV_START(dest.y1) |
+ SCO_HV_END(dest.y2 - 1);
+ /* Enable OSD Scaler */
+ priv->viu.osd_sc_ctrl0 = SC_CTRL0_PATH_EN | SC_CTRL0_SEL_OSD1;
+ } else {
+ priv->viu.osd_sc_i_wh_m1 = 0;
+ priv->viu.osd_sc_o_h_start_end = 0;
+ priv->viu.osd_sc_o_v_start_end = 0;
+ priv->viu.osd_sc_ctrl0 = 0;
+ }
+
+ /* In interlaced mode, vertical scaler is always active */
+ if (src_h != dst_h) {
+ priv->viu.osd_sc_v_ctrl0 =
+ VSC_BANK_LEN(vf_bank_len) |
+ VSC_TOP_INI_RCV_NUM(vsc_ini_rcv_num) |
+ VSC_TOP_RPT_L0_NUM(vsc_ini_rpt_p0_num) |
+ VSC_VERTICAL_SCALER_EN;
+
+ if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ priv->viu.osd_sc_v_ctrl0 |=
+ VSC_BOT_INI_RCV_NUM(vsc_bot_rcv_num) |
+ VSC_BOT_RPT_L0_NUM(vsc_bot_rpt_p0_num) |
+ VSC_PROG_INTERLACE;
+
+ priv->viu.osd_sc_v_phase_step = SC_PHASE_STEP(vf_phase_step);
+ priv->viu.osd_sc_v_ini_phase = VSC_INI_PHASE_BOT(bot_ini_phase);
+ } else {
+ priv->viu.osd_sc_v_ctrl0 = 0;
+ priv->viu.osd_sc_v_phase_step = 0;
+ priv->viu.osd_sc_v_ini_phase = 0;
+ }
+
+ /* Horizontal scaler is only used if width does not match */
+ if (src_w != dst_w) {
+ priv->viu.osd_sc_h_ctrl0 =
+ HSC_BANK_LENGTH(hf_bank_len) |
+ HSC_INI_RCV_NUM0(hsc_ini_rcv_num) |
+ HSC_RPT_P0_NUM0(hsc_ini_rpt_p0_num) |
+ HSC_HORIZ_SCALER_EN;
+ priv->viu.osd_sc_h_phase_step = SC_PHASE_STEP(hf_phase_step);
+ priv->viu.osd_sc_h_ini_phase = 0;
+ } else {
+ priv->viu.osd_sc_h_ctrl0 = 0;
+ priv->viu.osd_sc_h_phase_step = 0;
+ priv->viu.osd_sc_h_ini_phase = 0;
+ }
/*
* The format of these registers is (x2 << 16 | x1),
* where x2 is exclusive.
* e.g. +30x1920 would be (1919 << 16) | 30
*/
- priv->viu.osd1_blk0_cfg[1] = ((fixed16_to_int(src.x2) - 1) << 16) |
- fixed16_to_int(src.x1);
- priv->viu.osd1_blk0_cfg[2] = ((fixed16_to_int(src.y2) - 1) << 16) |
- fixed16_to_int(src.y1);
+ priv->viu.osd1_blk0_cfg[1] =
+ ((fixed16_to_int(state->src.x2) - 1) << 16) |
+ fixed16_to_int(state->src.x1);
+ priv->viu.osd1_blk0_cfg[2] =
+ ((fixed16_to_int(state->src.y2) - 1) << 16) |
+ fixed16_to_int(state->src.y1);
priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
@@ -164,6 +305,15 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_stride = fb->pitches[0];
priv->viu.osd1_height = fb->height;
+ if (!meson_plane->enabled) {
+ /* Reset OSD1 before enabling it on GXL+ SoCs */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_viu_osd1_reset(priv);
+
+ meson_plane->enabled = true;
+ }
+
spin_unlock_irqrestore(&priv->drm->event_lock, flags);
}
@@ -177,12 +327,15 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
priv->io_base + _REG(VPP_MISC));
+	meson_plane->enabled = false;
}
static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
.atomic_check = meson_plane_atomic_check,
.atomic_disable = meson_plane_atomic_disable,
.atomic_update = meson_plane_atomic_update,
+ .prepare_fb = drm_gem_fb_prepare_fb,
};
static const struct drm_plane_funcs meson_plane_funcs = {
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index bca87143e548..5c7e02c703bc 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -286,6 +286,7 @@
#define VIU_OSD1_MATRIX_COEF22_30 0x1a9d
#define VIU_OSD1_MATRIX_COEF31_32 0x1a9e
#define VIU_OSD1_MATRIX_COEF40_41 0x1a9f
+#define VD1_IF0_GEN_REG3 0x1aa7
#define VIU_OSD1_EOTF_CTL 0x1ad4
#define VIU_OSD1_EOTF_COEF00_01 0x1ad5
#define VIU_OSD1_EOTF_COEF02_10 0x1ad6
@@ -297,6 +298,7 @@
#define VIU_OSD1_OETF_CTL 0x1adc
#define VIU_OSD1_OETF_LUT_ADDR_PORT 0x1add
#define VIU_OSD1_OETF_LUT_DATA_PORT 0x1ade
+#define AFBC_ENABLE 0x1ae0
/* vpp */
#define VPP_DUMMY_DATA 0x1d00
@@ -349,6 +351,7 @@
#define VPP_VD2_PREBLEND BIT(15)
#define VPP_OSD1_PREBLEND BIT(16)
#define VPP_OSD2_PREBLEND BIT(17)
+#define VPP_COLOR_MNG_ENABLE BIT(28)
#define VPP_OFIFO_SIZE 0x1d27
#define VPP_FIFO_STATUS 0x1d28
#define VPP_SMOKE_CTRL 0x1d29
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index ae5473257f72..f6ba35a405f8 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -117,6 +117,8 @@
#define HDMI_PLL_RESET BIT(28)
#define HDMI_PLL_LOCK BIT(31)
+#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST(_freq * 1000, 1001)
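+/* e.g. FREQ_1000_1001(148500) = 148352 (kHz), the 59.94Hz-style variant */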
+
/* VID PLL Dividers */
enum {
VID_PLL_DIV_1 = 0,
@@ -323,7 +325,7 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
enum {
/* PLL O1 O2 O3 VP DV EN TX */
/* 4320 /4 /4 /1 /5 /1 => /2 /2 */
- MESON_VCLK_HDMI_ENCI_54000 = 1,
+ MESON_VCLK_HDMI_ENCI_54000 = 0,
/* 4320 /4 /4 /1 /5 /1 => /1 /2 */
MESON_VCLK_HDMI_DDR_54000,
/* 2970 /4 /1 /1 /5 /1 => /1 /2 */
@@ -339,6 +341,7 @@ enum {
};
struct meson_vclk_params {
+ unsigned int pixel_freq;
unsigned int pll_base_freq;
unsigned int pll_od1;
unsigned int pll_od2;
@@ -347,6 +350,7 @@ struct meson_vclk_params {
unsigned int vclk_div;
} params[] = {
[MESON_VCLK_HDMI_ENCI_54000] = {
+ .pixel_freq = 54000,
.pll_base_freq = 4320000,
.pll_od1 = 4,
.pll_od2 = 4,
@@ -355,6 +359,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_54000] = {
+ .pixel_freq = 54000,
.pll_base_freq = 4320000,
.pll_od1 = 4,
.pll_od2 = 4,
@@ -363,6 +368,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_148500] = {
+ .pixel_freq = 148500,
.pll_base_freq = 2970000,
.pll_od1 = 4,
.pll_od2 = 1,
@@ -371,6 +377,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_74250] = {
+ .pixel_freq = 74250,
.pll_base_freq = 2970000,
.pll_od1 = 2,
.pll_od2 = 2,
@@ -379,6 +386,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_148500] = {
+ .pixel_freq = 148500,
.pll_base_freq = 2970000,
.pll_od1 = 1,
.pll_od2 = 2,
@@ -387,6 +395,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_297000] = {
+ .pixel_freq = 297000,
.pll_base_freq = 2970000,
.pll_od1 = 1,
.pll_od2 = 1,
@@ -395,6 +404,7 @@ struct meson_vclk_params {
.vclk_div = 2,
},
[MESON_VCLK_HDMI_594000] = {
+ .pixel_freq = 594000,
.pll_base_freq = 5940000,
.pll_od1 = 1,
.pll_od2 = 1,
@@ -402,6 +412,7 @@ struct meson_vclk_params {
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
+ { /* sentinel */ },
};
static inline unsigned int pll_od_to_reg(unsigned int od)
@@ -626,12 +637,37 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
pll_freq);
}
+enum drm_mode_status
+meson_vclk_vic_supported_freq(unsigned int freq)
+{
+ int i;
+
+ DRM_DEBUG_DRIVER("freq = %d\n", freq);
+
+ for (i = 0 ; params[i].pixel_freq ; ++i) {
+ DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
+ i, params[i].pixel_freq,
+ FREQ_1000_1001(params[i].pixel_freq));
+ /* Match strict frequency */
+ if (freq == params[i].pixel_freq)
+ return MODE_OK;
+ /* Match 1000/1001 variant */
+ if (freq == FREQ_1000_1001(params[i].pixel_freq))
+ return MODE_OK;
+ }
+
+ return MODE_CLOCK_RANGE;
+}
+EXPORT_SYMBOL_GPL(meson_vclk_vic_supported_freq);
+
static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
unsigned int od1, unsigned int od2, unsigned int od3,
unsigned int vid_pll_div, unsigned int vclk_div,
unsigned int hdmi_tx_div, unsigned int venc_div,
- bool hdmi_use_enci)
+ bool hdmi_use_enci, bool vic_alternate_clock)
{
+ unsigned int m = 0, frac = 0;
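+	/*
+	 * m/frac select the alternate HDMI PLL settings when the 1000/1001
+	 * variant of the pixel clock is requested; dividers are unchanged.
+	 */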
+
/* Set HDMI-TX sys clock */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
CTS_HDMI_SYS_SEL_MASK, 0);
@@ -646,34 +682,38 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
switch (pll_base_freq) {
case 2970000:
- meson_hdmi_pll_set_params(priv, 0x3d, 0xe00,
- od1, od2, od3);
+ m = 0x3d;
+ frac = vic_alternate_clock ? 0xd02 : 0xe00;
break;
case 4320000:
- meson_hdmi_pll_set_params(priv, 0x5a, 0,
- od1, od2, od3);
+ m = vic_alternate_clock ? 0x59 : 0x5a;
+ frac = vic_alternate_clock ? 0xe8f : 0;
break;
case 5940000:
- meson_hdmi_pll_set_params(priv, 0x7b, 0xc00,
- od1, od2, od3);
+ m = 0x7b;
+ frac = vic_alternate_clock ? 0xa05 : 0xc00;
break;
}
+
+ meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
switch (pll_base_freq) {
case 2970000:
- meson_hdmi_pll_set_params(priv, 0x7b, 0x300,
- od1, od2, od3);
+ m = 0x7b;
+ frac = vic_alternate_clock ? 0x281 : 0x300;
break;
case 4320000:
- meson_hdmi_pll_set_params(priv, 0xb4, 0,
- od1, od2, od3);
+ m = vic_alternate_clock ? 0xb3 : 0xb4;
+ frac = vic_alternate_clock ? 0x347 : 0;
break;
case 5940000:
- meson_hdmi_pll_set_params(priv, 0xf7, 0x200,
- od1, od2, od3);
+ m = 0xf7;
+ frac = vic_alternate_clock ? 0x102 : 0x200;
break;
}
+
+ meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
}
/* Setup vid_pll divider */
@@ -826,6 +866,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int vclk_freq, unsigned int venc_freq,
unsigned int dac_freq, bool hdmi_use_enci)
{
+ bool vic_alternate_clock = false;
unsigned int freq;
unsigned int hdmi_tx_div;
unsigned int venc_div;
@@ -843,7 +884,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
* - encp encoder
*/
meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
- VID_PLL_DIV_5, 2, 1, 1, false);
+ VID_PLL_DIV_5, 2, 1, 1, false, false);
return;
}
@@ -863,31 +904,35 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
return;
}
- switch (vclk_freq) {
- case 54000:
- if (hdmi_use_enci)
- freq = MESON_VCLK_HDMI_ENCI_54000;
- else
- freq = MESON_VCLK_HDMI_DDR_54000;
- break;
- case 74250:
- freq = MESON_VCLK_HDMI_74250;
- break;
- case 148500:
- if (dac_freq != 148500)
- freq = MESON_VCLK_HDMI_DDR_148500;
- else
- freq = MESON_VCLK_HDMI_148500;
- break;
- case 297000:
- freq = MESON_VCLK_HDMI_297000;
- break;
- case 594000:
- freq = MESON_VCLK_HDMI_594000;
- break;
- default:
- pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
- vclk_freq);
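+	/*
+	 * Pick the first params entry matching either the exact pixel
+	 * clock or its 1000/1001 alternate, skipping entries whose
+	 * ENCI/DDR usage does not match the requested configuration.
+	 */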
+ for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
+ if (vclk_freq == params[freq].pixel_freq ||
+ vclk_freq == FREQ_1000_1001(params[freq].pixel_freq)) {
+ if (vclk_freq != params[freq].pixel_freq)
+ vic_alternate_clock = true;
+ else
+ vic_alternate_clock = false;
+
+ if (freq == MESON_VCLK_HDMI_ENCI_54000 &&
+ !hdmi_use_enci)
+ continue;
+
+ if (freq == MESON_VCLK_HDMI_DDR_54000 &&
+ hdmi_use_enci)
+ continue;
+
+ if (freq == MESON_VCLK_HDMI_DDR_148500 &&
+ dac_freq == vclk_freq)
+ continue;
+
+ if (freq == MESON_VCLK_HDMI_148500 &&
+ dac_freq != vclk_freq)
+ continue;
+ break;
+ }
+ }
+
+ if (!params[freq].pixel_freq) {
+ pr_err("Fatal Error, invalid HDMI vclk freq %d\n", vclk_freq);
return;
}
@@ -895,6 +940,6 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
params[freq].pll_od1, params[freq].pll_od2,
params[freq].pll_od3, params[freq].vid_pll_div,
params[freq].vclk_div, hdmi_tx_div, venc_div,
- hdmi_use_enci);
+ hdmi_use_enci, vic_alternate_clock);
}
EXPORT_SYMBOL_GPL(meson_vclk_setup);
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index 869fa3a3073e..4bd8752da02a 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -32,6 +32,8 @@ enum {
enum drm_mode_status
meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
+enum drm_mode_status
+meson_vclk_vic_supported_freq(unsigned int freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int vclk_freq, unsigned int venc_freq,
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 514245e69b38..e95e0e7a7fa1 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -697,6 +697,132 @@ union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
},
};
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p24 = {
+ .encp = {
+ .dvi_settings = 0x1,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x8,
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x1000,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 140,
+ .yfp2_htime = 140+3840,
+ .max_pxcnt = 3840+1660-1,
+ .hspuls_begin = 2156+1920,
+ .hspuls_end = 44,
+ .hspuls_switch = 44,
+ .vspuls_begin = 140,
+ .vspuls_end = 2059+1920,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 148,
+ .havon_end = 3987,
+ .vavon_bline = 89,
+ .vavon_eline = 2248,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 44,
+ .hso_end = 2156+1920,
+ .vso_begin = 2100+1920,
+ .vso_end = 2164+1920,
+ .vso_bline = 51,
+ .vso_eline = 53,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 2249,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p25 = {
+ .encp = {
+ .dvi_settings = 0x1,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x8,
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x1000,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 140,
+ .yfp2_htime = 140+3840,
+ .max_pxcnt = 3840+1440-1,
+ .hspuls_begin = 2156+1920,
+ .hspuls_end = 44,
+ .hspuls_switch = 44,
+ .vspuls_begin = 140,
+ .vspuls_end = 2059+1920,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 148,
+ .havon_end = 3987,
+ .vavon_bline = 89,
+ .vavon_eline = 2248,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 44,
+ .hso_end = 2156+1920,
+ .vso_begin = 2100+1920,
+ .vso_end = 2164+1920,
+ .vso_bline = 51,
+ .vso_eline = 53,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 2249,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p30 = {
+ .encp = {
+ .dvi_settings = 0x1,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x8,
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x1000,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 140,
+ .yfp2_htime = 140+3840,
+ .max_pxcnt = 3840+560-1,
+ .hspuls_begin = 2156+1920,
+ .hspuls_end = 44,
+ .hspuls_switch = 44,
+ .vspuls_begin = 140,
+ .vspuls_end = 2059+1920,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 148,
+ .havon_end = 3987,
+ .vavon_bline = 89,
+ .vavon_eline = 2248,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 44,
+ .hso_end = 2156+1920,
+ .vso_begin = 2100+1920,
+ .vso_end = 2164+1920,
+ .vso_bline = 51,
+ .vso_eline = 53,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 2249,
+ },
+};
+
struct meson_hdmi_venc_vic_mode {
unsigned int vic;
union meson_hdmi_venc_mode *mode;
@@ -717,6 +843,9 @@ struct meson_hdmi_venc_vic_mode {
{ 34, &meson_hdmi_encp_mode_1080p30 },
{ 31, &meson_hdmi_encp_mode_1080p50 },
{ 16, &meson_hdmi_encp_mode_1080p60 },
+ { 93, &meson_hdmi_encp_mode_2160p24 },
+ { 94, &meson_hdmi_encp_mode_2160p25 },
+ { 95, &meson_hdmi_encp_mode_2160p30 },
{ 0, NULL}, /* sentinel */
};
@@ -854,6 +983,13 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
unsigned int sof_lines;
unsigned int vsync_lines;
+ /* Use VENCI for 480i and 576i and double HDMI pixels */
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ hdmi_repeat = true;
+ use_enci = true;
+ venc_hdmi_latency = 1;
+ }
+
if (meson_venc_hdmi_supported_vic(vic)) {
vmode = meson_venc_hdmi_get_vic_vmode(vic);
if (!vmode) {
@@ -865,13 +1001,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
} else {
meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt);
vmode = &vmode_dmt;
- }
-
- /* Use VENCI for 480i and 576i and double HDMI pixels */
- if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
- hdmi_repeat = true;
- use_enci = true;
- venc_hdmi_latency = 1;
+ use_enci = false;
}
/* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 6bcfa527c180..0ba87ff95530 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -296,6 +296,33 @@ static void meson_viu_load_matrix(struct meson_drm *priv)
true);
}
+/* VIU OSD1 Reset as workaround for GXL+ Alpha OSD Bug */
+void meson_viu_osd1_reset(struct meson_drm *priv)
+{
+ uint32_t osd1_fifo_ctrl_stat, osd1_ctrl_stat2;
+
+ /* Save these 2 registers state */
+ osd1_fifo_ctrl_stat = readl_relaxed(
+ priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+ osd1_ctrl_stat2 = readl_relaxed(
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+
+ /* Reset OSD1 */
+ writel_bits_relaxed(BIT(0), BIT(0),
+ priv->io_base + _REG(VIU_SW_RESET));
+ writel_bits_relaxed(BIT(0), 0,
+ priv->io_base + _REG(VIU_SW_RESET));
+
+ /* Rewrite these registers state lost in the reset */
+ writel_relaxed(osd1_fifo_ctrl_stat,
+ priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+ writel_relaxed(osd1_ctrl_stat2,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+
+ /* Reload the conversion matrix */
+ meson_viu_load_matrix(priv);
+}
+
void meson_viu_init(struct meson_drm *priv)
{
uint32_t reg;
@@ -329,6 +356,21 @@ void meson_viu_init(struct meson_drm *priv)
0xff << OSD_REPLACE_SHIFT,
priv->io_base + _REG(VIU_OSD2_CTRL_STAT2));
+ /* Disable VD1 AFBC */
+ /* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 */
+ writel_bits_relaxed(0x7 << 16, 0,
+ priv->io_base + _REG(VIU_MISC_CTRL0));
+ /* afbc vd1 set=0 */
+ writel_bits_relaxed(BIT(20), 0,
+ priv->io_base + _REG(VIU_MISC_CTRL0));
+ writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE));
+
+ writel_relaxed(0x00FF00C0,
+ priv->io_base + _REG(VD1_IF0_LUMA_FIFO_SIZE));
+ writel_relaxed(0x00FF00C0,
+ priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
+
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
priv->viu.osd1_interlace = false;
diff --git a/drivers/gpu/drm/meson/meson_viu.h b/drivers/gpu/drm/meson/meson_viu.h
index 073b1910bd1b..0f84bddd2ff0 100644
--- a/drivers/gpu/drm/meson/meson_viu.h
+++ b/drivers/gpu/drm/meson/meson_viu.h
@@ -59,6 +59,7 @@
#define OSD_REPLACE_EN BIT(14)
#define OSD_REPLACE_SHIFT 6
+void meson_viu_osd1_reset(struct meson_drm *priv);
void meson_viu_init(struct meson_drm *priv);
#endif /* __MESON_VIU_H */
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index 27356f81a0ab..f9efb431e953 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -51,52 +51,6 @@ void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux)
writel(mux, priv->io_base + _REG(VPU_VIU_VENC_MUX_CTRL));
}
-/*
- * When the output is interlaced, the OSD must switch between
- * each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0
- * at each vsync.
- * But the vertical scaler can provide such funtionnality if
- * is configured for 2:1 scaling with interlace options enabled.
- */
-void meson_vpp_setup_interlace_vscaler_osd1(struct meson_drm *priv,
- struct drm_rect *input)
-{
- writel_relaxed(BIT(3) /* Enable scaler */ |
- BIT(2), /* Select OSD1 */
- priv->io_base + _REG(VPP_OSD_SC_CTRL0));
-
- writel_relaxed(((drm_rect_width(input) - 1) << 16) |
- (drm_rect_height(input) - 1),
- priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
- /* 2:1 scaling */
- writel_relaxed(((input->x1) << 16) | (input->x2),
- priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
- writel_relaxed(((input->y1 >> 1) << 16) | (input->y2 >> 1),
- priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
-
- /* 2:1 scaling values */
- writel_relaxed(BIT(16), priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
- writel_relaxed(BIT(25), priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
-
- writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
-
- writel_relaxed((4 << 0) /* osd_vsc_bank_length */ |
- (4 << 3) /* osd_vsc_top_ini_rcv_num0 */ |
- (1 << 8) /* osd_vsc_top_rpt_p0_num0 */ |
- (6 << 11) /* osd_vsc_bot_ini_rcv_num0 */ |
- (2 << 16) /* osd_vsc_bot_rpt_p0_num0 */ |
- BIT(23) /* osd_prog_interlace */ |
- BIT(24), /* Enable vertical scaler */
- priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
-}
-
-void meson_vpp_disable_interlace_vscaler_osd1(struct meson_drm *priv)
-{
- writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
- writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
- writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
-}
-
static unsigned int vpp_filter_coefs_4point_bspline[] = {
0x15561500, 0x14561600, 0x13561700, 0x12561800,
0x11551a00, 0x11541b00, 0x10541c00, 0x0f541d00,
@@ -122,6 +76,31 @@ static void meson_vpp_write_scaling_filter_coefs(struct meson_drm *priv,
priv->io_base + _REG(VPP_OSD_SCALE_COEF));
}
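+/* 33 phases of a 4-tap bicubic filter, four signed 8-bit taps per word */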
+static const uint32_t vpp_filter_coefs_bicubic[] = {
+ 0x00800000, 0x007f0100, 0xff7f0200, 0xfe7f0300,
+ 0xfd7e0500, 0xfc7e0600, 0xfb7d0800, 0xfb7c0900,
+ 0xfa7b0b00, 0xfa7a0dff, 0xf9790fff, 0xf97711ff,
+ 0xf87613ff, 0xf87416fe, 0xf87218fe, 0xf8701afe,
+ 0xf76f1dfd, 0xf76d1ffd, 0xf76b21fd, 0xf76824fd,
+ 0xf76627fc, 0xf76429fc, 0xf7612cfc, 0xf75f2ffb,
+ 0xf75d31fb, 0xf75a34fb, 0xf75837fa, 0xf7553afa,
+ 0xf8523cfa, 0xf8503ff9, 0xf84d42f9, 0xf84a45f9,
+ 0xf84848f8
+};
+
+static void meson_vpp_write_vd_scaling_filter_coefs(struct meson_drm *priv,
+ const unsigned int *coefs,
+ bool is_horizontal)
+{
+ int i;
+
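+	/* BIT(8) in the coefficient index register selects the horizontal bank */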
+ writel_relaxed(is_horizontal ? BIT(8) : 0,
+ priv->io_base + _REG(VPP_SCALE_COEF_IDX));
+ for (i = 0; i < 33; i++)
+ writel_relaxed(coefs[i],
+ priv->io_base + _REG(VPP_SCALE_COEF));
+}
+
void meson_vpp_init(struct meson_drm *priv)
{
/* set dummy data default YUV black */
@@ -150,17 +129,34 @@ void meson_vpp_init(struct meson_drm *priv)
/* Force all planes off */
writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
- VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND, 0,
+ VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND |
+ VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0,
priv->io_base + _REG(VPP_MISC));
+ /* Setup default VD settings */
+ writel_relaxed(4096,
+ priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END));
+ writel_relaxed(4096,
+ priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+
/* Disable Scalers */
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+ writel_relaxed(4 | (4 << 8) | BIT(15),
+ priv->io_base + _REG(VPP_SC_MISC));
+
+ writel_relaxed(1, priv->io_base + _REG(VPP_VADJ_CTRL));
/* Write in the proper filter coefficients. */
meson_vpp_write_scaling_filter_coefs(priv,
vpp_filter_coefs_4point_bspline, false);
meson_vpp_write_scaling_filter_coefs(priv,
vpp_filter_coefs_4point_bspline, true);
+
+ /* Write the VD proper filter coefficients. */
+ meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic,
+ false);
+ meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic,
+ true);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 04f1dfba12e5..0aaedc554879 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -212,8 +212,6 @@ struct mga_device {
int fb_mtrr;
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 05570f0de4d7..d96a9b32455e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -36,63 +36,6 @@ mgag200_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct mga_device, ttm.bdev);
}
-static int
-mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void
-mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int mgag200_ttm_global_init(struct mga_device *ast)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- global_ref = &ast->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &mgag200_ttm_mem_global_init;
- global_ref->release = &mgag200_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- ast->ttm.bo_global_ref.mem_glob =
- ast->ttm.mem_global_ref.object;
- global_ref = &ast->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&ast->ttm.mem_global_ref);
- return r;
- }
- return 0;
-}
-
-static void
-mgag200_ttm_global_release(struct mga_device *ast)
-{
- if (ast->ttm.mem_global_ref.release == NULL)
- return;
-
- drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
- drm_global_item_unref(&ast->ttm.mem_global_ref);
- ast->ttm.mem_global_ref.release = NULL;
-}
-
-
static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct mgag200_bo *bo;
@@ -232,12 +175,7 @@ int mgag200_mm_init(struct mga_device *mdev)
struct drm_device *dev = mdev->dev;
struct ttm_bo_device *bdev = &mdev->ttm.bdev;
- ret = mgag200_ttm_global_init(mdev);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&mdev->ttm.bdev,
- mdev->ttm.bo_global_ref.ref.object,
&mgag200_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -268,8 +206,6 @@ void mgag200_mm_fini(struct mga_device *mdev)
ttm_bo_device_release(&mdev->ttm.bdev);
- mgag200_ttm_global_release(mdev);
-
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 48b5304f460c..8edd80bb0428 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1222,10 +1222,7 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
&dumper->bo, &dumper->iova);
- if (IS_ERR(dumper->ptr))
- return PTR_ERR(dumper->ptr);
-
- return 0;
+ return PTR_ERR_OR_ZERO(dumper->ptr);
}
static void a5xx_crashdumper_free(struct msm_gpu *gpu,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index f549daf30fe6..d77a8cb15404 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1179,8 +1179,6 @@ static void dpu_plane_destroy(struct drm_plane *plane)
mutex_destroy(&pdpu->lock);
- drm_plane_helper_disable(plane, NULL);
-
/* this will destroy the states as well */
drm_plane_cleanup(plane);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 79ff653d8081..7a499731ce93 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -68,7 +68,6 @@ static void mdp4_plane_destroy(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp4_plane);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 7f42c3e68a53..310459541e48 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -46,7 +46,6 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index c79659ca5706..23670907a29d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -579,7 +579,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg = (struct hdmi_platform_config *)
of_device_get_match_data(dev);
if (!hdmi_cfg) {
- dev_err(dev, "unknown hdmi_cfg: %s\n", of_node->name);
+ dev_err(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
return -ENXIO;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 4904d0d41409..5e758d95751a 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -312,6 +312,7 @@ static int msm_drm_uninit(struct device *dev)
if (fbdev && priv->fbdev)
msm_fbdev_free(ddev);
#endif
+ drm_atomic_helper_shutdown(ddev);
drm_mode_config_cleanup(ddev);
pm_runtime_get_sync(dev);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 7a7923e6220d..a90aedd6883a 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -241,7 +241,8 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
- ret = reservation_object_reserve_shared(msm_obj->resv);
+ ret = reservation_object_reserve_shared(msm_obj->resv,
+ 1);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 2393e6d16ffd..88ba003979e6 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -417,7 +417,7 @@ static int mxsfb_probe(struct platform_device *pdev)
err_unload:
mxsfb_unload(drm);
err_free:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -428,7 +428,7 @@ static int mxsfb_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
mxsfb_unload(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 0b2191fa96f7..d20b9ba4b1c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -146,8 +146,6 @@ struct nouveau_drm {
/* TTM interface support */
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
atomic_t validate_sequence;
int (*move)(struct nouveau_channel *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 99be61ddeb75..d4964f3397a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -341,7 +341,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
int ret = 0, i;
if (!exclusive) {
- ret = reservation_object_reserve_shared(resv);
+ ret = reservation_object_reserve_shared(resv, 1);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 8edb9f2a4269..1543c2f8d3d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -175,66 +175,6 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
}
static int
-nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void
-nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-int
-nouveau_ttm_global_init(struct nouveau_drm *drm)
-{
- struct drm_global_reference *global_ref;
- int ret;
-
- global_ref = &drm->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &nouveau_ttm_mem_global_init;
- global_ref->release = &nouveau_ttm_mem_global_release;
-
- ret = drm_global_item_ref(global_ref);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed setting up TTM memory accounting\n");
- drm->ttm.mem_global_ref.release = NULL;
- return ret;
- }
-
- drm->ttm.bo_global_ref.mem_glob = global_ref->object;
- global_ref = &drm->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
-
- ret = drm_global_item_ref(global_ref);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed setting up TTM BO subsystem\n");
- drm_global_item_unref(&drm->ttm.mem_global_ref);
- drm->ttm.mem_global_ref.release = NULL;
- return ret;
- }
-
- return 0;
-}
-
-void
-nouveau_ttm_global_release(struct nouveau_drm *drm)
-{
- if (drm->ttm.mem_global_ref.release == NULL)
- return;
-
- drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
- drm_global_item_unref(&drm->ttm.mem_global_ref);
- drm->ttm.mem_global_ref.release = NULL;
-}
-
-static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
struct nvif_mmu *mmu = &drm->client.mmu;
@@ -296,12 +236,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
drm->agp.cma = pci->agp.cma;
}
- ret = nouveau_ttm_global_init(drm);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&drm->ttm.bdev,
- drm->ttm.bo_global_ref.ref.object,
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -356,8 +291,6 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
ttm_bo_device_release(&drm->ttm.bdev);
- nouveau_ttm_global_release(drm);
-
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 394c129cfb3b..0a485c5b982e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5409,11 +5409,14 @@ static int dsi_probe(struct platform_device *pdev)
/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
* of data to 3 by default */
- if (dsi->data->quirks & DSI_QUIRK_GNQ)
+ if (dsi->data->quirks & DSI_QUIRK_GNQ) {
+ dsi_runtime_get(dsi);
/* NB_DATA_LANES */
dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9);
- else
+ dsi_runtime_put(dsi);
+ } else {
dsi->num_lanes_supported = 3;
+ }
r = dsi_init_output(dsi);
if (r)
@@ -5426,15 +5429,19 @@ static int dsi_probe(struct platform_device *pdev)
}
r = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (r)
+ if (r) {
DSSERR("Failed to populate DSI child devices: %d\n", r);
+ goto err_uninit_output;
+ }
r = component_add(&pdev->dev, &dsi_component_ops);
if (r)
- goto err_uninit_output;
+ goto err_of_depopulate;
return 0;
+err_of_depopulate:
+ of_platform_depopulate(dev);
err_uninit_output:
dsi_uninit_output(dsi);
err_pm_disable:
@@ -5470,19 +5477,12 @@ static int dsi_runtime_suspend(struct device *dev)
/* wait for current handler to finish before turning the DSI off */
synchronize_irq(dsi->irq);
- dispc_runtime_put(dsi->dss->dispc);
-
return 0;
}
static int dsi_runtime_resume(struct device *dev)
{
struct dsi_data *dsi = dev_get_drvdata(dev);
- int r;
-
- r = dispc_runtime_get(dsi->dss->dispc);
- if (r)
- return r;
dsi->is_enabled = true;
/* ensure the irq handler sees the is_enabled value */
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 1aaf260aa9b8..7553c7fc1c45 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1484,16 +1484,23 @@ static int dss_probe(struct platform_device *pdev)
dss);
/* Add all the child devices as components. */
+ r = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (r)
+ goto err_uninit_debugfs;
+
omapdss_gather_components(&pdev->dev);
device_for_each_child(&pdev->dev, &match, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
if (r)
- goto err_uninit_debugfs;
+ goto err_of_depopulate;
return 0;
+err_of_depopulate:
+ of_platform_depopulate(&pdev->dev);
+
err_uninit_debugfs:
dss_debugfs_remove_file(dss->debugfs.clk);
dss_debugfs_remove_file(dss->debugfs.dss);
@@ -1522,6 +1529,8 @@ static int dss_remove(struct platform_device *pdev)
{
struct dss_device *dss = platform_get_drvdata(pdev);
+ of_platform_depopulate(&pdev->dev);
+
component_master_del(&pdev->dev, &dss_component_ops);
dss_debugfs_remove_file(dss->debugfs.clk);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index cf6230eac31a..aabdda394c9c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -635,10 +635,14 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
hdmi->dss = dss;
- r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+ r = hdmi_runtime_get(hdmi);
if (r)
return r;
+ r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+ if (r)
+ goto err_runtime_put;
+
r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp);
if (r)
goto err_pll_uninit;
@@ -652,12 +656,16 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
hdmi);
+ hdmi_runtime_put(hdmi);
+
return 0;
err_cec_uninit:
hdmi4_cec_uninit(&hdmi->core);
err_pll_uninit:
hdmi_pll_uninit(&hdmi->pll);
+err_runtime_put:
+ hdmi_runtime_put(hdmi);
return r;
}
@@ -833,32 +841,6 @@ static int hdmi4_remove(struct platform_device *pdev)
return 0;
}
-static int hdmi_runtime_suspend(struct device *dev)
-{
- struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
- dispc_runtime_put(hdmi->dss->dispc);
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- struct omap_hdmi *hdmi = dev_get_drvdata(dev);
- int r;
-
- r = dispc_runtime_get(hdmi->dss->dispc);
- if (r < 0)
- return r;
-
- return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
static const struct of_device_id hdmi_of_match[] = {
{ .compatible = "ti,omap4-hdmi", },
{},
@@ -869,7 +851,6 @@ struct platform_driver omapdss_hdmi4hw_driver = {
.remove = hdmi4_remove,
.driver = {
.name = "omapdss_hdmi",
- .pm = &hdmi_pm_ops,
.of_match_table = hdmi_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index b0e4a7463f8c..9e8556f67a29 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -825,32 +825,6 @@ static int hdmi5_remove(struct platform_device *pdev)
return 0;
}
-static int hdmi_runtime_suspend(struct device *dev)
-{
- struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
- dispc_runtime_put(hdmi->dss->dispc);
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- struct omap_hdmi *hdmi = dev_get_drvdata(dev);
- int r;
-
- r = dispc_runtime_get(hdmi->dss->dispc);
- if (r < 0)
- return r;
-
- return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
static const struct of_device_id hdmi_of_match[] = {
{ .compatible = "ti,omap5-hdmi", },
{ .compatible = "ti,dra7-hdmi", },
@@ -862,7 +836,6 @@ struct platform_driver omapdss_hdmi5hw_driver = {
.remove = hdmi5_remove,
.driver = {
.name = "omapdss_hdmi5",
- .pm = &hdmi_pm_ops,
.of_match_table = hdmi_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index ff0b18c8e4ac..b5f52727f8b1 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -946,19 +946,12 @@ static int venc_runtime_suspend(struct device *dev)
if (venc->tv_dac_clk)
clk_disable_unprepare(venc->tv_dac_clk);
- dispc_runtime_put(venc->dss->dispc);
-
return 0;
}
static int venc_runtime_resume(struct device *dev)
{
struct venc_device *venc = dev_get_drvdata(dev);
- int r;
-
- r = dispc_runtime_get(venc->dss->dispc);
- if (r < 0)
- return r;
if (venc->tv_dac_clk)
clk_prepare_enable(venc->tv_dac_clk);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 62928ec0e7db..caffc547ef97 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -350,11 +350,14 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc)
static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
int ret;
DBG("%s", omap_crtc->name);
+ priv->dispc_ops->runtime_get(priv->dispc);
+
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_vblank_on(crtc);
ret = drm_crtc_vblank_get(crtc);
@@ -367,6 +370,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->name);
@@ -379,6 +383,8 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
spin_unlock_irq(&crtc->dev->event_lock);
drm_crtc_vblank_off(crtc);
+
+ priv->dispc_ops->runtime_put(priv->dispc);
}
static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 6020c30a33b3..3f3537719beb 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -90,6 +90,18 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for LG4573 RGB panel.
To compile this driver as a module, choose M here.
+config DRM_PANEL_OLIMEX_LCD_OLINUXINO
+ tristate "Olimex LCD-OLinuXino panel"
+ depends on OF
+ depends on I2C
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  The panel is used with LCDs of different sizes, from 480x272 to
+	  1280x800, at 24 bits per pixel.
+
+ Say Y here if you want to enable support for Olimex Ltd.
+ LCD-OLinuXino panel.
+
config DRM_PANEL_ORISETECH_OTM8009A
tristate "Orise Technology otm8009a 480x800 dsi 2dl panel"
depends on OF
@@ -126,6 +138,12 @@ config DRM_PANEL_RAYDIUM_RM68200
Say Y here if you want to enable support for Raydium RM68200
720x1280 DSI video mode panel.
+config DRM_PANEL_SAMSUNG_S6D16D0
+ tristate "Samsung S6D16D0 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
+
config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF
@@ -186,4 +204,11 @@ config DRM_PANEL_SITRONIX_ST7789V
Say Y here if you want to enable support for the Sitronix
ST7789V controller for 240x320 LCD panels
+config DRM_PANEL_TRULY_NT35597_WQXGA
+ tristate "Truly WQXGA"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ help
+	  Say Y here if you want to enable support for the Truly NT35597
+	  WQXGA dual-DSI video mode panel.
endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 5ccaaa9d13af..4396658a7996 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -7,11 +7,13 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
@@ -19,3 +21,4 @@ obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 72edb334d997..ca4ae45dd307 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -506,8 +506,7 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
static void innolux_panel_del(struct innolux_panel *innolux)
{
- if (innolux->base.dev)
- drm_panel_remove(&innolux->base);
+ drm_panel_remove(&innolux->base);
}
static int innolux_panel_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
new file mode 100644
index 000000000000..5e8d4523e9ed
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Panel driver for the Olimex LCD-OLinuXino family of displays
+ *
+ * Copyright (C) 2018 Olimex Ltd.
+ * Author: Stefan Mavrodiev <stefan@olimex.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/crc32.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drmP.h>
+
+#include <video/videomode.h>
+#include <video/display_timing.h>
+
+#define LCD_OLINUXINO_HEADER_MAGIC 0x4F4CB727
+#define LCD_OLINUXINO_DATA_LEN 256
+
+struct lcd_olinuxino_mode {
+ u32 pixelclock;
+ u32 hactive;
+ u32 hfp;
+ u32 hbp;
+ u32 hpw;
+ u32 vactive;
+ u32 vfp;
+ u32 vbp;
+ u32 vpw;
+ u32 refresh;
+ u32 flags;
+};
+
+struct lcd_olinuxino_info {
+ char name[32];
+ u32 width_mm;
+ u32 height_mm;
+ u32 bpc;
+ u32 bus_format;
+ u32 bus_flag;
+} __attribute__((__packed__));
+
+struct lcd_olinuxino_eeprom {
+ u32 header;
+ u32 id;
+ char revision[4];
+ u32 serial;
+ struct lcd_olinuxino_info info;
+ u32 num_modes;
+ u8 reserved[180];
+ u32 checksum;
+} __attribute__((__packed__));
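+
+/*
+ * Size check: the packed layout above is 256 bytes, matching
+ * LCD_OLINUXINO_DATA_LEN, and the trailing u32 checksum covers the
+ * first 252 bytes. The 180-byte reserved[] area holds the mode
+ * table: one struct lcd_olinuxino_mode is 44 bytes, so at most four
+ * modes fit, which is why probe() clamps num_modes to 4.
+ */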
+
+struct lcd_olinuxino {
+ struct drm_panel panel;
+ struct device *dev;
+ struct i2c_client *client;
+ struct mutex mutex;
+
+ bool prepared;
+ bool enabled;
+
+ struct backlight_device *backlight;
+ struct regulator *supply;
+ struct gpio_desc *enable_gpio;
+
+ struct lcd_olinuxino_eeprom eeprom;
+};
+
+static inline struct lcd_olinuxino *to_lcd_olinuxino(struct drm_panel *panel)
+{
+ return container_of(panel, struct lcd_olinuxino, panel);
+}
+
+static int lcd_olinuxino_disable(struct drm_panel *panel)
+{
+ struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+
+ if (!lcd->enabled)
+ return 0;
+
+ backlight_disable(lcd->backlight);
+
+ lcd->enabled = false;
+
+ return 0;
+}
+
+static int lcd_olinuxino_unprepare(struct drm_panel *panel)
+{
+ struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+
+ if (!lcd->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(lcd->enable_gpio, 0);
+ regulator_disable(lcd->supply);
+
+ lcd->prepared = false;
+
+ return 0;
+}
+
+static int lcd_olinuxino_prepare(struct drm_panel *panel)
+{
+ struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+ int ret;
+
+ if (lcd->prepared)
+ return 0;
+
+ ret = regulator_enable(lcd->supply);
+ if (ret < 0)
+ return ret;
+
+ gpiod_set_value_cansleep(lcd->enable_gpio, 1);
+ lcd->prepared = true;
+
+ return 0;
+}
+
+static int lcd_olinuxino_enable(struct drm_panel *panel)
+{
+ struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+
+ if (lcd->enabled)
+ return 0;
+
+ backlight_enable(lcd->backlight);
+
+ lcd->enabled = true;
+
+ return 0;
+}
+
+static int lcd_olinuxino_get_modes(struct drm_panel *panel)
+{
+ struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+ struct drm_connector *connector = lcd->panel.connector;
+ struct lcd_olinuxino_info *lcd_info = &lcd->eeprom.info;
+ struct drm_device *drm = lcd->panel.drm;
+ struct lcd_olinuxino_mode *lcd_mode;
+ struct drm_display_mode *mode;
+ u32 i, num = 0;
+
+ for (i = 0; i < lcd->eeprom.num_modes; i++) {
+ lcd_mode = (struct lcd_olinuxino_mode *)
+ &lcd->eeprom.reserved[i * sizeof(*lcd_mode)];
+
+ mode = drm_mode_create(drm);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ lcd_mode->hactive,
+ lcd_mode->vactive,
+ lcd_mode->refresh);
+ continue;
+ }
+
+ mode->clock = lcd_mode->pixelclock;
+ mode->hdisplay = lcd_mode->hactive;
+ mode->hsync_start = lcd_mode->hactive + lcd_mode->hfp;
+ mode->hsync_end = lcd_mode->hactive + lcd_mode->hfp +
+ lcd_mode->hpw;
+ mode->htotal = lcd_mode->hactive + lcd_mode->hfp +
+ lcd_mode->hpw + lcd_mode->hbp;
+ mode->vdisplay = lcd_mode->vactive;
+ mode->vsync_start = lcd_mode->vactive + lcd_mode->vfp;
+ mode->vsync_end = lcd_mode->vactive + lcd_mode->vfp +
+ lcd_mode->vpw;
+ mode->vtotal = lcd_mode->vactive + lcd_mode->vfp +
+ lcd_mode->vpw + lcd_mode->vbp;
+ mode->vrefresh = lcd_mode->refresh;
+
+ /* Always make the first mode preferred */
+ if (i == 0)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ num++;
+ }
+
+ memcpy(connector->display_info.name, lcd_info->name, 32);
+ connector->display_info.width_mm = lcd_info->width_mm;
+ connector->display_info.height_mm = lcd_info->height_mm;
+ connector->display_info.bpc = lcd_info->bpc;
+
+ if (lcd_info->bus_format)
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &lcd_info->bus_format, 1);
+ connector->display_info.bus_flags = lcd_info->bus_flag;
+
+ return num;
+}
+
+static const struct drm_panel_funcs lcd_olinuxino_funcs = {
+ .disable = lcd_olinuxino_disable,
+ .unprepare = lcd_olinuxino_unprepare,
+ .prepare = lcd_olinuxino_prepare,
+ .enable = lcd_olinuxino_enable,
+ .get_modes = lcd_olinuxino_get_modes,
+};
+
+static int lcd_olinuxino_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct lcd_olinuxino *lcd;
+ u32 checksum, i;
+ int ret = 0;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK))
+ return -ENODEV;
+
+ lcd = devm_kzalloc(dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, lcd);
+ lcd->dev = dev;
+ lcd->client = client;
+
+ mutex_init(&lcd->mutex);
+
+ /* Copy data into buffer */
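+ /* 256 bytes in total, fetched as eight 32-byte SMBus block reads */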
+ for (i = 0; i < LCD_OLINUXINO_DATA_LEN; i += I2C_SMBUS_BLOCK_MAX) {
+ mutex_lock(&lcd->mutex);
+ ret = i2c_smbus_read_i2c_block_data(client,
+ i,
+ I2C_SMBUS_BLOCK_MAX,
+ (u8 *)&lcd->eeprom + i);
+ mutex_unlock(&lcd->mutex);
+ if (ret < 0) {
+ dev_err(dev, "error reading from device at %02x\n", i);
+ return ret;
+ }
+ }
+
+ /* Check configuration checksum */
+ checksum = ~crc32(~0, (u8 *)&lcd->eeprom, 252);
+ if (checksum != lcd->eeprom.checksum) {
+ dev_err(dev, "configuration checksum does not match!\n");
+ return -EINVAL;
+ }
+
+ /* Check magic header */
+ if (lcd->eeprom.header != LCD_OLINUXINO_HEADER_MAGIC) {
+ dev_err(dev, "magic header does not match\n");
+ return -EINVAL;
+ }
+
+ dev_info(dev, "Detected %s, Rev. %s, Serial: %08x\n",
+ lcd->eeprom.info.name,
+ lcd->eeprom.revision,
+ lcd->eeprom.serial);
+
+ /*
+ * The EEPROM can hold up to 4 modes.
+ * If the stored value is larger, clamp it to 4.
+ */
+ if (lcd->eeprom.num_modes > 4) {
+ dev_warn(dev, "invalid number of modes, falling back to 4\n");
+ lcd->eeprom.num_modes = 4;
+ }
+
+ lcd->enabled = false;
+ lcd->prepared = false;
+
+ lcd->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(lcd->supply))
+ return PTR_ERR(lcd->supply);
+
+ lcd->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->enable_gpio))
+ return PTR_ERR(lcd->enable_gpio);
+
+ lcd->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(lcd->backlight))
+ return PTR_ERR(lcd->backlight);
+
+ drm_panel_init(&lcd->panel);
+ lcd->panel.dev = dev;
+ lcd->panel.funcs = &lcd_olinuxino_funcs;
+
+ return drm_panel_add(&lcd->panel);
+}
+
+static int lcd_olinuxino_remove(struct i2c_client *client)
+{
+ struct lcd_olinuxino *panel = i2c_get_clientdata(client);
+
+ drm_panel_remove(&panel->panel);
+
+ lcd_olinuxino_disable(&panel->panel);
+ lcd_olinuxino_unprepare(&panel->panel);
+
+ return 0;
+}
+
+static const struct of_device_id lcd_olinuxino_of_ids[] = {
+ { .compatible = "olimex,lcd-olinuxino" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lcd_olinuxino_of_ids);
+
+static struct i2c_driver lcd_olinuxino_driver = {
+ .driver = {
+ .name = "lcd_olinuxino",
+ .of_match_table = lcd_olinuxino_of_ids,
+ },
+ .probe = lcd_olinuxino_probe,
+ .remove = lcd_olinuxino_remove,
+};
+
+module_i2c_driver(lcd_olinuxino_driver);
+
+MODULE_AUTHOR("Stefan Mavrodiev <stefan@olimex.com>");
+MODULE_DESCRIPTION("LCD-OLinuXino driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
new file mode 100644
index 000000000000..33c22ee036f8
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MIPI-DSI Samsung s6d16d0 panel driver. This is an 864x480
+ * AMOLED panel with a command-only DSI interface.
+ */
+
+#include <drm/drm_modes.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+
+struct s6d16d0 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+};
+
+/*
+ * The timings are not very helpful as the display is used in
+ * command mode.
+ */
+static const struct drm_display_mode samsung_s6d16d0_mode = {
+ /* HS clock, (htotal*vtotal*vrefresh)/1000 */
+ .clock = 420160,
+ .hdisplay = 864,
+ .hsync_start = 864 + 154,
+ .hsync_end = 864 + 154 + 16,
+ .htotal = 864 + 154 + 16 + 32,
+ .vdisplay = 480,
+ .vsync_start = 480 + 1,
+ .vsync_end = 480 + 1 + 1,
+ .vtotal = 480 + 1 + 1 + 1,
+ /*
+ * This depends on whether the link is clocked at the HS or LP
+ * rate; the value is calculated as:
+ * vrefresh = (clock * 1000) / (htotal * vtotal)
+ */
+ .vrefresh = 816,
+ .width_mm = 84,
+ .height_mm = 48,
+};
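+
+/*
+ * Plugging the numbers in: htotal = 1066 and vtotal = 483, so
+ * (420160 * 1000) / (1066 * 483) ~= 816, matching .vrefresh above.
+ */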
+
+static inline struct s6d16d0 *panel_to_s6d16d0(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6d16d0, panel);
+}
+
+static int s6d16d0_unprepare(struct drm_panel *panel)
+{
+ struct s6d16d0 *s6 = panel_to_s6d16d0(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev);
+ int ret;
+
+ /* Enter sleep mode */
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(s6->dev, "failed to enter sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Assert RESET */
+ gpiod_set_value_cansleep(s6->reset_gpio, 1);
+ regulator_disable(s6->supply);
+
+ return 0;
+}
+
+static int s6d16d0_prepare(struct drm_panel *panel)
+{
+ struct s6d16d0 *s6 = panel_to_s6d16d0(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev);
+ int ret;
+
+ ret = regulator_enable(s6->supply);
+ if (ret) {
+ DRM_DEV_ERROR(s6->dev, "failed to enable supply (%d)\n", ret);
+ return ret;
+ }
+
+ /* Assert RESET */
+ gpiod_set_value_cansleep(s6->reset_gpio, 1);
+ udelay(10);
+ /* De-assert RESET */
+ gpiod_set_value_cansleep(s6->reset_gpio, 0);
+ msleep(120);
+
+ /* Enable tearing mode: send TE (tearing effect) at VBLANK */
+ ret = mipi_dsi_dcs_set_tear_on(dsi,
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret) {
+ DRM_DEV_ERROR(s6->dev, "failed to enable vblank TE (%d)\n",
+ ret);
+ return ret;
+ }
+ /* Exit sleep mode and power on */
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(s6->dev, "failed to exit sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6d16d0_enable(struct drm_panel *panel)
+{
+ struct s6d16d0 *s6 = panel_to_s6d16d0(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(s6->dev, "failed to turn display on (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6d16d0_disable(struct drm_panel *panel)
+{
+ struct s6d16d0 *s6 = panel_to_s6d16d0(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(s6->dev, "failed to turn display off (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6d16d0_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ strncpy(connector->display_info.name, "Samsung S6D16D0\0",
+ DRM_DISPLAY_INFO_LEN);
+
+ mode = drm_mode_duplicate(panel->drm, &samsung_s6d16d0_mode);
+ if (!mode) {
+ DRM_ERROR("bad mode or failed to add mode\n");
+ return -EINVAL;
+ }
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs s6d16d0_drm_funcs = {
+ .disable = s6d16d0_disable,
+ .unprepare = s6d16d0_unprepare,
+ .prepare = s6d16d0_prepare,
+ .enable = s6d16d0_enable,
+ .get_modes = s6d16d0_get_modes,
+};
+
+static int s6d16d0_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6d16d0 *s6;
+ int ret;
+
+ s6 = devm_kzalloc(dev, sizeof(struct s6d16d0), GFP_KERNEL);
+ if (!s6)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, s6);
+ s6->dev = dev;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->hs_rate = 420160000;
+ dsi->lp_rate = 19200000;
+ /*
+ * This display uses command mode so no MIPI_DSI_MODE_VIDEO
+ * or MIPI_DSI_MODE_VIDEO_SYNC_PULSE
+ *
+ * As we only send commands we do not need to be continuously
+ * clocked.
+ */
+ dsi->mode_flags =
+ MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_EOT_PACKET;
+
+ s6->supply = devm_regulator_get(dev, "vdd1");
+ if (IS_ERR(s6->supply))
+ return PTR_ERR(s6->supply);
+
+ /* This asserts RESET by default */
+ s6->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(s6->reset_gpio)) {
+ ret = PTR_ERR(s6->reset_gpio);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to request GPIO (%d)\n",
+ ret);
+ return ret;
+ }
+
+ drm_panel_init(&s6->panel);
+ s6->panel.dev = dev;
+ s6->panel.funcs = &s6d16d0_drm_funcs;
+
+ ret = drm_panel_add(&s6->panel);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0)
+ drm_panel_remove(&s6->panel);
+
+ return ret;
+}
+
+static int s6d16d0_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6d16d0 *s6 = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&s6->panel);
+
+ return 0;
+}
+
+static const struct of_device_id s6d16d0_of_match[] = {
+ { .compatible = "samsung,s6d16d0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s6d16d0_of_match);
+
+static struct mipi_dsi_driver s6d16d0_driver = {
+ .probe = s6d16d0_probe,
+ .remove = s6d16d0_remove,
+ .driver = {
+ .name = "panel-samsung-s6d16d0",
+ .of_match_table = s6d16d0_of_match,
+ },
+};
+module_mipi_dsi_driver(s6d16d0_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("MIPI-DSI s6d16d0 Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 75f925390551..2d99e28ff117 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 NXP Semiconductors.
* Author: Marco Franchi <marco.franchi@nxp.com>
*
* Based on Panel Simple driver by Thierry Reding <treding@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/backlight.h>
@@ -366,6 +363,6 @@ static struct platform_driver seiko_panel_platform_driver = {
};
module_platform_driver(seiko_panel_platform_driver);
-MODULE_AUTHOR("Marco Franchi <marco.franchi@nxp.com");
+MODULE_AUTHOR("Marco Franchi <marco.franchi@nxp.com>");
MODULE_DESCRIPTION("Seiko 43WVF1G panel driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index a04ffb3b2174..9c69e739a524 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -618,6 +618,30 @@ static const struct panel_desc auo_g070vvn01 = {
},
};
+static const struct drm_display_mode auo_g101evn010_mode = {
+ .clock = 68930,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 82,
+ .hsync_end = 1280 + 82 + 2,
+ .htotal = 1280 + 82 + 2 + 84,
+ .vdisplay = 800,
+ .vsync_start = 800 + 8,
+ .vsync_end = 800 + 8 + 2,
+ .vtotal = 800 + 8 + 2 + 6,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g101evn010 = {
+ .modes = &auo_g101evn010_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct drm_display_mode auo_g104sn02_mode = {
.clock = 40000,
.hdisplay = 800,
@@ -782,16 +806,38 @@ static const struct panel_desc avic_tm070ddh03 = {
},
};
+static const struct drm_display_mode bananapi_s070wv20_ct16_mode = {
+ .clock = 30000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 48,
+ .htotal = 800 + 40 + 48 + 40,
+ .vdisplay = 480,
+ .vsync_start = 480 + 13,
+ .vsync_end = 480 + 13 + 3,
+ .vtotal = 480 + 13 + 3 + 29,
+};
+
+static const struct panel_desc bananapi_s070wv20_ct16 = {
+ .modes = &bananapi_s070wv20_ct16_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+};
+
static const struct drm_display_mode boe_hv070wsa_mode = {
- .clock = 40800,
+ .clock = 42105,
.hdisplay = 1024,
- .hsync_start = 1024 + 90,
- .hsync_end = 1024 + 90 + 90,
- .htotal = 1024 + 90 + 90 + 90,
+ .hsync_start = 1024 + 30,
+ .hsync_end = 1024 + 30 + 30,
+ .htotal = 1024 + 30 + 30 + 30,
.vdisplay = 600,
- .vsync_start = 600 + 3,
- .vsync_end = 600 + 3 + 4,
- .vtotal = 600 + 3 + 4 + 3,
+ .vsync_start = 600 + 10,
+ .vsync_end = 600 + 10 + 10,
+ .vtotal = 600 + 10 + 10 + 10,
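+ /* 42105 * 1000 / (1114 * 630) ~= 60 Hz, consistent with .vrefresh below */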
.vrefresh = 60,
};
@@ -846,6 +892,55 @@ static const struct panel_desc boe_nv101wxmn51 = {
},
};
+static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 5,
+ .hsync_end = 480 + 5 + 5,
+ .htotal = 480 + 5 + 5 + 40,
+ .vdisplay = 272,
+ .vsync_start = 272 + 8,
+ .vsync_end = 272 + 8 + 8,
+ .vtotal = 272 + 8 + 8 + 8,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc cdtech_s043wq26h_ct7 = {
+ .modes = &cdtech_s043wq26h_ct7_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 54,
+ },
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
+static const struct drm_display_mode cdtech_s070wv95_ct16_mode = {
+ .clock = 35000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 40,
+ .htotal = 800 + 40 + 40 + 48,
+ .vdisplay = 480,
+ .vsync_start = 480 + 29,
+ .vsync_end = 480 + 29 + 13,
+ .vtotal = 480 + 29 + 13 + 3,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc cdtech_s070wv95_ct16 = {
+ .modes = &cdtech_s070wv95_ct16_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 154,
+ .height = 85,
+ },
+};
+
static const struct drm_display_mode chunghwa_claa070wp03xg_mode = {
.clock = 66770,
.hdisplay = 800,
@@ -971,6 +1066,36 @@ static const struct panel_desc dlc_dlc0700yzg_1 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
};
+static const struct display_timing dlc_dlc1010gig_timing = {
+ .pixelclock = { 68900000, 71100000, 73400000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 43, 53, 63 },
+ .hback_porch = { 43, 53, 63 },
+ .hsync_len = { 44, 54, 64 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 5, 8, 11 },
+ .vback_porch = { 5, 8, 11 },
+ .vsync_len = { 5, 7, 11 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
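+
+/* Each { a, b, c } triplet above is { minimum, typical, maximum }. */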
+
+static const struct panel_desc dlc_dlc1010gig = {
+ .timings = &dlc_dlc1010gig_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .delay = {
+ .prepare = 60,
+ .enable = 150,
+ .disable = 100,
+ .unprepare = 60,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct drm_display_mode edt_et057090dhu_mode = {
.clock = 25175,
.hdisplay = 640,
@@ -2334,6 +2459,33 @@ static const struct panel_desc winstar_wf35ltiacd = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode arm_rtsm_mode[] = {
+ {
+ .clock = 65000,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 24,
+ .hsync_end = 1024 + 24 + 136,
+ .htotal = 1024 + 24 + 136 + 160,
+ .vdisplay = 768,
+ .vsync_start = 768 + 3,
+ .vsync_end = 768 + 3 + 6,
+ .vtotal = 768 + 3 + 6 + 29,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ },
+};
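+
+/* This is the standard VESA 1024x768@60 timing: 65 MHz, 1344 x 806 total. */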
+
+static const struct panel_desc arm_rtsm = {
+ .modes = arm_rtsm_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 400,
+ .height = 300,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct of_device_id platform_of_match[] = {
{
.compatible = "ampire,am-480272h3tmqw-t01h",
@@ -2342,6 +2494,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "ampire,am800480r3tmqwa1h",
.data = &ampire_am800480r3tmqwa1h,
}, {
+ .compatible = "arm,rtsm-display",
+ .data = &arm_rtsm,
+ }, {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
@@ -2363,6 +2518,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g070vvn01",
.data = &auo_g070vvn01,
}, {
+ .compatible = "auo,g101evn010",
+ .data = &auo_g101evn010,
+ }, {
.compatible = "auo,g104sn02",
.data = &auo_g104sn02,
}, {
@@ -2381,12 +2539,21 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "avic,tm070ddh03",
.data = &avic_tm070ddh03,
}, {
+ .compatible = "bananapi,s070wv20-ct16",
+ .data = &bananapi_s070wv20_ct16,
+ }, {
.compatible = "boe,hv070wsa-100",
.data = &boe_hv070wsa
}, {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
+ .compatible = "cdtech,s043wq26h-ct7",
+ .data = &cdtech_s043wq26h_ct7,
+ }, {
+ .compatible = "cdtech,s070wv95-ct16",
+ .data = &cdtech_s070wv95_ct16,
+ }, {
.compatible = "chunghwa,claa070wp03xg",
.data = &chunghwa_claa070wp03xg,
}, {
@@ -2402,6 +2569,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "dlc,dlc0700yzg-1",
.data = &dlc_dlc0700yzg_1,
}, {
+ .compatible = "dlc,dlc1010gig",
+ .data = &dlc_dlc1010gig,
+ }, {
.compatible = "edt,et057090dhu",
.data = &edt_et057090dhu,
}, {
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
new file mode 100644
index 000000000000..fc2a66c53db4
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+static const char * const regulator_names[] = {
+ "vdda",
+ "vdispp",
+ "vdispn",
+};
+
+static unsigned long const regulator_enable_loads[] = {
+ 62000,
+ 100000,
+ 100000,
+};
+
+static unsigned long const regulator_disable_loads[] = {
+ 80,
+ 100,
+ 100,
+};
+
+struct cmd_set {
+ u8 commands[4];
+ u8 size;
+};
+
+struct nt35597_config {
+ u32 width_mm;
+ u32 height_mm;
+ const char *panel_name;
+ const struct cmd_set *panel_on_cmds;
+ u32 num_on_cmds;
+ const struct drm_display_mode *dm;
+};
+
+struct truly_nt35597 {
+ struct device *dev;
+ struct drm_panel panel;
+
+ struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
+
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *mode_gpio;
+
+ struct backlight_device *backlight;
+
+ struct mipi_dsi_device *dsi[2];
+
+ const struct nt35597_config *config;
+ bool prepared;
+ bool enabled;
+};
+
+static inline struct truly_nt35597 *panel_to_ctx(struct drm_panel *panel)
+{
+ return container_of(panel, struct truly_nt35597, panel);
+}
+
+static const struct cmd_set qcom_2k_panel_magic_cmds[] = {
+ /* CMD2_P0 */
+ { { 0xff, 0x20 }, 2 },
+ { { 0xfb, 0x01 }, 2 },
+ { { 0x00, 0x01 }, 2 },
+ { { 0x01, 0x55 }, 2 },
+ { { 0x02, 0x45 }, 2 },
+ { { 0x05, 0x40 }, 2 },
+ { { 0x06, 0x19 }, 2 },
+ { { 0x07, 0x1e }, 2 },
+ { { 0x0b, 0x73 }, 2 },
+ { { 0x0c, 0x73 }, 2 },
+ { { 0x0e, 0xb0 }, 2 },
+ { { 0x0f, 0xae }, 2 },
+ { { 0x11, 0xb8 }, 2 },
+ { { 0x13, 0x00 }, 2 },
+ { { 0x58, 0x80 }, 2 },
+ { { 0x59, 0x01 }, 2 },
+ { { 0x5a, 0x00 }, 2 },
+ { { 0x5b, 0x01 }, 2 },
+ { { 0x5c, 0x80 }, 2 },
+ { { 0x5d, 0x81 }, 2 },
+ { { 0x5e, 0x00 }, 2 },
+ { { 0x5f, 0x01 }, 2 },
+ { { 0x72, 0x11 }, 2 },
+ { { 0x68, 0x03 }, 2 },
+ /* CMD2_P4 */
+ { { 0xFF, 0x24 }, 2 },
+ { { 0xFB, 0x01 }, 2 },
+ { { 0x00, 0x1C }, 2 },
+ { { 0x01, 0x0B }, 2 },
+ { { 0x02, 0x0C }, 2 },
+ { { 0x03, 0x01 }, 2 },
+ { { 0x04, 0x0F }, 2 },
+ { { 0x05, 0x10 }, 2 },
+ { { 0x06, 0x10 }, 2 },
+ { { 0x07, 0x10 }, 2 },
+ { { 0x08, 0x89 }, 2 },
+ { { 0x09, 0x8A }, 2 },
+ { { 0x0A, 0x13 }, 2 },
+ { { 0x0B, 0x13 }, 2 },
+ { { 0x0C, 0x15 }, 2 },
+ { { 0x0D, 0x15 }, 2 },
+ { { 0x0E, 0x17 }, 2 },
+ { { 0x0F, 0x17 }, 2 },
+ { { 0x10, 0x1C }, 2 },
+ { { 0x11, 0x0B }, 2 },
+ { { 0x12, 0x0C }, 2 },
+ { { 0x13, 0x01 }, 2 },
+ { { 0x14, 0x0F }, 2 },
+ { { 0x15, 0x10 }, 2 },
+ { { 0x16, 0x10 }, 2 },
+ { { 0x17, 0x10 }, 2 },
+ { { 0x18, 0x89 }, 2 },
+ { { 0x19, 0x8A }, 2 },
+ { { 0x1A, 0x13 }, 2 },
+ { { 0x1B, 0x13 }, 2 },
+ { { 0x1C, 0x15 }, 2 },
+ { { 0x1D, 0x15 }, 2 },
+ { { 0x1E, 0x17 }, 2 },
+ { { 0x1F, 0x17 }, 2 },
+ /* STV */
+ { { 0x20, 0x40 }, 2 },
+ { { 0x21, 0x01 }, 2 },
+ { { 0x22, 0x00 }, 2 },
+ { { 0x23, 0x40 }, 2 },
+ { { 0x24, 0x40 }, 2 },
+ { { 0x25, 0x6D }, 2 },
+ { { 0x26, 0x40 }, 2 },
+ { { 0x27, 0x40 }, 2 },
+ /* Vend */
+ { { 0xE0, 0x00 }, 2 },
+ { { 0xDC, 0x21 }, 2 },
+ { { 0xDD, 0x22 }, 2 },
+ { { 0xDE, 0x07 }, 2 },
+ { { 0xDF, 0x07 }, 2 },
+ { { 0xE3, 0x6D }, 2 },
+ { { 0xE1, 0x07 }, 2 },
+ { { 0xE2, 0x07 }, 2 },
+ /* UD */
+ { { 0x29, 0xD8 }, 2 },
+ { { 0x2A, 0x2A }, 2 },
+ /* CLK */
+ { { 0x4B, 0x03 }, 2 },
+ { { 0x4C, 0x11 }, 2 },
+ { { 0x4D, 0x10 }, 2 },
+ { { 0x4E, 0x01 }, 2 },
+ { { 0x4F, 0x01 }, 2 },
+ { { 0x50, 0x10 }, 2 },
+ { { 0x51, 0x00 }, 2 },
+ { { 0x52, 0x80 }, 2 },
+ { { 0x53, 0x00 }, 2 },
+ { { 0x56, 0x00 }, 2 },
+ { { 0x54, 0x07 }, 2 },
+ { { 0x58, 0x07 }, 2 },
+ { { 0x55, 0x25 }, 2 },
+ /* Reset XDONB */
+ { { 0x5B, 0x43 }, 2 },
+ { { 0x5C, 0x00 }, 2 },
+ { { 0x5F, 0x73 }, 2 },
+ { { 0x60, 0x73 }, 2 },
+ { { 0x63, 0x22 }, 2 },
+ { { 0x64, 0x00 }, 2 },
+ { { 0x67, 0x08 }, 2 },
+ { { 0x68, 0x04 }, 2 },
+ /* Resolution:1440x2560 */
+ { { 0x72, 0x02 }, 2 },
+ /* mux */
+ { { 0x7A, 0x80 }, 2 },
+ { { 0x7B, 0x91 }, 2 },
+ { { 0x7C, 0xD8 }, 2 },
+ { { 0x7D, 0x60 }, 2 },
+ { { 0x7F, 0x15 }, 2 },
+ { { 0x75, 0x15 }, 2 },
+ /* ABOFF */
+ { { 0xB3, 0xC0 }, 2 },
+ { { 0xB4, 0x00 }, 2 },
+ { { 0xB5, 0x00 }, 2 },
+ /* Source EQ */
+ { { 0x78, 0x00 }, 2 },
+ { { 0x79, 0x00 }, 2 },
+ { { 0x80, 0x00 }, 2 },
+ { { 0x83, 0x00 }, 2 },
+ /* FP BP */
+ { { 0x93, 0x0A }, 2 },
+ { { 0x94, 0x0A }, 2 },
+ /* Inversion Type */
+ { { 0x8A, 0x00 }, 2 },
+ { { 0x9B, 0xFF }, 2 },
+ /* IMGSWAP =1 @PortSwap=1 */
+ { { 0x9D, 0xB0 }, 2 },
+ { { 0x9F, 0x63 }, 2 },
+ { { 0x98, 0x10 }, 2 },
+ /* FRM */
+ { { 0xEC, 0x00 }, 2 },
+ /* CMD1 */
+ { { 0xFF, 0x10 }, 2 },
+ /* VBP+VSA=,VFP = 10H */
+ { { 0x3B, 0x03, 0x0A, 0x0A }, 4 },
+ /* FTE on */
+ { { 0x35, 0x00 }, 2 },
+ /* EN_BK =1(auto black) */
+ { { 0xE5, 0x01 }, 2 },
+ /* CMD mode(10) VDO mode(03) */
+ { { 0xBB, 0x03 }, 2 },
+ /* Non Reload MTP */
+ { { 0xFB, 0x01 }, 2 },
+};
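+
+/*
+ * Each entry above is sent as-is by truly_dcs_write_buf(): the first
+ * one, { { 0xff, 0x20 }, 2 }, becomes a two-byte DCS write buffer
+ * that selects command page CMD2_P0 on both DSI links.
+ */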
+
+static int truly_dcs_write(struct drm_panel *panel, u32 command)
+{
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
+ ret = mipi_dsi_dcs_write(ctx->dsi[i], command, NULL, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "cmd 0x%x failed for dsi = %d\n",
+ command, i);
+ }
+ }
+
+ return ret;
+}
+
+static int truly_dcs_write_buf(struct drm_panel *panel,
+ u32 size, const u8 *buf)
+{
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi[i], buf, size);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to tx cmd [%d], err: %d\n", i, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int truly_35597_power_on(struct truly_nt35597 *ctx)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
+ ret = regulator_set_load(ctx->supplies[i].consumer,
+ regulator_enable_loads[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The reset sequence of the Truly panel requires it to be out of
+ * reset for 10ms, then held in reset for 10ms, and then released
+ * again
+ */
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+
+ return 0;
+}
+
+static int truly_nt35597_power_off(struct truly_nt35597 *ctx)
+{
+ int ret = 0;
+ int i;
+
+ gpiod_set_value(ctx->reset_gpio, 1);
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
+ ret = regulator_set_load(ctx->supplies[i].consumer,
+ regulator_disable_loads[i]);
+ if (ret) {
+ DRM_DEV_ERROR(ctx->dev,
+ "regulator_set_load failed %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret) {
+ DRM_DEV_ERROR(ctx->dev,
+ "regulator_bulk_disable failed %d\n", ret);
+ }
+ return ret;
+}
+
+static int truly_nt35597_disable(struct drm_panel *panel)
+{
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ int ret;
+
+ if (!ctx->enabled)
+ return 0;
+
+ if (ctx->backlight) {
+ ret = backlight_disable(ctx->backlight);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->dev, "backlight disable failed %d\n",
+ ret);
+ }
+
+ ctx->enabled = false;
+ return 0;
+}
+
+static int truly_nt35597_unprepare(struct drm_panel *panel)
+{
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ int ret = 0;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ctx->dsi[0]->mode_flags = 0;
+ ctx->dsi[1]->mode_flags = 0;
+
+ ret = truly_dcs_write(panel, MIPI_DCS_SET_DISPLAY_OFF);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "set_display_off cmd failed ret = %d\n",
+ ret);
+ }
+
+ /* 120ms delay required here as per DCS spec */
+ msleep(120);
+
+ ret = truly_dcs_write(panel, MIPI_DCS_ENTER_SLEEP_MODE);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "enter_sleep cmd failed ret = %d\n", ret);
+ }
+
+ ret = truly_nt35597_power_off(ctx);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->dev, "power_off failed ret = %d\n", ret);
+
+ ctx->prepared = false;
+ return ret;
+}
+
+static int truly_nt35597_prepare(struct drm_panel *panel)
+{
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ int ret;
+ int i;
+ const struct cmd_set *panel_on_cmds;
+ const struct nt35597_config *config;
+ u32 num_cmds;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = truly_35597_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM;
+ ctx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ config = ctx->config;
+ panel_on_cmds = config->panel_on_cmds;
+ num_cmds = config->num_on_cmds;
+
+ for (i = 0; i < num_cmds; i++) {
+ ret = truly_dcs_write_buf(panel,
+ panel_on_cmds[i].size,
+ panel_on_cmds[i].commands);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "cmd set tx failed i = %d ret = %d\n",
+ i, ret);
+ goto power_off;
+ }
+ }
+
+ ret = truly_dcs_write(panel, MIPI_DCS_EXIT_SLEEP_MODE);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "exit_sleep_mode cmd failed ret = %d\n",
+ ret);
+ goto power_off;
+ }
+
+ /* Per DSI spec wait 120ms after sending exit sleep DCS command */
+ msleep(120);
+
+ ret = truly_dcs_write(panel, MIPI_DCS_SET_DISPLAY_ON);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "set_display_on cmd failed ret = %d\n", ret);
+ goto power_off;
+ }
+
+ /* Per DSI spec wait 120ms after sending set_display_on DCS command */
+ msleep(120);
+
+ ctx->prepared = true;
+
+ return 0;
+
+power_off:
+ if (truly_nt35597_power_off(ctx))
+ DRM_DEV_ERROR(ctx->dev, "power_off failed\n");
+ return ret;
+}
+
+static int truly_nt35597_enable(struct drm_panel *panel)
+{
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ int ret;
+
+ if (ctx->enabled)
+ return 0;
+
+ if (ctx->backlight) {
+ ret = backlight_enable(ctx->backlight);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->dev, "backlight enable failed %d\n",
+ ret);
+ }
+
+ ctx->enabled = true;
+
+ return 0;
+}
+
+static int truly_nt35597_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ struct drm_display_mode *mode;
+ const struct nt35597_config *config;
+
+ config = ctx->config;
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to create a new display mode\n");
+ return 0;
+ }
+
+ connector->display_info.width_mm = config->width_mm;
+ connector->display_info.height_mm = config->height_mm;
+ drm_mode_copy(mode, config->dm);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs truly_nt35597_drm_funcs = {
+ .disable = truly_nt35597_disable,
+ .unprepare = truly_nt35597_unprepare,
+ .prepare = truly_nt35597_prepare,
+ .enable = truly_nt35597_enable,
+ .get_modes = truly_nt35597_get_modes,
+};
+
+static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
+{
+ struct device *dev = ctx->dev;
+ int ret, i;
+ const struct nt35597_config *config;
+
+ config = ctx->config;
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+ ctx->supplies[i].supply = regulator_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio %ld\n",
+ PTR_ERR(ctx->reset_gpio));
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->mode_gpio = devm_gpiod_get(dev, "mode", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->mode_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get mode gpio %ld\n",
+ PTR_ERR(ctx->mode_gpio));
+ return PTR_ERR(ctx->mode_gpio);
+ }
+
+ /* dual port */
+ gpiod_set_value(ctx->mode_gpio, 0);
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &truly_nt35597_drm_funcs;
+ drm_panel_add(&ctx->panel);
+
+ return 0;
+}
+
+static const struct drm_display_mode qcom_sdm845_mtp_2k_mode = {
+ .name = "1440x2560",
+ .clock = 268316,
+ .hdisplay = 1440,
+ .hsync_start = 1440 + 200,
+ .hsync_end = 1440 + 200 + 32,
+ .htotal = 1440 + 200 + 32 + 64,
+ .vdisplay = 2560,
+ .vsync_start = 2560 + 8,
+ .vsync_end = 2560 + 8 + 1,
+ .vtotal = 2560 + 8 + 1 + 7,
+ .vrefresh = 60,
+ .flags = 0,
+};
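+
+/* 1736 * 2576 * 60 = 268316160, i.e. the 268316 kHz pixel clock above. */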
+
+static const struct nt35597_config nt35597_dir = {
+ .width_mm = 74,
+ .height_mm = 131,
+ .panel_name = "qcom_sdm845_mtp_2k_panel",
+ .dm = &qcom_sdm845_mtp_2k_mode,
+ .panel_on_cmds = qcom_2k_panel_magic_cmds,
+ .num_on_cmds = ARRAY_SIZE(qcom_2k_panel_magic_cmds),
+};
+
+static int truly_nt35597_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct truly_nt35597 *ctx;
+ struct mipi_dsi_device *dsi1_device;
+ struct device_node *dsi1;
+ struct mipi_dsi_host *dsi1_host;
+ struct mipi_dsi_device *dsi_dev;
+ int ret = 0;
+ int i;
+
+ const struct mipi_dsi_device_info info = {
+ .type = "trulynt35597",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+
+ if (!ctx)
+ return -ENOMEM;
+
+ /*
+ * This device represents itself as one with two input ports, which
+ * are fed by the output ports of the two DSI controllers. DSI0 is
+ * the master controller and has most of the panel-related info in
+ * its child node.
+ */
+
+ ctx->config = of_device_get_match_data(dev);
+
+ if (!ctx->config) {
+ dev_err(dev, "missing device configuration\n");
+ return -ENODEV;
+ }
+
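+ /* Port 1 of the panel node leads to the second (slave) DSI host. */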
+ dsi1 = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+ if (!dsi1) {
+ DRM_DEV_ERROR(dev,
+ "failed to get remote node for dsi1_device\n");
+ return -ENODEV;
+ }
+
+ dsi1_host = of_find_mipi_dsi_host_by_node(dsi1);
+ of_node_put(dsi1);
+ if (!dsi1_host) {
+ DRM_DEV_ERROR(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* register the second DSI device */
+ dsi1_device = mipi_dsi_device_register_full(dsi1_host, &info);
+ if (IS_ERR(dsi1_device)) {
+ DRM_DEV_ERROR(dev, "failed to create dsi device\n");
+ return PTR_ERR(dsi1_device);
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+ ctx->dsi[0] = dsi;
+ ctx->dsi[1] = dsi1_device;
+
+ ret = truly_nt35597_panel_add(ctx);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to add panel\n");
+ goto err_panel_add;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
+ dsi_dev = ctx->dsi[i];
+ dsi_dev->lanes = 4;
+ dsi_dev->format = MIPI_DSI_FMT_RGB888;
+ dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ ret = mipi_dsi_attach(dsi_dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev,
+ "dsi attach failed i = %d\n", i);
+ goto err_dsi_attach;
+ }
+ }
+
+ return 0;
+
+err_dsi_attach:
+ drm_panel_remove(&ctx->panel);
+err_panel_add:
+ mipi_dsi_device_unregister(dsi1_device);
+ return ret;
+}
+
+static int truly_nt35597_remove(struct mipi_dsi_device *dsi)
+{
+ struct truly_nt35597 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ if (ctx->dsi[0])
+ mipi_dsi_detach(ctx->dsi[0]);
+ if (ctx->dsi[1]) {
+ mipi_dsi_detach(ctx->dsi[1]);
+ mipi_dsi_device_unregister(ctx->dsi[1]);
+ }
+
+ drm_panel_remove(&ctx->panel);
+ return 0;
+}
+
+static const struct of_device_id truly_nt35597_of_match[] = {
+ {
+ .compatible = "truly,nt35597-2K-display",
+ .data = &nt35597_dir,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, truly_nt35597_of_match);
+
+static struct mipi_dsi_driver truly_nt35597_driver = {
+ .driver = {
+ .name = "panel-truly-nt35597",
+ .of_match_table = truly_nt35597_of_match,
+ },
+ .probe = truly_nt35597_probe,
+ .remove = truly_nt35597_remove,
+};
+module_mipi_dsi_driver(truly_nt35597_driver);
+
+MODULE_DESCRIPTION("Truly NT35597 DSI Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index 5fa0441bb6df..38c938c9adda 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -55,6 +55,8 @@ int pl111_vexpress_clcd_init(struct device *dev,
}
}
+ of_node_put(root);
+
/*
* If there is a coretile HDLCD and it has a driver,
* do not mux the CLCD on the motherboard to the DVI.
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 208af9f37914..dffc5093ff16 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -84,6 +84,7 @@ static int qxl_check_header(struct qxl_ring *ring)
int ret;
struct qxl_ring_header *header = &(ring->ring->header);
unsigned long flags;
+
spin_lock_irqsave(&ring->lock, flags);
ret = header->prod - header->cons < header->num_items;
if (ret == 0)
@@ -97,6 +98,7 @@ int qxl_check_idle(struct qxl_ring *ring)
int ret;
struct qxl_ring_header *header = &(ring->ring->header);
unsigned long flags;
+
spin_lock_irqsave(&ring->lock, flags);
ret = header->prod == header->cons;
spin_unlock_irqrestore(&ring->lock, flags);
@@ -110,6 +112,7 @@ int qxl_ring_push(struct qxl_ring *ring,
uint8_t *elt;
int idx, ret;
unsigned long flags;
+
spin_lock_irqsave(&ring->lock, flags);
if (header->prod - header->cons == header->num_items) {
header->notify_on_cons = header->cons + 1;
@@ -156,6 +159,7 @@ static bool qxl_ring_pop(struct qxl_ring *ring,
volatile uint8_t *ring_elt;
int idx;
unsigned long flags;
+
spin_lock_irqsave(&ring->lock, flags);
if (header->cons == header->prod) {
header->notify_on_prod = header->cons + 1;
@@ -365,7 +369,6 @@ void qxl_io_flush_surfaces(struct qxl_device *qdev)
wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}
-
void qxl_io_destroy_primary(struct qxl_device *qdev)
{
wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
@@ -373,7 +376,7 @@ void qxl_io_destroy_primary(struct qxl_device *qdev)
}
void qxl_io_create_primary(struct qxl_device *qdev,
- unsigned offset, struct qxl_bo *bo)
+ unsigned int offset, struct qxl_bo *bo)
{
struct qxl_surface_create *create;
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 15c84068d3fb..118422549828 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -34,7 +34,6 @@
#include "qxl_drv.h"
#include "qxl_object.h"
-
#if defined(CONFIG_DEBUG_FS)
static int
qxl_debugfs_irq_received(struct seq_file *m, void *data)
@@ -102,9 +101,9 @@ qxl_debugfs_init(struct drm_minor *minor)
int qxl_debugfs_add_files(struct qxl_device *qdev,
struct drm_info_list *files,
- unsigned nfiles)
+ unsigned int nfiles)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < qdev->debugfs_count; i++) {
if (qdev->debugfs[i].files == files) {
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
index 94c5aec71920..a0ee41632d7e 100644
--- a/drivers/gpu/drm/qxl/qxl_dev.h
+++ b/drivers/gpu/drm/qxl/qxl_dev.h
@@ -28,7 +28,6 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
#ifndef H_QXL_DEV
#define H_QXL_DEV
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 87d16a0ce01e..ce0b9c40fc21 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -253,12 +253,13 @@ static struct mode_size {
};
static int qxl_add_common_modes(struct drm_connector *connector,
- unsigned pwidth,
- unsigned pheight)
+ unsigned int pwidth,
+ unsigned int pheight)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode = NULL;
int i;
+
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
60, false, false, false);
@@ -315,6 +316,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
oldcount = qdev->monitors_config->count;
if (crtc->state->active) {
struct drm_display_mode *mode = &crtc->mode;
+
head.width = mode->hdisplay;
head.height = mode->vdisplay;
head.x = crtc->x;
@@ -391,9 +393,9 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
- unsigned flags, unsigned color,
+ unsigned int flags, unsigned int color,
struct drm_clip_rect *clips,
- unsigned num_clips)
+ unsigned int num_clips)
{
/* TODO: vmwgfx where this was cribbed from had locking. Why? */
struct qxl_device *qdev = fb->dev->dev_private;
@@ -620,10 +622,14 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
if (ret)
goto out_kunmap;
- ret = qxl_release_reserve_list(release, true);
+ ret = qxl_bo_pin(cursor_bo);
if (ret)
goto out_free_bo;
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_unpin;
+
ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
if (ret)
goto out_backoff;
@@ -668,15 +674,17 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
- if (old_cursor_bo)
- qxl_bo_unref(&old_cursor_bo);
-
+ if (old_cursor_bo != NULL)
+ qxl_bo_unpin(old_cursor_bo);
+ qxl_bo_unref(&old_cursor_bo);
qxl_bo_unref(&cursor_bo);
return;
out_backoff:
qxl_release_backoff_reserve_list(release);
+out_unpin:
+ qxl_bo_unpin(cursor_bo);
out_free_bo:
qxl_bo_unref(&cursor_bo);
out_kunmap:
@@ -755,7 +763,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
}
}
- ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+ ret = qxl_bo_pin(user_bo);
if (ret)
return ret;
@@ -917,8 +925,8 @@ free_mem:
static int qxl_conn_get_modes(struct drm_connector *connector)
{
- unsigned pwidth = 1024;
- unsigned pheight = 768;
+ unsigned int pwidth = 1024;
+ unsigned int pheight = 768;
int ret = 0;
ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight);
@@ -938,8 +946,8 @@ static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
/* TODO: is this called for user defined modes? (xrandr --add-mode)
* TODO: check that the mode fits in the framebuffer */
- if(qdev->monitors_config_width == mode->hdisplay &&
- qdev->monitors_config_height == mode->vdisplay)
+ if (qdev->monitors_config_width == mode->hdisplay &&
+ qdev->monitors_config_height == mode->vdisplay)
return MODE_OK;
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
@@ -958,7 +966,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
return &qxl_output->enc;
}
-
static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
};
@@ -1103,7 +1110,7 @@ int qxl_create_monitors_object(struct qxl_device *qdev)
}
qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
- ret = qxl_bo_pin(qdev->monitors_config_bo, QXL_GEM_DOMAIN_VRAM, NULL);
+ ret = qxl_bo_pin(qdev->monitors_config_bo);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index cc5b32e749ce..c408bb83c7a9 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -25,7 +25,7 @@
static int alloc_clips(struct qxl_device *qdev,
struct qxl_release *release,
- unsigned num_clips,
+ unsigned int num_clips,
struct qxl_bo **clips_bo)
{
int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
@@ -37,7 +37,7 @@ static int alloc_clips(struct qxl_device *qdev,
* the qxl_clip_rects. This is *not* the same as the memory allocated
* on the device, it is offset to qxl_clip_rects.chunk.data */
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
- unsigned num_clips,
+ unsigned int num_clips,
struct qxl_bo *clips_bo)
{
struct qxl_clip_rects *dev_clips;
@@ -168,6 +168,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
int ret;
struct qxl_drm_image *dimage;
struct qxl_bo *palette_bo = NULL;
+
if (stride == 0)
stride = depth * width / 8;
@@ -214,6 +215,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
if (depth == 1) {
void *ptr;
+
ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
@@ -245,8 +247,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
qxl_release_fence_buffer_objects(release);
out_free_palette:
- if (palette_bo)
- qxl_bo_unref(&palette_bo);
+ qxl_bo_unref(&palette_bo);
out_free_image:
qxl_image_free_objects(qdev, dimage);
out_free_drawable:
@@ -264,9 +265,9 @@ out_free_drawable:
void qxl_draw_dirty_fb(struct qxl_device *qdev,
struct drm_framebuffer *fb,
struct qxl_bo *bo,
- unsigned flags, unsigned color,
+ unsigned int flags, unsigned int color,
struct drm_clip_rect *clips,
- unsigned num_clips, int inc)
+ unsigned int num_clips, int inc)
{
/*
* TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
@@ -340,7 +341,6 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
if (ret)
goto out_release_backoff;
-
ret = qxl_image_init(qdev, release, dimage, surface_base,
left, top, width, height, depth, stride);
qxl_bo_kunmap(bo);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 8ff70a7281a7..13a0254b59a1 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -23,7 +23,6 @@
* Alon Levy
*/
-
#ifndef QXL_DRV_H
#define QXL_DRV_H
@@ -83,16 +82,16 @@ struct qxl_bo {
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
- unsigned pin_count;
+ unsigned int pin_count;
void *kptr;
int type;
/* Constant after initialization */
struct drm_gem_object gem_base;
- bool is_primary; /* is this now a primary surface */
- bool is_dumb;
+ unsigned int is_primary:1; /* is this now a primary surface */
+ unsigned int is_dumb:1;
struct qxl_bo *shadow;
- bool hw_surf_alloc;
+ unsigned int hw_surf_alloc:1;
struct qxl_surface surf;
uint32_t surface_id;
struct qxl_release *surf_create;
@@ -127,13 +126,9 @@ struct qxl_output {
#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
struct qxl_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
- bool mem_global_referenced;
struct ttm_bo_device bdev;
};
-
struct qxl_memslot {
uint8_t generation;
uint64_t start_phys_addr;
@@ -191,12 +186,12 @@ struct qxl_draw_fill {
*/
struct qxl_debugfs {
struct drm_info_list *files;
- unsigned num_files;
+ unsigned int num_files;
};
int qxl_debugfs_add_files(struct qxl_device *rdev,
struct drm_info_list *files,
- unsigned nfiles);
+ unsigned int nfiles);
int qxl_debugfs_fence_init(struct qxl_device *rdev);
struct qxl_device;
@@ -231,7 +226,7 @@ struct qxl_device {
struct qxl_ram_header *ram_header;
- bool primary_created;
+ unsigned int primary_created:1;
struct qxl_memslot *mem_slots;
uint8_t n_mem_slots;
@@ -254,7 +249,7 @@ struct qxl_device {
atomic_t irq_received_display;
atomic_t irq_received_cursor;
atomic_t irq_received_io_cmd;
- unsigned irq_received_error;
+ unsigned int irq_received_error;
wait_queue_head_t display_event;
wait_queue_head_t cursor_event;
wait_queue_head_t io_cmd_event;
@@ -262,7 +257,7 @@ struct qxl_device {
/* debugfs */
struct qxl_debugfs debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
- unsigned debugfs_count;
+ unsigned int debugfs_count;
struct mutex update_area_mutex;
@@ -372,7 +367,6 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
@@ -398,7 +392,7 @@ void qxl_update_screen(struct qxl_device *qxl);
/* qxl io operations (qxl_cmd.c) */
void qxl_io_create_primary(struct qxl_device *qdev,
- unsigned offset,
+ unsigned int offset,
struct qxl_bo *bo);
void qxl_io_destroy_primary(struct qxl_device *qdev);
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
@@ -449,9 +443,9 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
void qxl_draw_dirty_fb(struct qxl_device *qdev,
struct drm_framebuffer *fb,
struct qxl_bo *bo,
- unsigned flags, unsigned color,
+ unsigned int flags, unsigned int color,
struct drm_clip_rect *clips,
- unsigned num_clips, int inc);
+ unsigned int num_clips, int inc);
void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
@@ -496,7 +490,7 @@ bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj);
int qxl_debugfs_add_files(struct qxl_device *qdev,
struct drm_info_list *files,
- unsigned nfiles);
+ unsigned int nfiles);
int qxl_surface_id_alloc(struct qxl_device *qdev,
struct qxl_bo *surf);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index c666b89eed5d..e3765739c396 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -38,6 +38,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
int r;
struct qxl_surface surf;
uint32_t pitch, format;
+
pitch = args->width * ((args->bpp + 1) / 8);
args->size = pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
@@ -52,7 +53,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
default:
return -EINVAL;
}
-
+
surf.width = args->width;
surf.height = args->height;
surf.stride = pitch;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 2294b7f14fdf..a819d24225d2 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -111,7 +111,7 @@ static int qxlfb_create_pinned_object(struct qxl_device *qdev,
qbo->surf.stride = mode_cmd->pitches[0];
qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
- ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
+ ret = qxl_bo_pin(qbo);
if (ret) {
goto out_unref;
}
@@ -134,9 +134,9 @@ out_unref:
*/
static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
- unsigned flags, unsigned color,
+ unsigned int flags, unsigned int color,
struct drm_clip_rect *clips,
- unsigned num_clips)
+ unsigned int num_clips)
{
struct qxl_device *qdev = fb->dev->dev_private;
struct fb_info *info = qdev->fb_helper.fbdev;
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index 7fbcc35e8ad3..43688ecdd8a0 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -136,6 +136,7 @@ qxl_image_init_helper(struct qxl_device *qdev,
int remain;
int page;
int size;
+
if (stride == linesize && chunk_stride == stride) {
remain = linesize * height;
page = 0;
@@ -162,7 +163,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
page++;
}
} else {
- unsigned page_base, page_offset, out_offset;
+ unsigned int page_base, page_offset, out_offset;
+
for (i = 0 ; i < height ; ++i) {
i_data = (void *)data + i * stride;
remain = linesize;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 6cc9f3367fa0..6e828158bcb0 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -85,6 +85,7 @@ static void
apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
void *reloc_page;
+
reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
info->src_bo,
@@ -189,6 +190,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
{
struct qxl_drawable *draw = fb_cmd;
+
draw->mm_time = qdev->rom->mm_clock;
}
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index e25c589d5f50..15238a413f9d 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -92,6 +92,7 @@ void qxl_reinit_memslots(struct qxl_device *qdev)
static void qxl_gc_work(struct work_struct *work)
{
struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
+
qxl_garbage_collect(qdev);
}
@@ -284,7 +285,6 @@ int qxl_device_init(struct qxl_device *qdev,
(unsigned long)qdev->surfaceram_base,
(unsigned long)qdev->surfaceram_size);
-
INIT_WORK(&qdev->gc_work, qxl_gc_work);
return 0;
@@ -313,10 +313,8 @@ error:
void qxl_device_fini(struct qxl_device *qdev)
{
- if (qdev->current_release_bo[0])
- qxl_bo_unref(&qdev->current_release_bo[0]);
- if (qdev->current_release_bo[1])
- qxl_bo_unref(&qdev->current_release_bo[1]);
+ qxl_bo_unref(&qdev->current_release_bo[0]);
+ qxl_bo_unref(&qdev->current_release_bo[1]);
flush_work(&qdev->gc_work);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 6a30196e9d6c..91f3bbc73ecc 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -54,7 +54,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
- unsigned i;
+ unsigned int i;
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
@@ -74,7 +74,6 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
}
}
-
int qxl_bo_create(struct qxl_device *qdev,
unsigned long size, bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
@@ -187,13 +186,9 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
- struct io_mapping *map;
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
- map = qdev->vram_mapping;
- else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
- map = qdev->surface_mapping;
- else
+ if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
+ (bo->tbo.mem.mem_type != TTM_PL_PRIV))
goto fallback;
io_mapping_unmap_atomic(pmap);
@@ -201,7 +196,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
ttm_mem_io_unlock(man);
- return ;
+ return;
fallback:
qxl_bo_kunmap(bo);
}
@@ -221,7 +216,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
return bo;
}
-static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+static int __qxl_bo_pin(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
struct drm_device *ddev = bo->gem_base.dev;
@@ -229,16 +224,12 @@ static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
if (bo->pin_count) {
bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = qxl_bo_gpu_offset(bo);
return 0;
}
- qxl_ttm_placement_from_domain(bo, domain, true);
+ qxl_ttm_placement_from_domain(bo, bo->type, true);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0)) {
bo->pin_count = 1;
- if (gpu_addr != NULL)
- *gpu_addr = qxl_bo_gpu_offset(bo);
}
if (unlikely(r != 0))
dev_err(ddev->dev, "%p pin failed\n", bo);
@@ -266,13 +257,12 @@ static int __qxl_bo_unpin(struct qxl_bo *bo)
return r;
}
-
/*
* Reserve the BO before pinning the object. If the BO was reserved
 * beforehand, use the internal version __qxl_bo_pin() directly.
*
*/
-int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+int qxl_bo_pin(struct qxl_bo *bo)
{
int r;
@@ -280,7 +270,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
if (r)
return r;
- r = __qxl_bo_pin(bo, bo->type, NULL);
+ r = __qxl_bo_pin(bo);
qxl_bo_unreserve(bo);
return r;
}
@@ -335,6 +325,7 @@ void qxl_bo_fini(struct qxl_device *qdev)
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
int ret;
+
if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
/* allocate a surface id for this surface now */
ret = qxl_surface_id_alloc(qdev, bo);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 0374fd93f4d6..255b914e2a7b 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -35,6 +35,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct drm_device *ddev = bo->gem_base.dev;
+
dev_err(ddev->dev, "%p reserve failed\n", bo);
}
return r;
@@ -71,6 +72,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct drm_device *ddev = bo->gem_base.dev;
+
dev_err(ddev->dev, "%p reserve failed for wait\n",
bo);
}
@@ -95,7 +97,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int pa
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
extern void qxl_bo_unref(struct qxl_bo **bo);
-extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
+extern int qxl_bo_pin(struct qxl_bo *bo);
extern int qxl_bo_unpin(struct qxl_bo *bo);
extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 9f029dda1f07..a55dece118b2 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -38,7 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
WARN_ONCE(1, "not implemented");
}
-
struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index e37f0097f744..30f85f0130cb 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -217,7 +217,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
qxl_bo_ref(bo);
entry->tv.bo = &bo->tbo;
- entry->tv.shared = false;
+ entry->tv.num_shared = 0;
list_add_tail(&entry->tv.head, &release->bos);
return 0;
}
@@ -234,7 +234,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
return ret;
}
- ret = reservation_object_reserve_shared(bo->tbo.resv);
+ ret = reservation_object_reserve_shared(bo->tbo.resv, 1);
if (ret)
return ret;
@@ -282,7 +282,6 @@ void qxl_release_backoff_reserve_list(struct qxl_release *release)
ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
-
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
struct qxl_release *create_rel,
@@ -428,8 +427,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
- struct ttm_bo_driver *driver;
- struct qxl_bo *qbo;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
@@ -450,14 +447,12 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- driver = bdev->driver;
glob = bdev->glob;
spin_lock(&glob->lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
- qbo = to_qxl_bo(bo);
reservation_object_add_shared_fence(bo->resv, &release->base);
ttm_bo_add_to_lru(bo);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 86a1fb32f6db..886f61e94f24 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -46,62 +46,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
-static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int qxl_ttm_global_init(struct qxl_device *qdev)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- qdev->mman.mem_global_referenced = false;
- global_ref = &qdev->mman.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &qxl_ttm_mem_global_init;
- global_ref->release = &qxl_ttm_mem_global_release;
-
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- qdev->mman.bo_global_ref.mem_glob =
- qdev->mman.mem_global_ref.object;
- global_ref = &qdev->mman.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&qdev->mman.mem_global_ref);
- return r;
- }
-
- qdev->mman.mem_global_referenced = true;
- return 0;
-}
-
-static void qxl_ttm_global_fini(struct qxl_device *qdev)
-{
- if (qdev->mman.mem_global_referenced) {
- drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
- drm_global_item_unref(&qdev->mman.mem_global_ref);
- qdev->mman.mem_global_referenced = false;
- }
-}
-
static struct vm_operations_struct qxl_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;
@@ -174,7 +118,7 @@ static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
return -EINVAL;
}
return 0;
@@ -331,7 +275,6 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
if (ret)
return ret;
-
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
qxl_move_null(bo, new_mem);
return 0;
@@ -373,12 +316,8 @@ int qxl_ttm_init(struct qxl_device *qdev)
int r;
int num_io_pages; /* != rom->num_io_pages, we include surface0 */
- r = qxl_ttm_global_init(qdev);
- if (r)
- return r;
 	/* No other users of the address space, so set it to 0 */
r = ttm_bo_device_init(&qdev->mman.bdev,
- qdev->mman.bo_global_ref.ref.object,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, 0);
@@ -401,11 +340,11 @@ int qxl_ttm_init(struct qxl_device *qdev)
return r;
}
DRM_INFO("qxl: %uM of VRAM memory size\n",
- (unsigned)qdev->vram_size / (1024 * 1024));
+ (unsigned int)qdev->vram_size / (1024 * 1024));
DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
- ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+ ((unsigned int)num_io_pages * PAGE_SIZE) / (1024 * 1024));
DRM_INFO("qxl: %uM of Surface memory size\n",
- (unsigned)qdev->surfaceram_size / (1024 * 1024));
+ (unsigned int)qdev->surfaceram_size / (1024 * 1024));
return 0;
}
@@ -414,11 +353,9 @@ void qxl_ttm_fini(struct qxl_device *qdev)
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
ttm_bo_device_release(&qdev->mman.bdev);
- qxl_ttm_global_fini(qdev);
DRM_INFO("qxl: ttm finalized\n");
}
-
#define QXL_DEBUGFS_MEM_TYPES 2
#if defined(CONFIG_DEBUG_FS)
@@ -443,7 +380,7 @@ int qxl_ttm_debugfs_init(struct qxl_device *qdev)
#if defined(CONFIG_DEBUG_FS)
static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
- unsigned i;
+ unsigned int i;
for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
if (i == 0)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 21161aa8acbf..652126fd6dd4 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -814,7 +814,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
((idx_value >> 21) & 0xF));
return -EINVAL;
}
- /* Pass through. */
+ /* Fall through. */
case 6:
track->cb[i].cpp = 4;
break;
@@ -965,7 +965,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return -EINVAL;
}
/* The same rules apply as for DXT3/5. */
- /* Pass through. */
+ /* Fall through. */
case R300_TX_FORMAT_DXT3:
case R300_TX_FORMAT_DXT5:
track->textures[i].cpp = 1;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 45e1d4e60759..2318d9e3ed96 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -109,6 +109,7 @@ void r420_pipes_init(struct radeon_device *rdev)
default:
/* force to 1 pipe */
num_pipes = 1;
+ /* fall through */
case 1:
tmp = (0 << 1);
break;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1a6f6edb3515..32808e50be12 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -448,10 +448,7 @@ struct radeon_surface_reg {
* TTM.
*/
struct radeon_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
struct ttm_bo_device bdev;
- bool mem_global_referenced;
bool initialized;
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1ae31dbc61c6..f43305329939 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -178,7 +178,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
- p->relocs[i].tv.shared = !r->write_domain;
+ p->relocs[i].tv.num_shared = !r->write_domain;
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
priority);
@@ -253,7 +253,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
resv = reloc->robj->tbo.resv;
r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
- reloc->tv.shared);
+ reloc->tv.num_shared);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 27d8e7dd2d06..44617dec8183 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -552,7 +552,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
INIT_LIST_HEAD(&list);
tv.bo = &bo_va->bo->tbo;
- tv.shared = true;
+ tv.num_shared = 1;
list_add(&tv.head, &list);
vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 4278272e3191..3dae2c4dec71 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -421,24 +421,14 @@ static void radeon_legacy_write_tv_restarts(struct radeon_encoder *radeon_encode
static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
- struct radeon_crtc *radeon_crtc;
int restart;
unsigned int h_total, v_total, f_total;
int v_offset, h_offset;
u16 p1, p2, h_inc;
bool h_changed;
const struct radeon_tv_mode_constants *const_ptr;
- struct radeon_pll *pll;
-
- radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
- if (radeon_crtc->crtc_id == 1)
- pll = &rdev->clock.p2pll;
- else
- pll = &rdev->clock.p1pll;
const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
if (!const_ptr)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 92f6d4002eea..833e909706a9 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -314,11 +314,9 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
- struct radeon_device *rdev;
if ((*bo) == NULL)
return;
- rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
ttm_bo_put(tbo);
*bo = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index cbb67e9ffb3a..9920a6fc11bf 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -60,65 +60,6 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
return rdev;
}
-
-/*
- * Global memory.
- */
-static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int radeon_ttm_global_init(struct radeon_device *rdev)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- rdev->mman.mem_global_referenced = false;
- global_ref = &rdev->mman.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &radeon_ttm_mem_global_init;
- global_ref->release = &radeon_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- rdev->mman.bo_global_ref.mem_glob =
- rdev->mman.mem_global_ref.object;
- global_ref = &rdev->mman.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&rdev->mman.mem_global_ref);
- return r;
- }
-
- rdev->mman.mem_global_referenced = true;
- return 0;
-}
-
-static void radeon_ttm_global_fini(struct radeon_device *rdev)
-{
- if (rdev->mman.mem_global_referenced) {
- drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
- drm_global_item_unref(&rdev->mman.mem_global_ref);
- rdev->mman.mem_global_referenced = false;
- }
-}
-
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -847,13 +788,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
{
int r;
- r = radeon_ttm_global_init(rdev);
- if (r) {
- return r;
- }
 	/* No other users of the address space, so set it to 0 */
r = ttm_bo_device_init(&rdev->mman.bdev,
- rdev->mman.bo_global_ref.ref.object,
&radeon_bo_driver,
rdev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -925,7 +861,6 @@ void radeon_ttm_fini(struct radeon_device *rdev)
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
ttm_bo_device_release(&rdev->mman.bdev);
radeon_gart_fini(rdev);
- radeon_ttm_global_fini(rdev);
rdev->mman.initialized = false;
DRM_INFO("radeon: ttm finalized\n");
}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 7f1a9c787bd1..0d374211661c 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -142,7 +142,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
- list[0].tv.shared = true;
+ list[0].tv.num_shared = 1;
list[0].tiling_flags = 0;
list_add(&list[0].tv.head, head);
@@ -154,7 +154,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.shared = true;
+ list[idx].tv.num_shared = 1;
list[idx].tiling_flags = 0;
list_add(&list[idx++].tv.head, head);
}
@@ -831,7 +831,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
int r;
radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
- r = reservation_object_reserve_shared(pt->tbo.resv);
+ r = reservation_object_reserve_shared(pt->tbo.resv, 1);
if (r)
return r;
@@ -946,7 +946,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
if (mem) {
- addr = mem->start << PAGE_SHIFT;
+ addr = (u64)mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
bo_va->flags |= RADEON_VM_PAGE_VALID;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 17741843cf51..90dacab67be5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -226,9 +226,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
* system clock, and have no internal clock divider.
*/
- if (WARN_ON(!rcrtc->extclock))
- return;
-
/*
* The H3 ES1.x exhibits dot clock duty cycle stability issues.
* We can work around them by configuring the DPLL to twice the
@@ -701,7 +698,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
* CRTC will be put later in .atomic_disable().
*
* If a mode set is not in progress the CRTC is enabled, and the
- * following get call will be a no-op. There is thus no need to belance
+ * following get call will be a no-op. There is thus no need to balance
* it in .atomic_flush() either.
*/
rcar_du_crtc_get(rcrtc);
@@ -738,10 +735,22 @@ enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_device *rcdu = rcrtc->group->dev;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ unsigned int vbp;
if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
return MODE_NO_INTERLACE;
+ /*
+ * The hardware requires a minimum combined horizontal sync and back
+ * porch of 20 pixels and a minimum vertical back porch of 3 lines.
+ */
+ if (mode->htotal - mode->hsync_start < 20)
+ return MODE_HBLANK_NARROW;
+
+ vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
+ if (vbp < 3)
+ return MODE_VBLANK_NARROW;
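+	/*
+	 * Worked example, assuming the standard CEA 1920x1080i timings
+	 * (htotal 2200, hsync_start 2008, vtotal 1125, vsync_end 1094):
+	 * htotal - hsync_start = 192 >= 20 and vbp = (1125 - 1094) / 2 =
+	 * 15 >= 3, so the mode is accepted.
+	 */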
+
return MODE_OK;
}
@@ -1002,7 +1011,7 @@ unlock:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- return 0;
+ return ret;
}
static const struct drm_crtc_funcs crtc_funcs_gen2 = {
@@ -1113,9 +1122,16 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
clk = devm_clk_get(rcdu->dev, clk_name);
if (!IS_ERR(clk)) {
rcrtc->extclock = clk;
- } else if (PTR_ERR(rcrtc->clock) == -EPROBE_DEFER) {
- dev_info(rcdu->dev, "can't get external clock %u\n", hwindex);
+ } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
+ } else if (rcdu->info->dpll_mask & BIT(hwindex)) {
+ /*
+ * DU channels that have a display PLL can't use the internal
+ * system clock and thus require an external clock.
+ */
+ ret = PTR_ERR(clk);
+ dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret);
+ return ret;
}
init_waitqueue_head(&rcrtc->flip_wait);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 084f58df4a8c..f50a3b1864bb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -21,6 +21,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "rcar_du_drv.h"
@@ -41,7 +42,7 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
- * R8A7743 has one RGB output and one LVDS output
+ * R8A774[34] has one RGB output and one LVDS output
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(1) | BIT(0),
@@ -77,6 +78,33 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
},
};
+static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
+ .gen = 2,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_INTERLACED
+ | RCAR_DU_FEATURE_TVM_SYNC,
+ .channels_mask = BIT(1) | BIT(0),
+ .routes = {
+ /*
+ * R8A77470 has two RGB outputs, one LVDS output, and
+ * one (currently unsupported) analog video output
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(0),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_DPAD1] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0) | BIT(1),
+ .port = 2,
+ },
+ },
+};
+
static const struct rcar_du_device_info rcar_du_r8a7779_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_INTERLACED
@@ -341,7 +369,9 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
+ { .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info },
{ .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
+ { .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
@@ -363,19 +393,11 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
* DRM operations
*/
-static void rcar_du_lastclose(struct drm_device *dev)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(rcdu->fbdev);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
static struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
| DRIVER_ATOMIC,
- .lastclose = rcar_du_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -404,32 +426,15 @@ static struct drm_driver rcar_du_driver = {
static int rcar_du_pm_suspend(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
- struct drm_atomic_state *state;
- drm_kms_helper_poll_disable(rcdu->ddev);
- drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, true);
-
- state = drm_atomic_helper_suspend(rcdu->ddev);
- if (IS_ERR(state)) {
- drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, false);
- drm_kms_helper_poll_enable(rcdu->ddev);
- return PTR_ERR(state);
- }
-
- rcdu->suspend_state = state;
-
- return 0;
+ return drm_mode_config_helper_suspend(rcdu->ddev);
}
static int rcar_du_pm_resume(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
- drm_atomic_helper_resume(rcdu->ddev, rcdu->suspend_state);
- drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, false);
- drm_kms_helper_poll_enable(rcdu->ddev);
-
- return 0;
+ return drm_mode_config_helper_resume(rcdu->ddev);
}
#endif
@@ -448,13 +453,10 @@ static int rcar_du_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
- if (rcdu->fbdev)
- drm_fbdev_cma_fini(rcdu->fbdev);
-
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return 0;
}
@@ -510,6 +512,8 @@ static int rcar_du_probe(struct platform_device *pdev)
DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
+ drm_fbdev_generic_setup(ddev, 32);
+
return 0;
error:
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 143c037e2c0f..a68da79b424e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -20,7 +20,6 @@
struct clk;
struct device;
struct drm_device;
-struct drm_fbdev_cma;
struct rcar_du_device;
#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */
@@ -78,8 +77,6 @@ struct rcar_du_device {
void __iomem *mmio;
struct drm_device *ddev;
- struct drm_fbdev_cma *fbdev;
- struct drm_atomic_state *suspend_state;
struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
unsigned int num_crtcs;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 4ebd61ecbee1..9c7007d45408 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -255,13 +255,6 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
-static void rcar_du_output_poll_changed(struct drm_device *dev)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
-
- drm_fbdev_cma_hotplug_event(rcdu->fbdev);
-}
-
/* -----------------------------------------------------------------------------
* Atomic Check and Update
*/
@@ -308,7 +301,6 @@ static const struct drm_mode_config_helper_funcs rcar_du_mode_config_helper = {
static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
.fb_create = rcar_du_fb_create,
- .output_poll_changed = rcar_du_output_poll_changed,
.atomic_check = rcar_du_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -543,7 +535,6 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
struct drm_device *dev = rcdu->ddev;
struct drm_encoder *encoder;
- struct drm_fbdev_cma *fbdev;
unsigned int dpad0_sources;
unsigned int num_encoders;
unsigned int num_groups;
@@ -582,7 +573,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
* Initialize vertical blanking interrupts handling. Start with vblank
* disabled for all CRTCs.
*/
- ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
+ ret = drm_vblank_init(dev, rcdu->num_crtcs);
if (ret < 0)
return ret;
@@ -682,17 +673,5 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
drm_kms_helper_poll_init(dev);
- if (dev->mode_config.num_connector) {
- fbdev = drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_connector);
- if (IS_ERR(fbdev))
- return PTR_ERR(fbdev);
-
- rcdu->fbdev = fbdev;
- } else {
- dev_info(rcdu->dev,
- "no connector found, disabling fbdev emulation\n");
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 9e07758a755c..39d5ae3fdf72 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -783,13 +783,14 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
drm_plane_helper_add(&plane->plane,
&rcar_du_plane_helper_funcs);
+ drm_plane_create_alpha_property(&plane->plane);
+
if (type == DRM_PLANE_TYPE_PRIMARY)
continue;
drm_object_attach_property(&plane->plane.base,
rcdu->props.colorkey,
RCAR_DU_COLORKEY_NONE);
- drm_plane_create_alpha_property(&plane->plane);
drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 173d7ad0b991..534a128a869d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -790,6 +790,7 @@ static const struct of_device_id rcar_lvds_of_table[] = {
{ .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a7796-lvds", .data = &rcar_lvds_gen3_info },
+ { .compatible = "renesas,r8a77965-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a77970-lvds", .data = &rcar_lvds_r8a77970_info },
{ .compatible = "renesas,r8a77980-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a77990-lvds", .data = &rcar_lvds_r8a77990_info },
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 26438d45732b..1e75196f9659 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -7,7 +7,7 @@ config DRM_ROCKCHIP
select VIDEOMODE_HELPERS
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
- select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+ select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
select DRM_RGB if ROCKCHIP_RGB
select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
help
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 868263ff0302..f6fc9d5dd0ad 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -11,7 +11,7 @@ rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
-rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
+rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 3105965fc260..5a485489a1e2 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -147,7 +147,7 @@ static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
}
static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
- u8 *buff, u8 buff_size)
+ u8 *buff, u16 buff_size)
{
u32 i;
int ret;
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
new file mode 100644
index 000000000000..7ee359bcee62
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -0,0 +1,1076 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:
+ * Chris Zhong <zyw@rock-chips.com>
+ * Nickey Yang <nickey.yang@rock-chips.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/bridge/dw_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <video/mipi_display.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#define DSI_PHY_RSTZ 0xa0
+#define PHY_DISFORCEPLL 0
+#define PHY_ENFORCEPLL BIT(3)
+#define PHY_DISABLECLK 0
+#define PHY_ENABLECLK BIT(2)
+#define PHY_RSTZ 0
+#define PHY_UNRSTZ BIT(1)
+#define PHY_SHUTDOWNZ 0
+#define PHY_UNSHUTDOWNZ BIT(0)
+
+#define DSI_PHY_IF_CFG 0xa4
+#define N_LANES(n) ((((n) - 1) & 0x3) << 0)
+#define PHY_STOP_WAIT_TIME(cycle) (((cycle) & 0xff) << 8)
+
+#define DSI_PHY_STATUS 0xb0
+#define LOCK BIT(0)
+#define STOP_STATE_CLK_LANE BIT(2)
+
+#define DSI_PHY_TST_CTRL0 0xb4
+#define PHY_TESTCLK BIT(1)
+#define PHY_UNTESTCLK 0
+#define PHY_TESTCLR BIT(0)
+#define PHY_UNTESTCLR 0
+
+#define DSI_PHY_TST_CTRL1 0xb8
+#define PHY_TESTEN BIT(16)
+#define PHY_UNTESTEN 0
+#define PHY_TESTDOUT(n) (((n) & 0xff) << 8)
+#define PHY_TESTDIN(n) (((n) & 0xff) << 0)
+
+#define DSI_INT_ST0 0xbc
+#define DSI_INT_ST1 0xc0
+#define DSI_INT_MSK0 0xc4
+#define DSI_INT_MSK1 0xc8
+
+#define PHY_STATUS_TIMEOUT_US 10000
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+#define BYPASS_VCO_RANGE BIT(7)
+#define VCO_RANGE_CON_SEL(val) (((val) & 0x7) << 3)
+#define VCO_IN_CAP_CON_DEFAULT (0x0 << 1)
+#define VCO_IN_CAP_CON_LOW (0x1 << 1)
+#define VCO_IN_CAP_CON_HIGH (0x2 << 1)
+#define REF_BIAS_CUR_SEL BIT(0)
+
+#define CP_CURRENT_3UA 0x1
+#define CP_CURRENT_4_5UA 0x2
+#define CP_CURRENT_7_5UA 0x6
+#define CP_CURRENT_6UA 0x9
+#define CP_CURRENT_12UA 0xb
+#define CP_CURRENT_SEL(val) ((val) & 0xf)
+#define CP_PROGRAM_EN BIT(7)
+
+#define LPF_RESISTORS_15_5KOHM 0x1
+#define LPF_RESISTORS_13KOHM 0x2
+#define LPF_RESISTORS_11_5KOHM 0x4
+#define LPF_RESISTORS_10_5KOHM 0x8
+#define LPF_RESISTORS_8KOHM 0x10
+#define LPF_PROGRAM_EN BIT(6)
+#define LPF_RESISTORS_SEL(val) ((val) & 0x3f)
+
+#define HSFREQRANGE_SEL(val) (((val) & 0x3f) << 1)
+
+#define INPUT_DIVIDER(val) (((val) - 1) & 0x7f)
+#define LOW_PROGRAM_EN 0
+#define HIGH_PROGRAM_EN BIT(7)
+#define LOOP_DIV_LOW_SEL(val) (((val) - 1) & 0x1f)
+#define LOOP_DIV_HIGH_SEL(val) ((((val) - 1) >> 5) & 0xf)
+#define PLL_LOOP_DIV_EN BIT(5)
+#define PLL_INPUT_DIV_EN BIT(4)
+
+#define POWER_CONTROL BIT(6)
+#define INTERNAL_REG_CURRENT BIT(3)
+#define BIAS_BLOCK_ON BIT(2)
+#define BANDGAP_ON BIT(0)
+
+#define TER_RESISTOR_HIGH BIT(7)
+#define TER_RESISTOR_LOW 0
+#define LEVEL_SHIFTERS_ON BIT(6)
+#define TER_CAL_DONE BIT(5)
+#define SETRD_MAX (0x7 << 2)
+#define POWER_MANAGE BIT(1)
+#define TER_RESISTORS_ON BIT(0)
+
+#define BIASEXTR_SEL(val) ((val) & 0x7)
+#define BANDGAP_SEL(val) ((val) & 0x7)
+#define TLP_PROGRAM_EN BIT(7)
+#define THS_PRE_PROGRAM_EN BIT(7)
+#define THS_ZERO_PROGRAM_EN BIT(6)
+
+#define PLL_BIAS_CUR_SEL_CAP_VCO_CONTROL 0x10
+#define PLL_CP_CONTROL_PLL_LOCK_BYPASS 0x11
+#define PLL_LPF_AND_CP_CONTROL 0x12
+#define PLL_INPUT_DIVIDER_RATIO 0x17
+#define PLL_LOOP_DIVIDER_RATIO 0x18
+#define PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL 0x19
+#define BANDGAP_AND_BIAS_CONTROL 0x20
+#define TERMINATION_RESISTER_CONTROL 0x21
+#define AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY 0x22
+#define HS_RX_CONTROL_OF_LANE_0 0x44
+#define HS_TX_CLOCK_LANE_REQUEST_STATE_TIME_CONTROL 0x60
+#define HS_TX_CLOCK_LANE_PREPARE_STATE_TIME_CONTROL 0x61
+#define HS_TX_CLOCK_LANE_HS_ZERO_STATE_TIME_CONTROL 0x62
+#define HS_TX_CLOCK_LANE_TRAIL_STATE_TIME_CONTROL 0x63
+#define HS_TX_CLOCK_LANE_EXIT_STATE_TIME_CONTROL 0x64
+#define HS_TX_CLOCK_LANE_POST_TIME_CONTROL 0x65
+#define HS_TX_DATA_LANE_REQUEST_STATE_TIME_CONTROL 0x70
+#define HS_TX_DATA_LANE_PREPARE_STATE_TIME_CONTROL 0x71
+#define HS_TX_DATA_LANE_HS_ZERO_STATE_TIME_CONTROL 0x72
+#define HS_TX_DATA_LANE_TRAIL_STATE_TIME_CONTROL 0x73
+#define HS_TX_DATA_LANE_EXIT_STATE_TIME_CONTROL 0x74
+
+#define DW_MIPI_NEEDS_PHY_CFG_CLK BIT(0)
+#define DW_MIPI_NEEDS_GRF_CLK BIT(1)
+
+#define RK3288_GRF_SOC_CON6 0x025c
+#define RK3288_DSI0_LCDC_SEL BIT(6)
+#define RK3288_DSI1_LCDC_SEL BIT(9)
+
+#define RK3399_GRF_SOC_CON20 0x6250
+#define RK3399_DSI0_LCDC_SEL BIT(0)
+#define RK3399_DSI1_LCDC_SEL BIT(4)
+
+#define RK3399_GRF_SOC_CON22 0x6258
+#define RK3399_DSI0_TURNREQUEST (0xf << 12)
+#define RK3399_DSI0_TURNDISABLE (0xf << 8)
+#define RK3399_DSI0_FORCETXSTOPMODE (0xf << 4)
+#define RK3399_DSI0_FORCERXMODE (0xf << 0)
+
+#define RK3399_GRF_SOC_CON23 0x625c
+#define RK3399_DSI1_TURNDISABLE (0xf << 12)
+#define RK3399_DSI1_FORCETXSTOPMODE (0xf << 8)
+#define RK3399_DSI1_FORCERXMODE (0xf << 4)
+#define RK3399_DSI1_ENABLE (0xf << 0)
+
+#define RK3399_GRF_SOC_CON24 0x6260
+#define RK3399_TXRX_MASTERSLAVEZ BIT(7)
+#define RK3399_TXRX_ENABLECLK BIT(6)
+#define RK3399_TXRX_BASEDIR BIT(5)
+
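+/*
+ * Rockchip GRF registers use the upper 16 bits as a write-enable mask
+ * for the lower 16 bits: only bits whose mask bit is set are updated.
+ * For example, HIWORD_UPDATE(RK3288_DSI0_LCDC_SEL, RK3288_DSI0_LCDC_SEL)
+ * sets bit 6 and its write-enable bit 22, leaving all other bits as-is.
+ */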
+#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
+
+#define to_dsi(nm) container_of(nm, struct dw_mipi_dsi_rockchip, nm)
+
+enum {
+ BANDGAP_97_07,
+ BANDGAP_98_05,
+ BANDGAP_99_02,
+ BANDGAP_100_00,
+ BANDGAP_93_17,
+ BANDGAP_94_15,
+ BANDGAP_95_12,
+ BANDGAP_96_10,
+};
+
+enum {
+ BIASEXTR_87_1,
+ BIASEXTR_91_5,
+ BIASEXTR_95_9,
+ BIASEXTR_100,
+ BIASEXTR_105_94,
+ BIASEXTR_111_88,
+ BIASEXTR_118_8,
+ BIASEXTR_127_7,
+};
+
+struct rockchip_dw_dsi_chip_data {
+ u32 reg;
+
+ u32 lcdsel_grf_reg;
+ u32 lcdsel_big;
+ u32 lcdsel_lit;
+
+ u32 enable_grf_reg;
+ u32 enable;
+
+ u32 lanecfg1_grf_reg;
+ u32 lanecfg1;
+ u32 lanecfg2_grf_reg;
+ u32 lanecfg2;
+
+ unsigned int flags;
+ unsigned int max_data_lanes;
+};
+
+struct dw_mipi_dsi_rockchip {
+ struct device *dev;
+ struct drm_encoder encoder;
+ void __iomem *base;
+
+ struct regmap *grf_regmap;
+ struct clk *pllref_clk;
+ struct clk *grf_clk;
+ struct clk *phy_cfg_clk;
+
+ /* dual-channel */
+ bool is_slave;
+ struct dw_mipi_dsi_rockchip *slave;
+
+ unsigned int lane_mbps; /* per lane */
+ u16 input_div;
+ u16 feedback_div;
+ u32 format;
+
+ struct dw_mipi_dsi *dmd;
+ const struct rockchip_dw_dsi_chip_data *cdata;
+ struct dw_mipi_dsi_plat_data pdata;
+ int devcnt;
+};
+
+struct dphy_pll_parameter_map {
+ unsigned int max_mbps;
+ u8 hsfreqrange;
+ u8 icpctrl;
+ u8 lpfctrl;
+};
+
+/* The table is based on a 27 MHz DPHY PLL reference clock. */
+static const struct dphy_pll_parameter_map dppa_map[] = {
+ { 89, 0x00, CP_CURRENT_3UA, LPF_RESISTORS_13KOHM },
+ { 99, 0x10, CP_CURRENT_3UA, LPF_RESISTORS_13KOHM },
+ { 109, 0x20, CP_CURRENT_3UA, LPF_RESISTORS_13KOHM },
+ { 129, 0x01, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+ { 139, 0x11, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+ { 149, 0x21, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+ { 169, 0x02, CP_CURRENT_6UA, LPF_RESISTORS_13KOHM },
+ { 179, 0x12, CP_CURRENT_6UA, LPF_RESISTORS_13KOHM },
+ { 199, 0x22, CP_CURRENT_6UA, LPF_RESISTORS_13KOHM },
+ { 219, 0x03, CP_CURRENT_4_5UA, LPF_RESISTORS_13KOHM },
+ { 239, 0x13, CP_CURRENT_4_5UA, LPF_RESISTORS_13KOHM },
+ { 249, 0x23, CP_CURRENT_4_5UA, LPF_RESISTORS_13KOHM },
+ { 269, 0x04, CP_CURRENT_6UA, LPF_RESISTORS_11_5KOHM },
+ { 299, 0x14, CP_CURRENT_6UA, LPF_RESISTORS_11_5KOHM },
+ { 329, 0x05, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+ { 359, 0x15, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+ { 399, 0x25, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+ { 449, 0x06, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 499, 0x16, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 549, 0x07, CP_CURRENT_7_5UA, LPF_RESISTORS_10_5KOHM },
+ { 599, 0x17, CP_CURRENT_7_5UA, LPF_RESISTORS_10_5KOHM },
+ { 649, 0x08, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 699, 0x18, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 749, 0x09, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 799, 0x19, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 849, 0x29, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 899, 0x39, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 949, 0x0a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
+ { 999, 0x1a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
+ {1049, 0x2a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
+ {1099, 0x3a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
+ {1149, 0x0b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1199, 0x1b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1249, 0x2b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1299, 0x3b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1349, 0x0c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1399, 0x1c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1449, 0x2c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1500, 0x3c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM }
+};
+
+static int max_mbps_to_parameter(unsigned int max_mbps)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dppa_map); i++)
+ if (dppa_map[i].max_mbps >= max_mbps)
+ return i;
+
+ return -EINVAL;
+}
+
+static inline void dsi_write(struct dw_mipi_dsi_rockchip *dsi, u32 reg, u32 val)
+{
+ writel(val, dsi->base + reg);
+}
+
+static inline u32 dsi_read(struct dw_mipi_dsi_rockchip *dsi, u32 reg)
+{
+ return readl(dsi->base + reg);
+}
+
+static inline void dsi_set(struct dw_mipi_dsi_rockchip *dsi, u32 reg, u32 mask)
+{
+ dsi_write(dsi, reg, dsi_read(dsi, reg) | mask);
+}
+
+static inline void dsi_update_bits(struct dw_mipi_dsi_rockchip *dsi, u32 reg,
+ u32 mask, u32 val)
+{
+ dsi_write(dsi, reg, (dsi_read(dsi, reg) & ~mask) | val);
+}
+
+static void dw_mipi_dsi_phy_write(struct dw_mipi_dsi_rockchip *dsi,
+ u8 test_code,
+ u8 test_data)
+{
+	/*
+	 * On the falling edge of TESTCLK, the TESTDIN[7:0] signal content
+	 * is latched internally as the current test code. Test data is
+	 * programmed internally on the rising edge of TESTCLK.
+	 */
+ dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
+
+ dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_TESTEN | PHY_TESTDOUT(0) |
+ PHY_TESTDIN(test_code));
+
+ dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLK | PHY_UNTESTCLR);
+
+ dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_UNTESTEN | PHY_TESTDOUT(0) |
+ PHY_TESTDIN(test_data));
+
+ dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
+}
+
+/**
+ * ns2bc - Nanoseconds to byte clock cycles
+ */
+static inline unsigned int ns2bc(struct dw_mipi_dsi_rockchip *dsi, int ns)
+{
+ return DIV_ROUND_UP(ns * dsi->lane_mbps / 8, 1000);
+}
+
+/**
+ * ns2ui - Nanoseconds to UI time periods
+ */
+static inline unsigned int ns2ui(struct dw_mipi_dsi_rockchip *dsi, int ns)
+{
+ return DIV_ROUND_UP(ns * dsi->lane_mbps, 1000);
+}
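+
+/*
+ * Worked example: at 1000 Mbps per lane the byte clock is 125 MHz
+ * (8 ns per cycle), so ns2bc(dsi, 500) = DIV_ROUND_UP(500 * 1000 / 8,
+ * 1000) = 63 cycles and ns2ui(dsi, 500) = 500 UI.
+ */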
+
+static int dw_mipi_dsi_phy_init(void *priv_data)
+{
+ struct dw_mipi_dsi_rockchip *dsi = priv_data;
+ int ret, i, vco;
+
+ /*
+	 * Get the VCO range setting from the frequency (lane_mbps).
+	 * VCO frequency table:
+ * 000 - between 80 and 200 MHz
+ * 001 - between 200 and 300 MHz
+ * 010 - between 300 and 500 MHz
+ * 011 - between 500 and 700 MHz
+ * 100 - between 700 and 900 MHz
+ * 101 - between 900 and 1100 MHz
+ * 110 - between 1100 and 1300 MHz
+ * 111 - between 1300 and 1500 MHz
+ */
+ vco = (dsi->lane_mbps < 200) ? 0 : (dsi->lane_mbps + 100) / 200;
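+	/*
+	 * For example, lane_mbps = 999 gives vco = (999 + 100) / 200 = 5
+	 * (binary 101), i.e. the 900 - 1100 MHz range in the table above.
+	 */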
+
+ i = max_mbps_to_parameter(dsi->lane_mbps);
+ if (i < 0) {
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to get parameter for %dmbps clock\n",
+ dsi->lane_mbps);
+ return i;
+ }
+
+ ret = clk_prepare_enable(dsi->phy_cfg_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk\n");
+ return ret;
+ }
+
+ dw_mipi_dsi_phy_write(dsi, PLL_BIAS_CUR_SEL_CAP_VCO_CONTROL,
+ BYPASS_VCO_RANGE |
+ VCO_RANGE_CON_SEL(vco) |
+ VCO_IN_CAP_CON_LOW |
+ REF_BIAS_CUR_SEL);
+
+ dw_mipi_dsi_phy_write(dsi, PLL_CP_CONTROL_PLL_LOCK_BYPASS,
+ CP_CURRENT_SEL(dppa_map[i].icpctrl));
+ dw_mipi_dsi_phy_write(dsi, PLL_LPF_AND_CP_CONTROL,
+ CP_PROGRAM_EN | LPF_PROGRAM_EN |
+ LPF_RESISTORS_SEL(dppa_map[i].lpfctrl));
+
+ dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_0,
+ HSFREQRANGE_SEL(dppa_map[i].hsfreqrange));
+
+ dw_mipi_dsi_phy_write(dsi, PLL_INPUT_DIVIDER_RATIO,
+ INPUT_DIVIDER(dsi->input_div));
+ dw_mipi_dsi_phy_write(dsi, PLL_LOOP_DIVIDER_RATIO,
+ LOOP_DIV_LOW_SEL(dsi->feedback_div) |
+ LOW_PROGRAM_EN);
+	/*
+	 * We need to set PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL
+	 * immediately to make the configured LSB effective, according to
+	 * IP simulation and lab test results.
+	 * Only this sequence yields the correct MIPI PHY PLL frequency.
+	 */
+ dw_mipi_dsi_phy_write(dsi, PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL,
+ PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);
+ dw_mipi_dsi_phy_write(dsi, PLL_LOOP_DIVIDER_RATIO,
+ LOOP_DIV_HIGH_SEL(dsi->feedback_div) |
+ HIGH_PROGRAM_EN);
+ dw_mipi_dsi_phy_write(dsi, PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL,
+ PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);
+
+ dw_mipi_dsi_phy_write(dsi, AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY,
+ LOW_PROGRAM_EN | BIASEXTR_SEL(BIASEXTR_127_7));
+ dw_mipi_dsi_phy_write(dsi, AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY,
+ HIGH_PROGRAM_EN | BANDGAP_SEL(BANDGAP_96_10));
+
+ dw_mipi_dsi_phy_write(dsi, BANDGAP_AND_BIAS_CONTROL,
+ POWER_CONTROL | INTERNAL_REG_CURRENT |
+ BIAS_BLOCK_ON | BANDGAP_ON);
+
+ dw_mipi_dsi_phy_write(dsi, TERMINATION_RESISTER_CONTROL,
+ TER_RESISTOR_LOW | TER_CAL_DONE |
+ SETRD_MAX | TER_RESISTORS_ON);
+ dw_mipi_dsi_phy_write(dsi, TERMINATION_RESISTER_CONTROL,
+ TER_RESISTOR_HIGH | LEVEL_SHIFTERS_ON |
+ SETRD_MAX | POWER_MANAGE |
+ TER_RESISTORS_ON);
+
+ dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_REQUEST_STATE_TIME_CONTROL,
+ TLP_PROGRAM_EN | ns2bc(dsi, 500));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_PREPARE_STATE_TIME_CONTROL,
+ THS_PRE_PROGRAM_EN | ns2ui(dsi, 40));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_HS_ZERO_STATE_TIME_CONTROL,
+ THS_ZERO_PROGRAM_EN | ns2bc(dsi, 300));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_TRAIL_STATE_TIME_CONTROL,
+ THS_PRE_PROGRAM_EN | ns2ui(dsi, 100));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_EXIT_STATE_TIME_CONTROL,
+ BIT(5) | ns2bc(dsi, 100));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_POST_TIME_CONTROL,
+ BIT(5) | (ns2bc(dsi, 60) + 7));
+
+ dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_REQUEST_STATE_TIME_CONTROL,
+ TLP_PROGRAM_EN | ns2bc(dsi, 500));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_PREPARE_STATE_TIME_CONTROL,
+ THS_PRE_PROGRAM_EN | (ns2ui(dsi, 50) + 20));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_HS_ZERO_STATE_TIME_CONTROL,
+ THS_ZERO_PROGRAM_EN | (ns2bc(dsi, 140) + 2));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_TRAIL_STATE_TIME_CONTROL,
+ THS_PRE_PROGRAM_EN | (ns2ui(dsi, 60) + 8));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_EXIT_STATE_TIME_CONTROL,
+ BIT(5) | ns2bc(dsi, 100));
+
+ clk_disable_unprepare(dsi->phy_cfg_clk);
+
+ return ret;
+}
+
+static int
+dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode,
+ unsigned long mode_flags, u32 lanes, u32 format,
+ unsigned int *lane_mbps)
+{
+ struct dw_mipi_dsi_rockchip *dsi = priv_data;
+ int bpp;
+ unsigned long mpclk, tmp;
+ unsigned int target_mbps = 1000;
+ unsigned int max_mbps = dppa_map[ARRAY_SIZE(dppa_map) - 1].max_mbps;
+ unsigned long best_freq = 0;
+ unsigned long fvco_min, fvco_max, fin, fout;
+ unsigned int min_prediv, max_prediv;
+ unsigned int _prediv, uninitialized_var(best_prediv);
+ unsigned long _fbdiv, uninitialized_var(best_fbdiv);
+ unsigned long min_delta = ULONG_MAX;
+
+ dsi->format = format;
+ bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ if (bpp < 0) {
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to get bpp for pixel format %d\n",
+ dsi->format);
+ return bpp;
+ }
+
+ mpclk = DIV_ROUND_UP(mode->clock, MSEC_PER_SEC);
+ if (mpclk) {
+		/* take 1 / 0.8, since the Mbps lane rate must exceed the RGB bandwidth */
+ tmp = mpclk * (bpp / lanes) * 10 / 8;
+ if (tmp < max_mbps)
+ target_mbps = tmp;
+ else
+ DRM_DEV_ERROR(dsi->dev,
+ "DPHY clock frequency is out of range\n");
+ }
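+	/*
+	 * Worked example: a 148.5 MHz pixel clock (1080p) with RGB888 on
+	 * four lanes gives mpclk = 149 and tmp = 149 * (24 / 4) * 10 / 8
+	 * = 1117, so target_mbps becomes 1117.
+	 */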
+
+ fin = clk_get_rate(dsi->pllref_clk);
+ fout = target_mbps * USEC_PER_SEC;
+
+	/* constraint: 5MHz <= Fref / N <= 40MHz */
+ min_prediv = DIV_ROUND_UP(fin, 40 * USEC_PER_SEC);
+ max_prediv = fin / (5 * USEC_PER_SEC);
+
+	/* constraint: 80MHz <= Fvco <= 1500MHz */
+ fvco_min = 80 * USEC_PER_SEC;
+ fvco_max = 1500 * USEC_PER_SEC;
+
+ for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
+ u64 tmp;
+ u32 delta;
+ /* Fvco = Fref * M / N */
+ tmp = (u64)fout * _prediv;
+ do_div(tmp, fin);
+ _fbdiv = tmp;
+		/*
+		 * Due to the use of a "by 2 pre-scaler", the feedback
+		 * multiplication value M is limited to even numbers, and
+		 * M must lie between 6 and 512.
+		 */
+ if (_fbdiv < 6 || _fbdiv > 512)
+ continue;
+
+ _fbdiv += _fbdiv % 2;
+
+ tmp = (u64)_fbdiv * fin;
+ do_div(tmp, _prediv);
+ if (tmp < fvco_min || tmp > fvco_max)
+ continue;
+
+ delta = abs(fout - tmp);
+ if (delta < min_delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ min_delta = delta;
+ best_freq = tmp;
+ }
+ }
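+	/*
+	 * Worked example: with fin = 27 MHz and target_mbps = 1000 the
+	 * search tries N = 1..5; N = 2 gives the even M = 74 and Fvco =
+	 * 74 * 27 / 2 = 999 MHz, the closest match, so lane_mbps = 999.
+	 */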
+
+ if (best_freq) {
+ dsi->lane_mbps = DIV_ROUND_UP(best_freq, USEC_PER_SEC);
+ *lane_mbps = dsi->lane_mbps;
+ dsi->input_div = best_prediv;
+ dsi->feedback_div = best_fbdiv;
+ } else {
+		DRM_DEV_ERROR(dsi->dev, "Cannot find best_freq for DPHY\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
+ .init = dw_mipi_dsi_phy_init,
+ .get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
+};
+
+static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
+ int mux)
+{
+ if (dsi->cdata->lcdsel_grf_reg)
+ regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
+ mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
+
+ if (dsi->cdata->lanecfg1_grf_reg)
+ regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg1_grf_reg,
+ dsi->cdata->lanecfg1);
+
+ if (dsi->cdata->lanecfg2_grf_reg)
+ regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg2_grf_reg,
+ dsi->cdata->lanecfg2);
+
+ if (dsi->cdata->enable_grf_reg)
+ regmap_write(dsi->grf_regmap, dsi->cdata->enable_grf_reg,
+ dsi->cdata->enable);
+}
+
+static int
+dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ s->output_mode = ROCKCHIP_OUT_MODE_P666;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ s->output_mode = ROCKCHIP_OUT_MODE_P565;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ s->output_type = DRM_MODE_CONNECTOR_DSI;
+ if (dsi->slave)
+ s->output_flags = ROCKCHIP_OUTPUT_DSI_DUAL;
+
+ return 0;
+}
+
+static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
+ int ret, mux;
+
+ mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node,
+ &dsi->encoder);
+ if (mux < 0)
+ return;
+
+ pm_runtime_get_sync(dsi->dev);
+ if (dsi->slave)
+ pm_runtime_get_sync(dsi->slave->dev);
+
+	/*
+	 * For RK3399, the GRF clock must be enabled before writing a GRF
+	 * register. On RK3288 and other SoCs grf_clk is NULL, and
+	 * clk_prepare_enable() returns 0 immediately in that case.
+	 */
+ ret = clk_prepare_enable(dsi->grf_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
+ return;
+ }
+
+ dw_mipi_dsi_rockchip_config(dsi, mux);
+ if (dsi->slave)
+ dw_mipi_dsi_rockchip_config(dsi->slave, mux);
+
+ clk_disable_unprepare(dsi->grf_clk);
+}
+
+static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
+
+ if (dsi->slave)
+ pm_runtime_put(dsi->slave->dev);
+ pm_runtime_put(dsi->dev);
+}
+
+static const struct drm_encoder_helper_funcs
+dw_mipi_dsi_encoder_helper_funcs = {
+ .atomic_check = dw_mipi_dsi_encoder_atomic_check,
+ .enable = dw_mipi_dsi_encoder_enable,
+ .disable = dw_mipi_dsi_encoder_disable,
+};
+
+static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
+ struct drm_device *drm_dev)
+{
+ struct drm_encoder *encoder = &dsi->encoder;
+ int ret;
+
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+ dsi->dev->of_node);
+
+ ret = drm_encoder_init(drm_dev, encoder, &dw_mipi_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to initialize encoder with drm\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &dw_mipi_dsi_encoder_helper_funcs);
+
+ return 0;
+}
+
+static struct device
+*dw_mipi_dsi_rockchip_find_second(struct dw_mipi_dsi_rockchip *dsi)
+{
+ const struct of_device_id *match;
+ struct device_node *node = NULL, *local;
+
+ match = of_match_device(dsi->dev->driver->of_match_table, dsi->dev);
+
+ local = of_graph_get_remote_node(dsi->dev->of_node, 1, 0);
+ if (!local)
+ return NULL;
+
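+	/*
+	 * In a dual-channel setup both DSI nodes point at the same panel
+	 * from port@1, endpoint 0 in the device tree; scanning all
+	 * compatible nodes for a matching remote endpoint finds the
+	 * second instance.
+	 */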
+ while ((node = of_find_compatible_node(node, NULL,
+ match->compatible))) {
+ struct device_node *remote;
+
+		/* skip ourselves */
+ if (node == dsi->dev->of_node)
+ continue;
+
+ remote = of_graph_get_remote_node(node, 1, 0);
+ if (!remote)
+ continue;
+
+ /* same display device in port1-ep0 for both */
+ if (remote == local) {
+ struct dw_mipi_dsi_rockchip *dsi2;
+ struct platform_device *pdev;
+
+ pdev = of_find_device_by_node(node);
+
+			/*
+			 * We have found the second DSI instance, so we will
+			 * either return it or return an error. Either way we
+			 * no longer need the nodes and can stop the loop.
+			 */
+ of_node_put(remote);
+ of_node_put(node);
+ of_node_put(local);
+
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ dsi2 = platform_get_drvdata(pdev);
+ if (!dsi2) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return &pdev->dev;
+ }
+
+ of_node_put(remote);
+ }
+
+ of_node_put(local);
+
+ return NULL;
+}
+
+static int dw_mipi_dsi_rockchip_bind(struct device *dev,
+ struct device *master,
+ void *data)
+{
+ struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct device *second;
+ bool master1, master2;
+ int ret;
+
+ second = dw_mipi_dsi_rockchip_find_second(dsi);
+ if (IS_ERR(second))
+ return PTR_ERR(second);
+
+ if (second) {
+ master1 = of_property_read_bool(dsi->dev->of_node,
+ "clock-master");
+ master2 = of_property_read_bool(second->of_node,
+ "clock-master");
+
+ if (master1 && master2) {
+ DRM_DEV_ERROR(dsi->dev, "only one clock-master allowed\n");
+ return -EINVAL;
+ }
+
+ if (!master1 && !master2) {
+ DRM_DEV_ERROR(dsi->dev, "no clock-master defined\n");
+ return -EINVAL;
+ }
+
+ /* we are the slave in dual-DSI */
+ if (!master1) {
+ dsi->is_slave = true;
+ return 0;
+ }
+
+ dsi->slave = dev_get_drvdata(second);
+ if (!dsi->slave) {
+			DRM_DEV_ERROR(dev, "could not get slave's data\n");
+ return -ENODEV;
+ }
+
+ dsi->slave->is_slave = true;
+ dw_mipi_dsi_set_slave(dsi->dmd, dsi->slave->dmd);
+ put_device(second);
+ }
+
+ ret = clk_prepare_enable(dsi->pllref_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to enable pllref_clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
+ return ret;
+ }
+
+ ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to bind: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dw_mipi_dsi_rockchip_unbind(struct device *dev,
+ struct device *master,
+ void *data)
+{
+ struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
+
+ if (dsi->is_slave)
+ return;
+
+ dw_mipi_dsi_unbind(dsi->dmd);
+
+ clk_disable_unprepare(dsi->pllref_clk);
+}
+
+static const struct component_ops dw_mipi_dsi_rockchip_ops = {
+ .bind = dw_mipi_dsi_rockchip_bind,
+ .unbind = dw_mipi_dsi_rockchip_unbind,
+};
+
+static int dw_mipi_dsi_rockchip_host_attach(void *priv_data,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi_rockchip *dsi = priv_data;
+ struct device *second;
+ int ret;
+
+ ret = component_add(dsi->dev, &dw_mipi_dsi_rockchip_ops);
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to register component: %d\n",
+ ret);
+ return ret;
+ }
+
+ second = dw_mipi_dsi_rockchip_find_second(dsi);
+ if (IS_ERR(second))
+ return PTR_ERR(second);
+ if (second) {
+ ret = component_add(second, &dw_mipi_dsi_rockchip_ops);
+ if (ret) {
+ DRM_DEV_ERROR(second,
+ "Failed to register component: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int dw_mipi_dsi_rockchip_host_detach(void *priv_data,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi_rockchip *dsi = priv_data;
+ struct device *second;
+
+ second = dw_mipi_dsi_rockchip_find_second(dsi);
+ if (second && !IS_ERR(second))
+ component_del(second, &dw_mipi_dsi_rockchip_ops);
+
+ component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
+
+ return 0;
+}
+
+static const struct dw_mipi_dsi_host_ops dw_mipi_dsi_rockchip_host_ops = {
+ .attach = dw_mipi_dsi_rockchip_host_attach,
+ .detach = dw_mipi_dsi_rockchip_host_detach,
+};
+
+static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct dw_mipi_dsi_rockchip *dsi;
+ struct resource *res;
+ const struct rockchip_dw_dsi_chip_data *cdata =
+ of_device_get_match_data(dev);
+ int ret, i;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dsi->base)) {
+ DRM_DEV_ERROR(dev, "Unable to get dsi registers\n");
+ return PTR_ERR(dsi->base);
+ }
+
+ i = 0;
+ while (cdata[i].reg) {
+ if (cdata[i].reg == res->start) {
+ dsi->cdata = &cdata[i];
+ break;
+ }
+
+ i++;
+ }
+
+ if (!dsi->cdata) {
+ dev_err(dev, "no dsi-config for %s node\n", np->name);
+ return -EINVAL;
+ }
+
+ dsi->pllref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(dsi->pllref_clk)) {
+ ret = PTR_ERR(dsi->pllref_clk);
+ DRM_DEV_ERROR(dev,
+ "Unable to get pll reference clock: %d\n", ret);
+ return ret;
+ }
+
+ if (dsi->cdata->flags & DW_MIPI_NEEDS_PHY_CFG_CLK) {
+ dsi->phy_cfg_clk = devm_clk_get(dev, "phy_cfg");
+ if (IS_ERR(dsi->phy_cfg_clk)) {
+ ret = PTR_ERR(dsi->phy_cfg_clk);
+ DRM_DEV_ERROR(dev,
+ "Unable to get phy_cfg_clk: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (dsi->cdata->flags & DW_MIPI_NEEDS_GRF_CLK) {
+ dsi->grf_clk = devm_clk_get(dev, "grf");
+ if (IS_ERR(dsi->grf_clk)) {
+ ret = PTR_ERR(dsi->grf_clk);
+ DRM_DEV_ERROR(dev, "Unable to get grf_clk: %d\n", ret);
+ return ret;
+ }
+ }
+
+ dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ if (IS_ERR(dsi->grf_regmap)) {
+ DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
+ return PTR_ERR(dsi->grf_regmap);
+ }
+
+ dsi->dev = dev;
+ dsi->pdata.base = dsi->base;
+ dsi->pdata.max_data_lanes = dsi->cdata->max_data_lanes;
+ dsi->pdata.phy_ops = &dw_mipi_dsi_rockchip_phy_ops;
+ dsi->pdata.host_ops = &dw_mipi_dsi_rockchip_host_ops;
+ dsi->pdata.priv_data = dsi;
+ platform_set_drvdata(pdev, dsi);
+
+ dsi->dmd = dw_mipi_dsi_probe(pdev, &dsi->pdata);
+ if (IS_ERR(dsi->dmd)) {
+ ret = PTR_ERR(dsi->dmd);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to probe dw_mipi_dsi: %d\n", ret);
+ goto err_clkdisable;
+ }
+
+ return 0;
+
+err_clkdisable:
+ clk_disable_unprepare(dsi->pllref_clk);
+ return ret;
+}
+
+static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
+{
+ struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);
+
+ if (dsi->devcnt == 0)
+ component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
+
+ dw_mipi_dsi_remove(dsi->dmd);
+
+ return 0;
+}
+
+static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
+ {
+ .reg = 0xff960000,
+ .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI0_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3288_DSI0_LCDC_SEL, RK3288_DSI0_LCDC_SEL),
+
+ .max_data_lanes = 4,
+ },
+ {
+ .reg = 0xff964000,
+ .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI1_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3288_DSI1_LCDC_SEL, RK3288_DSI1_LCDC_SEL),
+
+ .max_data_lanes = 4,
+ },
+ { /* sentinel */ }
+};
+
+static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
+ {
+ .reg = 0xff960000,
+ .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI0_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3399_DSI0_LCDC_SEL,
+ RK3399_DSI0_LCDC_SEL),
+
+ .lanecfg1_grf_reg = RK3399_GRF_SOC_CON22,
+ .lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI0_TURNREQUEST |
+ RK3399_DSI0_TURNDISABLE |
+ RK3399_DSI0_FORCETXSTOPMODE |
+ RK3399_DSI0_FORCERXMODE),
+
+ .flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
+ .max_data_lanes = 4,
+ },
+ {
+ .reg = 0xff968000,
+ .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI1_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3399_DSI1_LCDC_SEL,
+ RK3399_DSI1_LCDC_SEL),
+
+ .lanecfg1_grf_reg = RK3399_GRF_SOC_CON23,
+ .lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI1_TURNDISABLE |
+ RK3399_DSI1_FORCETXSTOPMODE |
+ RK3399_DSI1_FORCERXMODE |
+ RK3399_DSI1_ENABLE),
+
+ .lanecfg2_grf_reg = RK3399_GRF_SOC_CON24,
+ .lanecfg2 = HIWORD_UPDATE(RK3399_TXRX_MASTERSLAVEZ |
+ RK3399_TXRX_ENABLECLK,
+ RK3399_TXRX_MASTERSLAVEZ |
+ RK3399_TXRX_ENABLECLK |
+ RK3399_TXRX_BASEDIR),
+
+ .enable_grf_reg = RK3399_GRF_SOC_CON23,
+ .enable = HIWORD_UPDATE(RK3399_DSI1_ENABLE, RK3399_DSI1_ENABLE),
+
+ .flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
+ .max_data_lanes = 4,
+ },
+ { /* sentinel */ }
+};
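The chip-data tables above lean on the Rockchip GRF write convention: the upper 16 bits of a register write act as a write-enable mask for the lower 16 bits, so individual fields can be updated without a read-modify-write cycle. A minimal sketch of the helper, assuming the usual Rockchip definition (the real macro is defined near the top of this file and may differ in parenthesization):

/* Hedged sketch of the conventional Rockchip helper, not a new API */
#define HIWORD_UPDATE(val, mask)	((val) | ((mask) << 16))

/*
 * Example: HIWORD_UPDATE(0, RK3399_DSI0_LCDC_SEL) writes a 0 to the
 * LCDC-select field while raising only that bit's write-enable in the
 * high half-word, leaving all other fields of the GRF register intact.
 */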
+
+static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
+ {
+ .compatible = "rockchip,rk3288-mipi-dsi",
+ .data = &rk3288_chip_data,
+ }, {
+ .compatible = "rockchip,rk3399-mipi-dsi",
+ .data = &rk3399_chip_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dw_mipi_dsi_rockchip_dt_ids);
+
+struct platform_driver dw_mipi_dsi_rockchip_driver = {
+ .probe = dw_mipi_dsi_rockchip_probe,
+ .remove = dw_mipi_dsi_rockchip_remove,
+ .driver = {
+ .of_match_table = dw_mipi_dsi_rockchip_dt_ids,
+ .name = "dw-mipi-dsi-rockchip",
+ },
+};
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
deleted file mode 100644
index 662b6cb5d3f0..000000000000
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ /dev/null
@@ -1,1349 +0,0 @@
-/*
- * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/iopoll.h>
-#include <linux/math64.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <linux/mfd/syscon.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-#include <drm/drmP.h>
-#include <video/mipi_display.h>
-
-#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
-
-#define DRIVER_NAME "dw-mipi-dsi"
-
-#define RK3288_GRF_SOC_CON6 0x025c
-#define RK3288_DSI0_SEL_VOP_LIT BIT(6)
-#define RK3288_DSI1_SEL_VOP_LIT BIT(9)
-
-#define RK3399_GRF_SOC_CON20 0x6250
-#define RK3399_DSI0_SEL_VOP_LIT BIT(0)
-#define RK3399_DSI1_SEL_VOP_LIT BIT(4)
-
-/* disable turnrequest, turndisable, forcetxstopmode, forcerxmode */
-#define RK3399_GRF_SOC_CON22 0x6258
-#define RK3399_GRF_DSI_MODE 0xffff0000
-
-#define DSI_VERSION 0x00
-#define DSI_PWR_UP 0x04
-#define RESET 0
-#define POWERUP BIT(0)
-
-#define DSI_CLKMGR_CFG 0x08
-#define TO_CLK_DIVIDSION(div) (((div) & 0xff) << 8)
-#define TX_ESC_CLK_DIVIDSION(div) (((div) & 0xff) << 0)
-
-#define DSI_DPI_VCID 0x0c
-#define DPI_VID(vid) (((vid) & 0x3) << 0)
-
-#define DSI_DPI_COLOR_CODING 0x10
-#define EN18_LOOSELY BIT(8)
-#define DPI_COLOR_CODING_16BIT_1 0x0
-#define DPI_COLOR_CODING_16BIT_2 0x1
-#define DPI_COLOR_CODING_16BIT_3 0x2
-#define DPI_COLOR_CODING_18BIT_1 0x3
-#define DPI_COLOR_CODING_18BIT_2 0x4
-#define DPI_COLOR_CODING_24BIT 0x5
-
-#define DSI_DPI_CFG_POL 0x14
-#define COLORM_ACTIVE_LOW BIT(4)
-#define SHUTD_ACTIVE_LOW BIT(3)
-#define HSYNC_ACTIVE_LOW BIT(2)
-#define VSYNC_ACTIVE_LOW BIT(1)
-#define DATAEN_ACTIVE_LOW BIT(0)
-
-#define DSI_DPI_LP_CMD_TIM 0x18
-#define OUTVACT_LPCMD_TIME(p) (((p) & 0xff) << 16)
-#define INVACT_LPCMD_TIME(p) ((p) & 0xff)
-
-#define DSI_DBI_CFG 0x20
-#define DSI_DBI_CMDSIZE 0x28
-
-#define DSI_PCKHDL_CFG 0x2c
-#define EN_CRC_RX BIT(4)
-#define EN_ECC_RX BIT(3)
-#define EN_BTA BIT(2)
-#define EN_EOTP_RX BIT(1)
-#define EN_EOTP_TX BIT(0)
-
-#define DSI_MODE_CFG 0x34
-#define ENABLE_VIDEO_MODE 0
-#define ENABLE_CMD_MODE BIT(0)
-
-#define DSI_VID_MODE_CFG 0x38
-#define FRAME_BTA_ACK BIT(14)
-#define ENABLE_LOW_POWER (0x3f << 8)
-#define ENABLE_LOW_POWER_MASK (0x3f << 8)
-#define VID_MODE_TYPE_NON_BURST_SYNC_PULSES 0x0
-#define VID_MODE_TYPE_NON_BURST_SYNC_EVENTS 0x1
-#define VID_MODE_TYPE_BURST 0x2
-#define VID_MODE_TYPE_MASK 0x3
-
-#define DSI_VID_PKT_SIZE 0x3c
-#define VID_PKT_SIZE(p) (((p) & 0x3fff) << 0)
-#define VID_PKT_MAX_SIZE 0x3fff
-
-#define DSI_VID_HSA_TIME 0x48
-#define DSI_VID_HBP_TIME 0x4c
-#define DSI_VID_HLINE_TIME 0x50
-#define DSI_VID_VSA_LINES 0x54
-#define DSI_VID_VBP_LINES 0x58
-#define DSI_VID_VFP_LINES 0x5c
-#define DSI_VID_VACTIVE_LINES 0x60
-#define DSI_CMD_MODE_CFG 0x68
-#define MAX_RD_PKT_SIZE_LP BIT(24)
-#define DCS_LW_TX_LP BIT(19)
-#define DCS_SR_0P_TX_LP BIT(18)
-#define DCS_SW_1P_TX_LP BIT(17)
-#define DCS_SW_0P_TX_LP BIT(16)
-#define GEN_LW_TX_LP BIT(14)
-#define GEN_SR_2P_TX_LP BIT(13)
-#define GEN_SR_1P_TX_LP BIT(12)
-#define GEN_SR_0P_TX_LP BIT(11)
-#define GEN_SW_2P_TX_LP BIT(10)
-#define GEN_SW_1P_TX_LP BIT(9)
-#define GEN_SW_0P_TX_LP BIT(8)
-#define EN_ACK_RQST BIT(1)
-#define EN_TEAR_FX BIT(0)
-
-#define CMD_MODE_ALL_LP (MAX_RD_PKT_SIZE_LP | \
- DCS_LW_TX_LP | \
- DCS_SR_0P_TX_LP | \
- DCS_SW_1P_TX_LP | \
- DCS_SW_0P_TX_LP | \
- GEN_LW_TX_LP | \
- GEN_SR_2P_TX_LP | \
- GEN_SR_1P_TX_LP | \
- GEN_SR_0P_TX_LP | \
- GEN_SW_2P_TX_LP | \
- GEN_SW_1P_TX_LP | \
- GEN_SW_0P_TX_LP)
-
-#define DSI_GEN_HDR 0x6c
-#define GEN_HDATA(data) (((data) & 0xffff) << 8)
-#define GEN_HDATA_MASK (0xffff << 8)
-#define GEN_HTYPE(type) (((type) & 0xff) << 0)
-#define GEN_HTYPE_MASK 0xff
-
-#define DSI_GEN_PLD_DATA 0x70
-
-#define DSI_CMD_PKT_STATUS 0x74
-#define GEN_CMD_EMPTY BIT(0)
-#define GEN_CMD_FULL BIT(1)
-#define GEN_PLD_W_EMPTY BIT(2)
-#define GEN_PLD_W_FULL BIT(3)
-#define GEN_PLD_R_EMPTY BIT(4)
-#define GEN_PLD_R_FULL BIT(5)
-#define GEN_RD_CMD_BUSY BIT(6)
-
-#define DSI_TO_CNT_CFG 0x78
-#define HSTX_TO_CNT(p) (((p) & 0xffff) << 16)
-#define LPRX_TO_CNT(p) ((p) & 0xffff)
-
-#define DSI_BTA_TO_CNT 0x8c
-#define DSI_LPCLK_CTRL 0x94
-#define AUTO_CLKLANE_CTRL BIT(1)
-#define PHY_TXREQUESTCLKHS BIT(0)
-
-#define DSI_PHY_TMR_LPCLK_CFG 0x98
-#define PHY_CLKHS2LP_TIME(lbcc) (((lbcc) & 0x3ff) << 16)
-#define PHY_CLKLP2HS_TIME(lbcc) ((lbcc) & 0x3ff)
-
-#define DSI_PHY_TMR_CFG 0x9c
-#define PHY_HS2LP_TIME(lbcc) (((lbcc) & 0xff) << 24)
-#define PHY_LP2HS_TIME(lbcc) (((lbcc) & 0xff) << 16)
-#define MAX_RD_TIME(lbcc) ((lbcc) & 0x7fff)
-
-#define DSI_PHY_RSTZ 0xa0
-#define PHY_DISFORCEPLL 0
-#define PHY_ENFORCEPLL BIT(3)
-#define PHY_DISABLECLK 0
-#define PHY_ENABLECLK BIT(2)
-#define PHY_RSTZ 0
-#define PHY_UNRSTZ BIT(1)
-#define PHY_SHUTDOWNZ 0
-#define PHY_UNSHUTDOWNZ BIT(0)
-
-#define DSI_PHY_IF_CFG 0xa4
-#define N_LANES(n) ((((n) - 1) & 0x3) << 0)
-#define PHY_STOP_WAIT_TIME(cycle) (((cycle) & 0xff) << 8)
-
-#define DSI_PHY_STATUS 0xb0
-#define LOCK BIT(0)
-#define STOP_STATE_CLK_LANE BIT(2)
-
-#define DSI_PHY_TST_CTRL0 0xb4
-#define PHY_TESTCLK BIT(1)
-#define PHY_UNTESTCLK 0
-#define PHY_TESTCLR BIT(0)
-#define PHY_UNTESTCLR 0
-
-#define DSI_PHY_TST_CTRL1 0xb8
-#define PHY_TESTEN BIT(16)
-#define PHY_UNTESTEN 0
-#define PHY_TESTDOUT(n) (((n) & 0xff) << 8)
-#define PHY_TESTDIN(n) (((n) & 0xff) << 0)
-
-#define DSI_INT_ST0 0xbc
-#define DSI_INT_ST1 0xc0
-#define DSI_INT_MSK0 0xc4
-#define DSI_INT_MSK1 0xc8
-
-#define PHY_STATUS_TIMEOUT_US 10000
-#define CMD_PKT_STATUS_TIMEOUT_US 20000
-
-#define BYPASS_VCO_RANGE BIT(7)
-#define VCO_RANGE_CON_SEL(val) (((val) & 0x7) << 3)
-#define VCO_IN_CAP_CON_DEFAULT (0x0 << 1)
-#define VCO_IN_CAP_CON_LOW (0x1 << 1)
-#define VCO_IN_CAP_CON_HIGH (0x2 << 1)
-#define REF_BIAS_CUR_SEL BIT(0)
-
-#define CP_CURRENT_3MA BIT(3)
-#define CP_PROGRAM_EN BIT(7)
-#define LPF_PROGRAM_EN BIT(6)
-#define LPF_RESISTORS_20_KOHM 0
-
-#define HSFREQRANGE_SEL(val) (((val) & 0x3f) << 1)
-
-#define INPUT_DIVIDER(val) (((val) - 1) & 0x7f)
-#define LOW_PROGRAM_EN 0
-#define HIGH_PROGRAM_EN BIT(7)
-#define LOOP_DIV_LOW_SEL(val) (((val) - 1) & 0x1f)
-#define LOOP_DIV_HIGH_SEL(val) ((((val) - 1) >> 5) & 0x1f)
-#define PLL_LOOP_DIV_EN BIT(5)
-#define PLL_INPUT_DIV_EN BIT(4)
-
-#define POWER_CONTROL BIT(6)
-#define INTERNAL_REG_CURRENT BIT(3)
-#define BIAS_BLOCK_ON BIT(2)
-#define BANDGAP_ON BIT(0)
-
-#define TER_RESISTOR_HIGH BIT(7)
-#define TER_RESISTOR_LOW 0
-#define LEVEL_SHIFTERS_ON BIT(6)
-#define TER_CAL_DONE BIT(5)
-#define SETRD_MAX (0x7 << 2)
-#define POWER_MANAGE BIT(1)
-#define TER_RESISTORS_ON BIT(0)
-
-#define BIASEXTR_SEL(val) ((val) & 0x7)
-#define BANDGAP_SEL(val) ((val) & 0x7)
-#define TLP_PROGRAM_EN BIT(7)
-#define THS_PRE_PROGRAM_EN BIT(7)
-#define THS_ZERO_PROGRAM_EN BIT(6)
-
-#define DW_MIPI_NEEDS_PHY_CFG_CLK BIT(0)
-#define DW_MIPI_NEEDS_GRF_CLK BIT(1)
-
-enum {
- BANDGAP_97_07,
- BANDGAP_98_05,
- BANDGAP_99_02,
- BANDGAP_100_00,
- BANDGAP_93_17,
- BANDGAP_94_15,
- BANDGAP_95_12,
- BANDGAP_96_10,
-};
-
-enum {
- BIASEXTR_87_1,
- BIASEXTR_91_5,
- BIASEXTR_95_9,
- BIASEXTR_100,
- BIASEXTR_105_94,
- BIASEXTR_111_88,
- BIASEXTR_118_8,
- BIASEXTR_127_7,
-};
-
-struct dw_mipi_dsi_plat_data {
- u32 dsi0_en_bit;
- u32 dsi1_en_bit;
- u32 grf_switch_reg;
- u32 grf_dsi0_mode;
- u32 grf_dsi0_mode_reg;
- unsigned int flags;
- unsigned int max_data_lanes;
-};
-
-struct dw_mipi_dsi {
- struct drm_encoder encoder;
- struct drm_connector connector;
- struct mipi_dsi_host dsi_host;
- struct drm_panel *panel;
- struct device *dev;
- struct regmap *grf_regmap;
- void __iomem *base;
-
- struct clk *grf_clk;
- struct clk *pllref_clk;
- struct clk *pclk;
- struct clk *phy_cfg_clk;
-
- int dpms_mode;
- unsigned int lane_mbps; /* per lane */
- u32 channel;
- u32 lanes;
- u32 format;
- u16 input_div;
- u16 feedback_div;
- unsigned long mode_flags;
-
- const struct dw_mipi_dsi_plat_data *pdata;
-};
-
-enum dw_mipi_dsi_mode {
- DW_MIPI_DSI_CMD_MODE,
- DW_MIPI_DSI_VID_MODE,
-};
-
-struct dphy_pll_testdin_map {
- unsigned int max_mbps;
- u8 testdin;
-};
-
-/* The table is based on a 27MHz DPHY PLL reference clock. */
-static const struct dphy_pll_testdin_map dptdin_map[] = {
- { 90, 0x00}, { 100, 0x10}, { 110, 0x20}, { 130, 0x01},
- { 140, 0x11}, { 150, 0x21}, { 170, 0x02}, { 180, 0x12},
- { 200, 0x22}, { 220, 0x03}, { 240, 0x13}, { 250, 0x23},
- { 270, 0x04}, { 300, 0x14}, { 330, 0x05}, { 360, 0x15},
- { 400, 0x25}, { 450, 0x06}, { 500, 0x16}, { 550, 0x07},
- { 600, 0x17}, { 650, 0x08}, { 700, 0x18}, { 750, 0x09},
- { 800, 0x19}, { 850, 0x29}, { 900, 0x39}, { 950, 0x0a},
- {1000, 0x1a}, {1050, 0x2a}, {1100, 0x3a}, {1150, 0x0b},
- {1200, 0x1b}, {1250, 0x2b}, {1300, 0x3b}, {1350, 0x0c},
- {1400, 0x1c}, {1450, 0x2c}, {1500, 0x3c}
-};
-
-static int max_mbps_to_testdin(unsigned int max_mbps)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dptdin_map); i++)
- if (dptdin_map[i].max_mbps > max_mbps)
- return dptdin_map[i].testdin;
-
- return -EINVAL;
-}
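To make the lookup concrete, a small illustrative check against the table above (values taken from dptdin_map; the helper itself is unchanged):

/* The first entry whose max_mbps strictly exceeds the request wins. */
int td = max_mbps_to_testdin(440);	/* {450, 0x06} is the first entry > 440 */
/* td == 0x06; requests of 1500 Mbps or more fall off the table -> -EINVAL */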
-
-/*
- * The controller should generate 2 frames before
- * preparing the peripheral.
- */
-static void dw_mipi_dsi_wait_for_two_frames(struct drm_display_mode *mode)
-{
- int refresh, two_frames;
-
- refresh = drm_mode_vrefresh(mode);
- two_frames = DIV_ROUND_UP(MSEC_PER_SEC, refresh) * 2;
- msleep(two_frames);
-}
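Worked example of the delay above, assuming a 60 Hz mode:

/* DIV_ROUND_UP(1000, 60) = 17 ms per frame, so msleep(17 * 2) = 34 ms */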
-
-static inline struct dw_mipi_dsi *host_to_dsi(struct mipi_dsi_host *host)
-{
- return container_of(host, struct dw_mipi_dsi, dsi_host);
-}
-
-static inline struct dw_mipi_dsi *con_to_dsi(struct drm_connector *con)
-{
- return container_of(con, struct dw_mipi_dsi, connector);
-}
-
-static inline struct dw_mipi_dsi *encoder_to_dsi(struct drm_encoder *encoder)
-{
- return container_of(encoder, struct dw_mipi_dsi, encoder);
-}
-
-static inline void dsi_write(struct dw_mipi_dsi *dsi, u32 reg, u32 val)
-{
- writel(val, dsi->base + reg);
-}
-
-static inline u32 dsi_read(struct dw_mipi_dsi *dsi, u32 reg)
-{
- return readl(dsi->base + reg);
-}
-
-static void dw_mipi_dsi_phy_write(struct dw_mipi_dsi *dsi, u8 test_code,
- u8 test_data)
-{
- /*
- * With the falling edge on TESTCLK, the TESTDIN[7:0] signal content
- * is latched internally as the current test code. Test data is
- * programmed internally by rising edge on TESTCLK.
- */
- dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
-
- dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_TESTEN | PHY_TESTDOUT(0) |
- PHY_TESTDIN(test_code));
-
- dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLK | PHY_UNTESTCLR);
-
- dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_UNTESTEN | PHY_TESTDOUT(0) |
- PHY_TESTDIN(test_data));
-
- dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
-}
-
-/**
- * ns2bc - Nanoseconds to byte clock cycles
- */
-static inline unsigned int ns2bc(struct dw_mipi_dsi *dsi, int ns)
-{
- return DIV_ROUND_UP(ns * dsi->lane_mbps / 8, 1000);
-}
-
-/**
- * ns2ui - Nanoseconds to UI time periods
- */
-static inline unsigned int ns2ui(struct dw_mipi_dsi *dsi, int ns)
-{
- return DIV_ROUND_UP(ns * dsi->lane_mbps, 1000);
-}
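A quick sanity check of both conversions at lane_mbps = 1000, where one UI is 1 ns and one byte-clock cycle is 8 ns (illustrative numbers only):

/* ns2bc(dsi, 500) = DIV_ROUND_UP(500 * 1000 / 8, 1000) = 63 byte clocks */
/* ns2ui(dsi, 40)  = DIV_ROUND_UP(40 * 1000, 1000)      = 40 UI          */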
-
-static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
-{
- int ret, testdin, vco, val;
-
- vco = (dsi->lane_mbps < 200) ? 0 : (dsi->lane_mbps + 100) / 200;
-
- testdin = max_mbps_to_testdin(dsi->lane_mbps);
- if (testdin < 0) {
- DRM_DEV_ERROR(dsi->dev,
- "failed to get testdin for %dmbps lane clock\n",
- dsi->lane_mbps);
- return testdin;
- }
-
- /* Start by clearing PHY state */
- dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR);
- dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLR);
- dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR);
-
- ret = clk_prepare_enable(dsi->phy_cfg_clk);
- if (ret) {
- DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk\n");
- return ret;
- }
-
- dw_mipi_dsi_phy_write(dsi, 0x10, BYPASS_VCO_RANGE |
- VCO_RANGE_CON_SEL(vco) |
- VCO_IN_CAP_CON_LOW |
- REF_BIAS_CUR_SEL);
-
- dw_mipi_dsi_phy_write(dsi, 0x11, CP_CURRENT_3MA);
- dw_mipi_dsi_phy_write(dsi, 0x12, CP_PROGRAM_EN | LPF_PROGRAM_EN |
- LPF_RESISTORS_20_KOHM);
-
- dw_mipi_dsi_phy_write(dsi, 0x44, HSFREQRANGE_SEL(testdin));
-
- dw_mipi_dsi_phy_write(dsi, 0x17, INPUT_DIVIDER(dsi->input_div));
- dw_mipi_dsi_phy_write(dsi, 0x18, LOOP_DIV_LOW_SEL(dsi->feedback_div) |
- LOW_PROGRAM_EN);
- dw_mipi_dsi_phy_write(dsi, 0x18, LOOP_DIV_HIGH_SEL(dsi->feedback_div) |
- HIGH_PROGRAM_EN);
- dw_mipi_dsi_phy_write(dsi, 0x19, PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);
-
- dw_mipi_dsi_phy_write(dsi, 0x22, LOW_PROGRAM_EN |
- BIASEXTR_SEL(BIASEXTR_127_7));
- dw_mipi_dsi_phy_write(dsi, 0x22, HIGH_PROGRAM_EN |
- BANDGAP_SEL(BANDGAP_96_10));
-
- dw_mipi_dsi_phy_write(dsi, 0x20, POWER_CONTROL | INTERNAL_REG_CURRENT |
- BIAS_BLOCK_ON | BANDGAP_ON);
-
- dw_mipi_dsi_phy_write(dsi, 0x21, TER_RESISTOR_LOW | TER_CAL_DONE |
- SETRD_MAX | TER_RESISTORS_ON);
- dw_mipi_dsi_phy_write(dsi, 0x21, TER_RESISTOR_HIGH | LEVEL_SHIFTERS_ON |
- SETRD_MAX | POWER_MANAGE |
- TER_RESISTORS_ON);
-
- dw_mipi_dsi_phy_write(dsi, 0x60, TLP_PROGRAM_EN | ns2bc(dsi, 500));
- dw_mipi_dsi_phy_write(dsi, 0x61, THS_PRE_PROGRAM_EN | ns2ui(dsi, 40));
- dw_mipi_dsi_phy_write(dsi, 0x62, THS_ZERO_PROGRAM_EN | ns2bc(dsi, 300));
- dw_mipi_dsi_phy_write(dsi, 0x63, THS_PRE_PROGRAM_EN | ns2ui(dsi, 100));
- dw_mipi_dsi_phy_write(dsi, 0x64, BIT(5) | ns2bc(dsi, 100));
- dw_mipi_dsi_phy_write(dsi, 0x65, BIT(5) | (ns2bc(dsi, 60) + 7));
-
- dw_mipi_dsi_phy_write(dsi, 0x70, TLP_PROGRAM_EN | ns2bc(dsi, 500));
- dw_mipi_dsi_phy_write(dsi, 0x71,
- THS_PRE_PROGRAM_EN | (ns2ui(dsi, 50) + 5));
- dw_mipi_dsi_phy_write(dsi, 0x72,
- THS_ZERO_PROGRAM_EN | (ns2bc(dsi, 140) + 2));
- dw_mipi_dsi_phy_write(dsi, 0x73,
- THS_PRE_PROGRAM_EN | (ns2ui(dsi, 60) + 8));
- dw_mipi_dsi_phy_write(dsi, 0x74, BIT(5) | ns2bc(dsi, 100));
-
- dsi_write(dsi, DSI_PHY_RSTZ, PHY_ENFORCEPLL | PHY_ENABLECLK |
- PHY_UNRSTZ | PHY_UNSHUTDOWNZ);
-
- ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
- val, val & LOCK, 1000, PHY_STATUS_TIMEOUT_US);
- if (ret < 0) {
- DRM_DEV_ERROR(dsi->dev, "failed to wait for phy lock state\n");
- goto phy_init_end;
- }
-
- ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
- val, val & STOP_STATE_CLK_LANE, 1000,
- PHY_STATUS_TIMEOUT_US);
- if (ret < 0)
- DRM_DEV_ERROR(dsi->dev,
- "failed to wait for phy clk lane stop state\n");
-
-phy_init_end:
- clk_disable_unprepare(dsi->phy_cfg_clk);
-
- return ret;
-}
-
-static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi,
- struct drm_display_mode *mode)
-{
- unsigned int i, pre;
- unsigned long mpclk, pllref, tmp;
- unsigned int m = 1, n = 1, target_mbps = 1000;
- unsigned int max_mbps = dptdin_map[ARRAY_SIZE(dptdin_map) - 1].max_mbps;
- int bpp;
-
- bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- if (bpp < 0) {
- DRM_DEV_ERROR(dsi->dev,
- "failed to get bpp for pixel format %d\n",
- dsi->format);
- return bpp;
- }
-
- mpclk = DIV_ROUND_UP(mode->clock, MSEC_PER_SEC);
- if (mpclk) {
-		/* take 1 / 0.8, since mbps must be bigger than the RGB bandwidth */
- tmp = mpclk * (bpp / dsi->lanes) * 10 / 8;
- if (tmp < max_mbps)
- target_mbps = tmp;
- else
- DRM_DEV_ERROR(dsi->dev,
- "DPHY clock frequency is out of range\n");
- }
-
- pllref = DIV_ROUND_UP(clk_get_rate(dsi->pllref_clk), USEC_PER_SEC);
- tmp = pllref;
-
- /*
- * The limits on the PLL divisor are:
- *
- * 5MHz <= (pllref / n) <= 40MHz
- *
-	 * we walk over these values in decreasing order so that if we hit
- * an exact match for target_mbps it is more likely that "m" will be
- * even.
- *
- * TODO: ensure that "m" is even after this loop.
- */
- for (i = pllref / 5; i > (pllref / 40); i--) {
- pre = pllref / i;
- if ((tmp > (target_mbps % pre)) && (target_mbps / pre < 512)) {
- tmp = target_mbps % pre;
- n = i;
- m = target_mbps / pre;
- }
- if (tmp == 0)
- break;
- }
-
- dsi->lane_mbps = pllref / n * m;
- dsi->input_div = n;
- dsi->feedback_div = m;
-
- return 0;
-}
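A worked pass through the divisor search, assuming a 27 MHz reference clock and a 900 Mbps target (hypothetical numbers, not taken from a real panel):

/* pllref = 27, so i walks from 27/5 = 5 down to 27/40 + 1 = 1       */
/* i = 5: pre = 27/5 = 5; 900 % 5 == 0 and 900 / 5 = 180 < 512,      */
/*        so n = 5, m = 180; the remainder hits 0 and the loop breaks */
/* result: lane_mbps = 27 / 5 * 180 = 900, an exact match            */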
-
-static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct dw_mipi_dsi *dsi = host_to_dsi(host);
-
- if (device->lanes > dsi->pdata->max_data_lanes) {
- DRM_DEV_ERROR(dsi->dev,
- "the number of data lanes(%u) is too many\n",
- device->lanes);
- return -EINVAL;
- }
-
- dsi->lanes = device->lanes;
- dsi->channel = device->channel;
- dsi->format = device->format;
- dsi->mode_flags = device->mode_flags;
- dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (!IS_ERR(dsi->panel))
- return drm_panel_attach(dsi->panel, &dsi->connector);
-
- return -EINVAL;
-}
-
-static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct dw_mipi_dsi *dsi = host_to_dsi(host);
-
- drm_panel_detach(dsi->panel);
-
- return 0;
-}
-
-static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
- const struct mipi_dsi_msg *msg)
-{
- bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM;
- u32 val = 0;
-
- if (msg->flags & MIPI_DSI_MSG_REQ_ACK)
- val |= EN_ACK_RQST;
- if (lpm)
- val |= CMD_MODE_ALL_LP;
-
- dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS);
- dsi_write(dsi, DSI_CMD_MODE_CFG, val);
-}
-
-static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
-{
- int ret;
- u32 val, mask;
-
- ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
- val, !(val & GEN_CMD_FULL), 1000,
- CMD_PKT_STATUS_TIMEOUT_US);
- if (ret < 0) {
- DRM_DEV_ERROR(dsi->dev,
- "failed to get available command FIFO\n");
- return ret;
- }
-
- dsi_write(dsi, DSI_GEN_HDR, hdr_val);
-
- mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY;
- ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
- val, (val & mask) == mask,
- 1000, CMD_PKT_STATUS_TIMEOUT_US);
- if (ret < 0) {
- DRM_DEV_ERROR(dsi->dev, "failed to write command FIFO\n");
- return ret;
- }
-
- return 0;
-}
-
-static int dw_mipi_dsi_dcs_short_write(struct dw_mipi_dsi *dsi,
- const struct mipi_dsi_msg *msg)
-{
- const u8 *tx_buf = msg->tx_buf;
- u16 data = 0;
- u32 val;
-
- if (msg->tx_len > 0)
- data |= tx_buf[0];
- if (msg->tx_len > 1)
- data |= tx_buf[1] << 8;
-
- if (msg->tx_len > 2) {
- DRM_DEV_ERROR(dsi->dev,
- "too long tx buf length %zu for short write\n",
- msg->tx_len);
- return -EINVAL;
- }
-
- val = GEN_HDATA(data) | GEN_HTYPE(msg->type);
- return dw_mipi_dsi_gen_pkt_hdr_write(dsi, val);
-}
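As a concrete (hypothetical) case, a DCS set_display_brightness command with one parameter would pack like this:

/* tx_buf = { 0x51, 0xff }, msg->type = 0x15 (DCS short write, 1 param) */
/* data = 0x51 | (0xff << 8) = 0xff51                                   */
/* val  = GEN_HDATA(0xff51) | GEN_HTYPE(0x15) = 0x00ff5115              */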
-
-static int dw_mipi_dsi_dcs_long_write(struct dw_mipi_dsi *dsi,
- const struct mipi_dsi_msg *msg)
-{
- const u8 *tx_buf = msg->tx_buf;
- int len = msg->tx_len, pld_data_bytes = sizeof(u32), ret;
- u32 hdr_val = GEN_HDATA(msg->tx_len) | GEN_HTYPE(msg->type);
- u32 remainder;
- u32 val;
-
- if (msg->tx_len < 3) {
- DRM_DEV_ERROR(dsi->dev,
- "wrong tx buf length %zu for long write\n",
- msg->tx_len);
- return -EINVAL;
- }
-
- while (DIV_ROUND_UP(len, pld_data_bytes)) {
- if (len < pld_data_bytes) {
- remainder = 0;
- memcpy(&remainder, tx_buf, len);
- dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
- len = 0;
- } else {
- memcpy(&remainder, tx_buf, pld_data_bytes);
- dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
- tx_buf += pld_data_bytes;
- len -= pld_data_bytes;
- }
-
- ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
- val, !(val & GEN_PLD_W_FULL), 1000,
- CMD_PKT_STATUS_TIMEOUT_US);
- if (ret < 0) {
- DRM_DEV_ERROR(dsi->dev,
- "failed to get available write payload FIFO\n");
- return ret;
- }
- }
-
- return dw_mipi_dsi_gen_pkt_hdr_write(dsi, hdr_val);
-}
-
-static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
- const struct mipi_dsi_msg *msg)
-{
- struct dw_mipi_dsi *dsi = host_to_dsi(host);
- int ret;
-
- dw_mipi_message_config(dsi, msg);
-
- switch (msg->type) {
- case MIPI_DSI_DCS_SHORT_WRITE:
- case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
- ret = dw_mipi_dsi_dcs_short_write(dsi, msg);
- break;
- case MIPI_DSI_DCS_LONG_WRITE:
- ret = dw_mipi_dsi_dcs_long_write(dsi, msg);
- break;
- default:
- DRM_DEV_ERROR(dsi->dev, "unsupported message type 0x%02x\n",
- msg->type);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static const struct mipi_dsi_host_ops dw_mipi_dsi_host_ops = {
- .attach = dw_mipi_dsi_host_attach,
- .detach = dw_mipi_dsi_host_detach,
- .transfer = dw_mipi_dsi_host_transfer,
-};
-
-static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
-{
- u32 val;
-
- val = ENABLE_LOW_POWER;
-
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
- val |= VID_MODE_TYPE_BURST;
- else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- val |= VID_MODE_TYPE_NON_BURST_SYNC_PULSES;
- else
- val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS;
-
- dsi_write(dsi, DSI_VID_MODE_CFG, val);
-}
-
-static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
- enum dw_mipi_dsi_mode mode)
-{
- if (mode == DW_MIPI_DSI_CMD_MODE) {
- dsi_write(dsi, DSI_PWR_UP, RESET);
- dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
- dsi_write(dsi, DSI_PWR_UP, POWERUP);
- } else {
- dsi_write(dsi, DSI_PWR_UP, RESET);
- dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
- dw_mipi_dsi_video_mode_config(dsi);
- dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
- dsi_write(dsi, DSI_PWR_UP, POWERUP);
- }
-}
-
-static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi)
-{
- dsi_write(dsi, DSI_PWR_UP, RESET);
- dsi_write(dsi, DSI_PHY_RSTZ, PHY_RSTZ);
-}
-
-static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
-{
- /*
- * The maximum permitted escape clock is 20MHz and it is derived from
- * lanebyteclk, which is running at "lane_mbps / 8". Thus we want:
- *
- * (lane_mbps >> 3) / esc_clk_division < 20
- * which is:
- * (lane_mbps >> 3) / 20 > esc_clk_division
- */
- u32 esc_clk_division = (dsi->lane_mbps >> 3) / 20 + 1;
-
- dsi_write(dsi, DSI_PWR_UP, RESET);
- dsi_write(dsi, DSI_PHY_RSTZ, PHY_DISFORCEPLL | PHY_DISABLECLK
- | PHY_RSTZ | PHY_SHUTDOWNZ);
- dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVIDSION(10) |
- TX_ESC_CLK_DIVIDSION(esc_clk_division));
-}
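Plugging numbers into the bound above makes it concrete; at a 1000 Mbps lane rate:

/* lanebyteclk = 1000 >> 3 = 125 MHz                          */
/* esc_clk_division = 125 / 20 + 1 = 7                        */
/* escape clock = 125 / 7 ~= 17.9 MHz, safely below 20 MHz    */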
-
-static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
- struct drm_display_mode *mode)
-{
- u32 val = 0, color = 0;
-
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB888:
- color = DPI_COLOR_CODING_24BIT;
- break;
- case MIPI_DSI_FMT_RGB666:
- color = DPI_COLOR_CODING_18BIT_2 | EN18_LOOSELY;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- color = DPI_COLOR_CODING_18BIT_1;
- break;
- case MIPI_DSI_FMT_RGB565:
- color = DPI_COLOR_CODING_16BIT_1;
- break;
- }
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- val |= VSYNC_ACTIVE_LOW;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- val |= HSYNC_ACTIVE_LOW;
-
- dsi_write(dsi, DSI_DPI_VCID, DPI_VID(dsi->channel));
- dsi_write(dsi, DSI_DPI_COLOR_CODING, color);
- dsi_write(dsi, DSI_DPI_CFG_POL, val);
- dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(4)
- | INVACT_LPCMD_TIME(4));
-}
-
-static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
-{
- dsi_write(dsi, DSI_PCKHDL_CFG, EN_CRC_RX | EN_ECC_RX | EN_BTA);
-}
-
-static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
- struct drm_display_mode *mode)
-{
- dsi_write(dsi, DSI_VID_PKT_SIZE, VID_PKT_SIZE(mode->hdisplay));
-}
-
-static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
-{
- dsi_write(dsi, DSI_TO_CNT_CFG, HSTX_TO_CNT(1000) | LPRX_TO_CNT(1000));
- dsi_write(dsi, DSI_BTA_TO_CNT, 0xd00);
- dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
-}
-
-/* Get lane byte clock cycles. */
-static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
- struct drm_display_mode *mode,
- u32 hcomponent)
-{
- u32 frac, lbcc;
-
- lbcc = hcomponent * dsi->lane_mbps * MSEC_PER_SEC / 8;
-
- frac = lbcc % mode->clock;
- lbcc = lbcc / mode->clock;
- if (frac)
- lbcc++;
-
- return lbcc;
-}
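For instance, the htotal of a common 1080p mode (2200 pixels, 148500 kHz pixel clock) on 1000 Mbps lanes converts as follows (illustrative values):

/* byte clock = 1000 * 1000 / 8 = 125000 kHz, same unit as mode->clock   */
/* lbcc = 2200 * 125000 / 148500 = 1851 with a remainder, rounds to 1852 */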
-
-static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
- struct drm_display_mode *mode)
-{
- u32 htotal, hsa, hbp, lbcc;
-
- htotal = mode->htotal;
- hsa = mode->hsync_end - mode->hsync_start;
- hbp = mode->htotal - mode->hsync_end;
-
- lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, htotal);
- dsi_write(dsi, DSI_VID_HLINE_TIME, lbcc);
-
- lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, hsa);
- dsi_write(dsi, DSI_VID_HSA_TIME, lbcc);
-
- lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, hbp);
- dsi_write(dsi, DSI_VID_HBP_TIME, lbcc);
-}
-
-static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
- struct drm_display_mode *mode)
-{
- u32 vactive, vsa, vfp, vbp;
-
- vactive = mode->vdisplay;
- vsa = mode->vsync_end - mode->vsync_start;
- vfp = mode->vsync_start - mode->vdisplay;
- vbp = mode->vtotal - mode->vsync_end;
-
- dsi_write(dsi, DSI_VID_VACTIVE_LINES, vactive);
- dsi_write(dsi, DSI_VID_VSA_LINES, vsa);
- dsi_write(dsi, DSI_VID_VFP_LINES, vfp);
- dsi_write(dsi, DSI_VID_VBP_LINES, vbp);
-}
-
-static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
-{
- dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(0x40)
- | PHY_LP2HS_TIME(0x40) | MAX_RD_TIME(10000));
-
- dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(0x40)
- | PHY_CLKLP2HS_TIME(0x40));
-}
-
-static void dw_mipi_dsi_dphy_interface_config(struct dw_mipi_dsi *dsi)
-{
- dsi_write(dsi, DSI_PHY_IF_CFG, PHY_STOP_WAIT_TIME(0x20) |
- N_LANES(dsi->lanes));
-}
-
-static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
-{
- dsi_read(dsi, DSI_INT_ST0);
- dsi_read(dsi, DSI_INT_ST1);
- dsi_write(dsi, DSI_INT_MSK0, 0);
- dsi_write(dsi, DSI_INT_MSK1, 0);
-}
-
-static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
-{
- struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
-
- if (dsi->dpms_mode != DRM_MODE_DPMS_ON)
- return;
-
- if (clk_prepare_enable(dsi->pclk)) {
- DRM_DEV_ERROR(dsi->dev, "Failed to enable pclk\n");
- return;
- }
-
- drm_panel_disable(dsi->panel);
-
- dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_CMD_MODE);
- drm_panel_unprepare(dsi->panel);
-
- dw_mipi_dsi_disable(dsi);
- pm_runtime_put(dsi->dev);
- clk_disable_unprepare(dsi->pclk);
- dsi->dpms_mode = DRM_MODE_DPMS_OFF;
-}
-
-static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
-{
- struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- const struct dw_mipi_dsi_plat_data *pdata = dsi->pdata;
- int mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node, encoder);
- u32 val;
- int ret;
-
- ret = dw_mipi_dsi_get_lane_bps(dsi, mode);
- if (ret < 0)
- return;
-
- if (dsi->dpms_mode == DRM_MODE_DPMS_ON)
- return;
-
- if (clk_prepare_enable(dsi->pclk)) {
- DRM_DEV_ERROR(dsi->dev, "Failed to enable pclk\n");
- return;
- }
-
- pm_runtime_get_sync(dsi->dev);
- dw_mipi_dsi_init(dsi);
- dw_mipi_dsi_dpi_config(dsi, mode);
- dw_mipi_dsi_packet_handler_config(dsi);
- dw_mipi_dsi_video_mode_config(dsi);
- dw_mipi_dsi_video_packet_config(dsi, mode);
- dw_mipi_dsi_command_mode_config(dsi);
- dw_mipi_dsi_line_timer_config(dsi, mode);
- dw_mipi_dsi_vertical_timing_config(dsi, mode);
- dw_mipi_dsi_dphy_timing_config(dsi);
- dw_mipi_dsi_dphy_interface_config(dsi);
- dw_mipi_dsi_clear_err(dsi);
-
- /*
-	 * For the RK3399, the grf clk must be enabled before writing any grf
-	 * register. On the RK3288 and other SoCs grf_clk is NULL, so
-	 * clk_prepare_enable() simply returns success.
- */
- ret = clk_prepare_enable(dsi->grf_clk);
- if (ret) {
- DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
- return;
- }
-
- if (pdata->grf_dsi0_mode_reg)
- regmap_write(dsi->grf_regmap, pdata->grf_dsi0_mode_reg,
- pdata->grf_dsi0_mode);
-
- dw_mipi_dsi_phy_init(dsi);
- dw_mipi_dsi_wait_for_two_frames(mode);
-
- dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_CMD_MODE);
- if (drm_panel_prepare(dsi->panel))
- DRM_DEV_ERROR(dsi->dev, "failed to prepare panel\n");
-
- dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_VID_MODE);
- drm_panel_enable(dsi->panel);
-
- clk_disable_unprepare(dsi->pclk);
-
- if (mux)
- val = pdata->dsi0_en_bit | (pdata->dsi0_en_bit << 16);
- else
- val = pdata->dsi0_en_bit << 16;
-
- regmap_write(dsi->grf_regmap, pdata->grf_switch_reg, val);
- DRM_DEV_DEBUG(dsi->dev,
- "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
- dsi->dpms_mode = DRM_MODE_DPMS_ON;
-
- clk_disable_unprepare(dsi->grf_clk);
-}
-
-static int
-dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
- struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
-
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB888:
- s->output_mode = ROCKCHIP_OUT_MODE_P888;
- break;
- case MIPI_DSI_FMT_RGB666:
- s->output_mode = ROCKCHIP_OUT_MODE_P666;
- break;
- case MIPI_DSI_FMT_RGB565:
- s->output_mode = ROCKCHIP_OUT_MODE_P565;
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
- s->output_type = DRM_MODE_CONNECTOR_DSI;
-
- return 0;
-}
-
-static const struct drm_encoder_helper_funcs
-dw_mipi_dsi_encoder_helper_funcs = {
- .enable = dw_mipi_dsi_encoder_enable,
- .disable = dw_mipi_dsi_encoder_disable,
- .atomic_check = dw_mipi_dsi_encoder_atomic_check,
-};
-
-static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static int dw_mipi_dsi_connector_get_modes(struct drm_connector *connector)
-{
- struct dw_mipi_dsi *dsi = con_to_dsi(connector);
-
- return drm_panel_get_modes(dsi->panel);
-}
-
-static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = {
- .get_modes = dw_mipi_dsi_connector_get_modes,
-};
-
-static void dw_mipi_dsi_drm_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs dw_mipi_dsi_atomic_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = dw_mipi_dsi_drm_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int dw_mipi_dsi_register(struct drm_device *drm,
- struct dw_mipi_dsi *dsi)
-{
- struct drm_encoder *encoder = &dsi->encoder;
- struct drm_connector *connector = &dsi->connector;
- struct device *dev = dsi->dev;
- int ret;
-
- encoder->possible_crtcs = drm_of_find_possible_crtcs(drm,
- dev->of_node);
- /*
- * If we failed to find the CRTC(s) which this encoder is
- * supposed to be connected to, it's because the CRTC has
- * not been registered yet. Defer probing, and hope that
- * the required CRTC is added later.
- */
- if (encoder->possible_crtcs == 0)
- return -EPROBE_DEFER;
-
- drm_encoder_helper_add(&dsi->encoder,
- &dw_mipi_dsi_encoder_helper_funcs);
- ret = drm_encoder_init(drm, &dsi->encoder, &dw_mipi_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
- if (ret) {
- DRM_DEV_ERROR(dev, "Failed to initialize encoder with drm\n");
- return ret;
- }
-
- drm_connector_helper_add(connector,
- &dw_mipi_dsi_connector_helper_funcs);
-
- drm_connector_init(drm, &dsi->connector,
- &dw_mipi_dsi_atomic_connector_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
- drm_connector_attach_encoder(connector, encoder);
-
- return 0;
-}
-
-static int rockchip_mipi_parse_dt(struct dw_mipi_dsi *dsi)
-{
- struct device_node *np = dsi->dev->of_node;
-
- dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
- if (IS_ERR(dsi->grf_regmap)) {
- DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
- return PTR_ERR(dsi->grf_regmap);
- }
-
- return 0;
-}
-
-static struct dw_mipi_dsi_plat_data rk3288_mipi_dsi_drv_data = {
- .dsi0_en_bit = RK3288_DSI0_SEL_VOP_LIT,
- .dsi1_en_bit = RK3288_DSI1_SEL_VOP_LIT,
- .grf_switch_reg = RK3288_GRF_SOC_CON6,
- .max_data_lanes = 4,
-};
-
-static struct dw_mipi_dsi_plat_data rk3399_mipi_dsi_drv_data = {
- .dsi0_en_bit = RK3399_DSI0_SEL_VOP_LIT,
- .dsi1_en_bit = RK3399_DSI1_SEL_VOP_LIT,
- .grf_switch_reg = RK3399_GRF_SOC_CON20,
- .grf_dsi0_mode = RK3399_GRF_DSI_MODE,
- .grf_dsi0_mode_reg = RK3399_GRF_SOC_CON22,
- .flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
- .max_data_lanes = 4,
-};
-
-static const struct of_device_id dw_mipi_dsi_dt_ids[] = {
- {
- .compatible = "rockchip,rk3288-mipi-dsi",
- .data = &rk3288_mipi_dsi_drv_data,
- }, {
- .compatible = "rockchip,rk3399-mipi-dsi",
- .data = &rk3399_mipi_dsi_drv_data,
- },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, dw_mipi_dsi_dt_ids);
-
-static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
- void *data)
-{
- const struct of_device_id *of_id =
- of_match_device(dw_mipi_dsi_dt_ids, dev);
- const struct dw_mipi_dsi_plat_data *pdata = of_id->data;
- struct platform_device *pdev = to_platform_device(dev);
- struct reset_control *apb_rst;
- struct drm_device *drm = data;
- struct dw_mipi_dsi *dsi;
- struct resource *res;
- int ret;
-
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
-
- dsi->dev = dev;
- dsi->pdata = pdata;
- dsi->dpms_mode = DRM_MODE_DPMS_OFF;
-
- ret = rockchip_mipi_parse_dt(dsi);
- if (ret)
- return ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(dsi->base))
- return PTR_ERR(dsi->base);
-
- dsi->pllref_clk = devm_clk_get(dev, "ref");
- if (IS_ERR(dsi->pllref_clk)) {
- ret = PTR_ERR(dsi->pllref_clk);
- DRM_DEV_ERROR(dev,
- "Unable to get pll reference clock: %d\n", ret);
- return ret;
- }
-
- dsi->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(dsi->pclk)) {
- ret = PTR_ERR(dsi->pclk);
- DRM_DEV_ERROR(dev, "Unable to get pclk: %d\n", ret);
- return ret;
- }
-
- /*
- * Note that the reset was not defined in the initial device tree, so
- * we have to be prepared for it not being found.
- */
- apb_rst = devm_reset_control_get(dev, "apb");
- if (IS_ERR(apb_rst)) {
- ret = PTR_ERR(apb_rst);
- if (ret == -ENOENT) {
- apb_rst = NULL;
- } else {
- DRM_DEV_ERROR(dev,
- "Unable to get reset control: %d\n", ret);
- return ret;
- }
- }
-
- if (apb_rst) {
- ret = clk_prepare_enable(dsi->pclk);
- if (ret) {
- DRM_DEV_ERROR(dev, "Failed to enable pclk\n");
- return ret;
- }
-
- reset_control_assert(apb_rst);
- usleep_range(10, 20);
- reset_control_deassert(apb_rst);
-
- clk_disable_unprepare(dsi->pclk);
- }
-
- if (pdata->flags & DW_MIPI_NEEDS_PHY_CFG_CLK) {
- dsi->phy_cfg_clk = devm_clk_get(dev, "phy_cfg");
- if (IS_ERR(dsi->phy_cfg_clk)) {
- ret = PTR_ERR(dsi->phy_cfg_clk);
- DRM_DEV_ERROR(dev,
- "Unable to get phy_cfg_clk: %d\n", ret);
- return ret;
- }
- }
-
- if (pdata->flags & DW_MIPI_NEEDS_GRF_CLK) {
- dsi->grf_clk = devm_clk_get(dev, "grf");
- if (IS_ERR(dsi->grf_clk)) {
- ret = PTR_ERR(dsi->grf_clk);
- DRM_DEV_ERROR(dev, "Unable to get grf_clk: %d\n", ret);
- return ret;
- }
- }
-
- ret = clk_prepare_enable(dsi->pllref_clk);
- if (ret) {
- DRM_DEV_ERROR(dev, "Failed to enable pllref_clk\n");
- return ret;
- }
-
- ret = dw_mipi_dsi_register(drm, dsi);
- if (ret) {
- DRM_DEV_ERROR(dev, "Failed to register mipi_dsi: %d\n", ret);
- goto err_pllref;
- }
-
- dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
- dsi->dsi_host.dev = dev;
- ret = mipi_dsi_host_register(&dsi->dsi_host);
- if (ret) {
- DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
- goto err_cleanup;
- }
-
- if (!dsi->panel) {
- ret = -EPROBE_DEFER;
- goto err_mipi_dsi_host;
- }
-
- dev_set_drvdata(dev, dsi);
- pm_runtime_enable(dev);
- return 0;
-
-err_mipi_dsi_host:
- mipi_dsi_host_unregister(&dsi->dsi_host);
-err_cleanup:
- dsi->connector.funcs->destroy(&dsi->connector);
- dsi->encoder.funcs->destroy(&dsi->encoder);
-err_pllref:
- clk_disable_unprepare(dsi->pllref_clk);
- return ret;
-}
-
-static void dw_mipi_dsi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct dw_mipi_dsi *dsi = dev_get_drvdata(dev);
-
- mipi_dsi_host_unregister(&dsi->dsi_host);
- pm_runtime_disable(dev);
-
- dsi->connector.funcs->destroy(&dsi->connector);
- dsi->encoder.funcs->destroy(&dsi->encoder);
-
- clk_disable_unprepare(dsi->pllref_clk);
-}
-
-static const struct component_ops dw_mipi_dsi_ops = {
- .bind = dw_mipi_dsi_bind,
- .unbind = dw_mipi_dsi_unbind,
-};
-
-static int dw_mipi_dsi_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &dw_mipi_dsi_ops);
-}
-
-static int dw_mipi_dsi_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &dw_mipi_dsi_ops);
- return 0;
-}
-
-struct platform_driver dw_mipi_dsi_driver = {
- .probe = dw_mipi_dsi_probe,
- .remove = dw_mipi_dsi_remove,
- .driver = {
- .of_match_table = dw_mipi_dsi_dt_ids,
- .name = DRIVER_NAME,
- },
-};
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 11309a2a4e43..89c63cfde5c8 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -11,6 +11,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <drm/drm_of.h>
@@ -24,6 +25,24 @@
#define RK3288_GRF_SOC_CON6 0x025C
#define RK3288_HDMI_LCDC_SEL BIT(4)
+#define RK3328_GRF_SOC_CON2 0x0408
+
+#define RK3328_HDMI_SDAIN_MSK BIT(11)
+#define RK3328_HDMI_SCLIN_MSK BIT(10)
+#define RK3328_HDMI_HPD_IOE BIT(2)
+#define RK3328_GRF_SOC_CON3 0x040c
+/* need to be unset if hdmi or i2c should control voltage */
+#define RK3328_HDMI_SDA5V_GRF BIT(15)
+#define RK3328_HDMI_SCL5V_GRF BIT(14)
+#define RK3328_HDMI_HPD5V_GRF BIT(13)
+#define RK3328_HDMI_CEC5V_GRF BIT(12)
+#define RK3328_GRF_SOC_CON4 0x0410
+#define RK3328_HDMI_HPD_SARADC BIT(13)
+#define RK3328_HDMI_CEC_5V BIT(11)
+#define RK3328_HDMI_SDA_5V BIT(10)
+#define RK3328_HDMI_SCL_5V BIT(9)
+#define RK3328_HDMI_HPD_5V BIT(8)
+
#define RK3399_GRF_SOC_CON20 0x6250
#define RK3399_HDMI_LCDC_SEL BIT(6)
@@ -36,7 +55,7 @@
* @lcdsel_lit: reg value of selecting vop little for HDMI
*/
struct rockchip_hdmi_chip_data {
- u32 lcdsel_grf_reg;
+ int lcdsel_grf_reg;
u32 lcdsel_big;
u32 lcdsel_lit;
};
@@ -49,6 +68,7 @@ struct rockchip_hdmi {
struct clk *vpll_clk;
struct clk *grf_clk;
struct dw_hdmi *hdmi;
+ struct phy *phy;
};
#define to_rockchip_hdmi(x) container_of(x, struct rockchip_hdmi, x)
@@ -245,6 +265,9 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
u32 val;
int ret;
+ if (hdmi->chip_data->lcdsel_grf_reg < 0)
+ return;
+
ret = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
if (ret)
val = hdmi->chip_data->lcdsel_lit;
@@ -287,6 +310,66 @@ static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_fun
.atomic_check = dw_hdmi_rockchip_encoder_atomic_check,
};
+static int dw_hdmi_rockchip_genphy_init(struct dw_hdmi *dw_hdmi, void *data,
+ struct drm_display_mode *mode)
+{
+ struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+
+ return phy_power_on(hdmi->phy);
+}
+
+static void dw_hdmi_rockchip_genphy_disable(struct dw_hdmi *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+
+ phy_power_off(hdmi->phy);
+}
+
+static enum drm_connector_status
+dw_hdmi_rk3328_read_hpd(struct dw_hdmi *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+ enum drm_connector_status status;
+
+ status = dw_hdmi_phy_read_hpd(dw_hdmi, data);
+
+ if (status == connector_status_connected)
+ regmap_write(hdmi->regmap,
+ RK3328_GRF_SOC_CON4,
+ HIWORD_UPDATE(RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V,
+ RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V));
+ else
+ regmap_write(hdmi->regmap,
+ RK3328_GRF_SOC_CON4,
+ HIWORD_UPDATE(0, RK3328_HDMI_SDA_5V |
+ RK3328_HDMI_SCL_5V));
+ return status;
+}
+
+static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+
+ dw_hdmi_phy_setup_hpd(dw_hdmi, data);
+
+ /* Enable and map pins to 3V grf-controlled io-voltage */
+ regmap_write(hdmi->regmap,
+ RK3328_GRF_SOC_CON4,
+ HIWORD_UPDATE(0, RK3328_HDMI_HPD_SARADC | RK3328_HDMI_CEC_5V |
+ RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V |
+ RK3328_HDMI_HPD_5V));
+ regmap_write(hdmi->regmap,
+ RK3328_GRF_SOC_CON3,
+ HIWORD_UPDATE(0, RK3328_HDMI_SDA5V_GRF | RK3328_HDMI_SCL5V_GRF |
+ RK3328_HDMI_HPD5V_GRF |
+ RK3328_HDMI_CEC5V_GRF));
+ regmap_write(hdmi->regmap,
+ RK3328_GRF_SOC_CON2,
+ HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK,
+ RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK |
+ RK3328_HDMI_HPD_IOE));
+}
+
static struct rockchip_hdmi_chip_data rk3288_chip_data = {
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
.lcdsel_big = HIWORD_UPDATE(0, RK3288_HDMI_LCDC_SEL),
@@ -301,6 +384,29 @@ static const struct dw_hdmi_plat_data rk3288_hdmi_drv_data = {
.phy_data = &rk3288_chip_data,
};
+static const struct dw_hdmi_phy_ops rk3328_hdmi_phy_ops = {
+ .init = dw_hdmi_rockchip_genphy_init,
+ .disable = dw_hdmi_rockchip_genphy_disable,
+ .read_hpd = dw_hdmi_rk3328_read_hpd,
+ .update_hpd = dw_hdmi_phy_update_hpd,
+ .setup_hpd = dw_hdmi_rk3328_setup_hpd,
+};
+
+static struct rockchip_hdmi_chip_data rk3328_chip_data = {
+ .lcdsel_grf_reg = -1,
+};
+
+static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
+ .mode_valid = dw_hdmi_rockchip_mode_valid,
+ .mpll_cfg = rockchip_mpll_cfg,
+ .cur_ctr = rockchip_cur_ctr,
+ .phy_config = rockchip_phy_config,
+ .phy_data = &rk3328_chip_data,
+ .phy_ops = &rk3328_hdmi_phy_ops,
+ .phy_name = "inno_dw_hdmi_phy2",
+ .phy_force_vendor = true,
+};
+
static struct rockchip_hdmi_chip_data rk3399_chip_data = {
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
.lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL),
@@ -319,6 +425,9 @@ static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
{ .compatible = "rockchip,rk3288-dw-hdmi",
.data = &rk3288_hdmi_drv_data
},
+ { .compatible = "rockchip,rk3328-dw-hdmi",
+ .data = &rk3328_hdmi_drv_data
+ },
{ .compatible = "rockchip,rk3399-dw-hdmi",
.data = &rk3399_hdmi_drv_data
},
@@ -330,7 +439,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
- const struct dw_hdmi_plat_data *plat_data;
+ struct dw_hdmi_plat_data *plat_data;
const struct of_device_id *match;
struct drm_device *drm = data;
struct drm_encoder *encoder;
@@ -345,9 +454,14 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return -ENOMEM;
match = of_match_node(dw_hdmi_rockchip_dt_ids, pdev->dev.of_node);
- plat_data = match->data;
+ plat_data = devm_kmemdup(&pdev->dev, match->data,
+ sizeof(*plat_data), GFP_KERNEL);
+ if (!plat_data)
+ return -ENOMEM;
+
hdmi->dev = &pdev->dev;
hdmi->chip_data = plat_data->phy_data;
+ plat_data->phy_data = hdmi;
encoder = &hdmi->encoder;
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
@@ -373,6 +487,14 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
+ hdmi->phy = devm_phy_optional_get(dev, "hdmi");
+ if (IS_ERR(hdmi->phy)) {
+ ret = PTR_ERR(hdmi->phy);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(hdmi->dev, "failed to get phy\n");
+ return ret;
+ }
+
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 941f35233b1f..37f9a3b651ab 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -489,7 +489,7 @@ static int __init rockchip_drm_init(void)
ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP);
ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver,
CONFIG_ROCKCHIP_DW_HDMI);
- ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_driver,
+ ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver,
CONFIG_ROCKCHIP_DW_MIPI_DSI);
ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 21a023a97bb8..ce48568ec8a0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -37,6 +37,7 @@ struct rockchip_crtc_state {
int output_type;
int output_mode;
int output_bpc;
+ int output_flags;
};
#define to_rockchip_crtc_state(s) \
container_of(s, struct rockchip_crtc_state, base)
@@ -67,7 +68,7 @@ int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout);
int rockchip_drm_endpoint_is_subdriver(struct device_node *ep);
extern struct platform_driver cdn_dp_driver;
extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
-extern struct platform_driver dw_mipi_dsi_driver;
+extern struct platform_driver dw_mipi_dsi_rockchip_driver;
extern struct platform_driver inno_hdmi_driver;
extern struct platform_driver rockchip_dp_driver;
extern struct platform_driver rockchip_lvds_driver;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index 79d00d861a31..01ff3c858875 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -189,12 +189,14 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
int rockchip_drm_psr_register(struct drm_encoder *encoder,
int (*psr_set)(struct drm_encoder *, bool enable))
{
- struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+ struct rockchip_drm_private *drm_drv;
struct psr_drv *psr;
if (!encoder || !psr_set)
return -EINVAL;
+ drm_drv = encoder->dev->dev_private;
+
psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
if (!psr)
return -ENOMEM;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 0c35a88e33dd..fb70fb486fbf 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -916,6 +916,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
BIT(VSYNC_POSITIVE) : 0;
VOP_REG_SET(vop, output, pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, mipi_dual_channel_en, 0);
switch (s->output_type) {
case DRM_MODE_CONNECTOR_LVDS:
@@ -933,6 +934,8 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
case DRM_MODE_CONNECTOR_DSI:
VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
VOP_REG_SET(vop, output, mipi_en, 1);
+ VOP_REG_SET(vop, output, mipi_dual_channel_en,
+ !!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL));
break;
case DRM_MODE_CONNECTOR_DisplayPort:
pin_pol &= ~BIT(DCLK_INVERT);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index fd5765dfd637..0fe40e1983d9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -60,6 +60,7 @@ struct vop_output {
struct vop_reg edp_en;
struct vop_reg hdmi_en;
struct vop_reg mipi_en;
+ struct vop_reg mipi_dual_channel_en;
struct vop_reg rgb_en;
};
@@ -214,6 +215,9 @@ struct vop_data {
/* for use special outface */
#define ROCKCHIP_OUT_MODE_AAAA 15
+/* output flags */
+#define ROCKCHIP_OUTPUT_DSI_DUAL BIT(0)
+
enum alpha_mode {
ALPHA_STRAIGHT,
ALPHA_INVERSE,
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index a6db3cd5544b..08fc40af52c8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -361,7 +361,11 @@ static const struct vop_win_data rk3188_vop_win_data[] = {
};
static const int rk3188_vop_intrs[] = {
- 0,
+ /*
+	 * The hs_start interrupt fires at frame-start, so it serves
+	 * the same purpose as dsp_hold in the driver.
+ */
+ DSP_HOLD_VALID_INTR,
FS_INTR,
LINE_FLAG_INTR,
BUS_ERROR_INTR,
@@ -630,6 +634,7 @@ static const struct vop_output rk3399_output = {
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
.mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
+ .mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
};
static const struct vop_data rk3399_vop_big = {
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3e22a54a99c2..4463d3826ecb 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -130,7 +130,14 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
int i;
for (i = 0; i < entity->num_rq_list; ++i) {
- num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
+ struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+
+		if (!sched->ready) {
+			DRM_WARN("scheduler %s is not ready, skipping", sched->name);
+ continue;
+ }
+
+ num_jobs = atomic_read(&sched->num_jobs);
if (num_jobs < min_jobs) {
min_jobs = num_jobs;
rq = entity->rq_list[i];
@@ -204,7 +211,6 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
drm_sched_fence_finished(job->s_fence);
WARN_ON(job->s_fence->parent);
- dma_fence_put(&job->s_fence->finished);
job->sched->ops->free_job(job);
}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 44fe587aaef9..dbb69063b3d5 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,6 +60,8 @@
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
+static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job);
+
/**
* drm_sched_rq_init - initialize a given run queue struct
*
@@ -196,6 +198,75 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
schedule_delayed_work(&sched->work_tdr, sched->timeout);
}
+/**
+ * drm_sched_fault - immediately start timeout handler
+ *
+ * @sched: scheduler where the timeout handling should be started.
+ *
+ * Start timeout handling immediately when the driver detects a hardware fault.
+ */
+void drm_sched_fault(struct drm_gpu_scheduler *sched)
+{
+ mod_delayed_work(system_wq, &sched->work_tdr, 0);
+}
+EXPORT_SYMBOL(drm_sched_fault);
+
+/**
+ * drm_sched_suspend_timeout - Suspend scheduler job timeout
+ *
+ * @sched: scheduler instance for which to suspend the timeout
+ *
+ * Suspend the delayed work timeout for the scheduler. This is done by
+ * modifying the delayed work timeout to an arbitrarily large value,
+ * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
+ * called from an IRQ context.
+ *
+ * Returns the timeout remaining
+ *
+ */
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
+{
+ unsigned long sched_timeout, now = jiffies;
+
+ sched_timeout = sched->work_tdr.timer.expires;
+
+ /*
+ * Modify the timeout to an arbitrarily large value. This also prevents
+	 * the timeout from being restarted when new submissions arrive.
+ */
+ if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
+ && time_after(sched_timeout, now))
+ return sched_timeout - now;
+ else
+ return sched->timeout;
+}
+EXPORT_SYMBOL(drm_sched_suspend_timeout);
+
+/**
+ * drm_sched_resume_timeout - Resume scheduler job timeout
+ *
+ * @sched: scheduler instance for which to resume the timeout
+ * @remaining: remaining timeout
+ *
+ * Resume the delayed work timeout for the scheduler. Note that
+ * this function can be called from an IRQ context.
+ */
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+
+ if (list_empty(&sched->ring_mirror_list))
+ cancel_delayed_work(&sched->work_tdr);
+ else
+ mod_delayed_work(system_wq, &sched->work_tdr, remaining);
+
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+EXPORT_SYMBOL(drm_sched_resume_timeout);
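A hedged usage sketch of the new suspend/resume pair from a driver's reset path; my_hw_reset() and adev are made-up placeholders, not real APIs:

unsigned long remaining;

remaining = drm_sched_suspend_timeout(sched);	/* park the TDR timer     */
my_hw_reset(adev);				/* hypothetical HW reset  */
drm_sched_resume_timeout(sched, remaining);	/* re-arm with the credit */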
+
 /* job_finish is called after the hw fence is signaled
  */
static void drm_sched_job_finish(struct work_struct *work)
@@ -203,6 +274,7 @@ static void drm_sched_job_finish(struct work_struct *work)
struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
finish_work);
struct drm_gpu_scheduler *sched = s_job->sched;
+ unsigned long flags;
/*
* Canceling the timeout without removing our job from the ring mirror
@@ -213,14 +285,13 @@ static void drm_sched_job_finish(struct work_struct *work)
*/
cancel_delayed_work_sync(&sched->work_tdr);
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
/* remove job from ring_mirror_list */
- list_del(&s_job->node);
+ list_del_init(&s_job->node);
/* queue TDR for next job */
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
- dma_fence_put(&s_job->s_fence->finished);
sched->ops->free_job(s_job);
}
@@ -235,55 +306,33 @@ static void drm_sched_job_finish_cb(struct dma_fence *f,
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
struct drm_gpu_scheduler *sched = s_job->sched;
+ unsigned long flags;
dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
drm_sched_job_finish_cb);
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
- int r;
+ unsigned long flags;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
-
- spin_lock(&sched->job_list_lock);
- list_for_each_entry_reverse(job, &sched->ring_mirror_list, node) {
- struct drm_sched_fence *fence = job->s_fence;
-
- if (!dma_fence_remove_callback(fence->parent, &fence->cb))
- goto already_signaled;
- }
-
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
- spin_unlock(&sched->job_list_lock);
if (job)
- sched->ops->timedout_job(job);
-
- spin_lock(&sched->job_list_lock);
- list_for_each_entry(job, &sched->ring_mirror_list, node) {
- struct drm_sched_fence *fence = job->s_fence;
+ job->sched->ops->timedout_job(job);
- if (!fence->parent || !list_empty(&fence->cb.node))
- continue;
-
- r = dma_fence_add_callback(fence->parent, &fence->cb,
- drm_sched_process_job);
- if (r)
- drm_sched_process_job(fence->parent, &fence->cb);
-
-already_signaled:
- ;
- }
- spin_unlock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ drm_sched_start_timeout(sched);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
/**
@@ -297,9 +346,10 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
{
struct drm_sched_job *s_job;
struct drm_sched_entity *entity, *tmp;
+ unsigned long flags;
int i;
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
if (s_job->s_fence->parent &&
dma_fence_remove_callback(s_job->s_fence->parent,
@@ -309,7 +359,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
atomic_dec(&sched->hw_rq_count);
}
}
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
atomic_inc(&bad->karma);
@@ -347,9 +397,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *s_job, *tmp;
bool found_guilty = false;
+ unsigned long flags;
int r;
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *fence;
@@ -363,7 +414,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
dma_fence_set_error(&s_fence->finished, -ECANCELED);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
@@ -378,12 +429,14 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
r);
dma_fence_put(fence);
} else {
+ if (s_fence->finished.error < 0)
+ drm_sched_expel_job_unlocked(s_job);
drm_sched_process_job(NULL, &s_fence->cb);
}
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
}
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
EXPORT_SYMBOL(drm_sched_job_recovery);
@@ -406,6 +459,9 @@ int drm_sched_job_init(struct drm_sched_job *job,
struct drm_gpu_scheduler *sched;
drm_sched_entity_select_rq(entity);
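+ /* The entity may no longer have a run queue, e.g. if it was torn
+ * down in the meantime; there is then no scheduler to bind the
+ * job to.
+ */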
+ if (!entity->rq)
+ return -ENOENT;
+
sched = entity->rq->sched;
job->sched = sched;
@@ -424,6 +480,18 @@ int drm_sched_job_init(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_init);
/**
+ * drm_sched_job_cleanup - clean up scheduler job resources
+ *
+ * @job: scheduler job to clean up
+ */
+void drm_sched_job_cleanup(struct drm_sched_job *job)
+{
+ dma_fence_put(&job->s_fence->finished);
+ job->s_fence = NULL;
+}
+EXPORT_SYMBOL(drm_sched_job_cleanup);
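+
+/*
+ * A sketch of the intended use, assuming a hypothetical foo_job that
+ * embeds struct drm_sched_job: the driver calls this from its free_job
+ * callback once it is done with the job.
+ *
+ *	static void foo_free_job(struct drm_sched_job *s_job)
+ *	{
+ *		struct foo_job *job = container_of(s_job, struct foo_job, base);
+ *
+ *		drm_sched_job_cleanup(s_job);
+ *		kfree(job);
+ *	}
+ */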
+
+/**
* drm_sched_ready - is the scheduler ready
*
* @sched: scheduler instance
@@ -567,6 +635,8 @@ static int drm_sched_main(void *param)
r);
dma_fence_put(fence);
} else {
+ if (s_fence->finished.error < 0)
+ drm_sched_expel_job_unlocked(sched_job);
drm_sched_process_job(NULL, &s_fence->cb);
}
@@ -575,6 +645,15 @@ static int drm_sched_main(void *param)
return 0;
}
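+
+/*
+ * Remove a job from the ring mirror list without the caller holding
+ * job_list_lock; used for jobs whose finished fence already carries
+ * an error.
+ */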
+static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
+{
+ struct drm_gpu_scheduler *sched = s_job->sched;
+
+ spin_lock(&sched->job_list_lock);
+ list_del_init(&s_job->node);
+ spin_unlock(&sched->job_list_lock);
+}
+
/**
* drm_sched_init - Init a gpu scheduler instance
*
@@ -594,7 +673,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
long timeout,
const char *name)
{
- int i;
+ int i, ret;
sched->ops = ops;
sched->hw_submission_limit = hw_submission;
sched->name = name;
@@ -615,10 +694,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
/* Each scheduler will run on a separate kernel thread */
sched->thread = kthread_run(drm_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
+ ret = PTR_ERR(sched->thread);
+ sched->thread = NULL;
DRM_ERROR("Failed to create scheduler for %s.\n", name);
- return PTR_ERR(sched->thread);
+ return ret;
}
+ sched->ready = true;
return 0;
}
EXPORT_SYMBOL(drm_sched_init);
@@ -634,5 +716,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
+
+ sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index 9fc349fa18e9..1bb73dc4c88c 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1 +1,5 @@
-obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm-helper.o
+test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
+ test-drm_format.o test-drm_framebuffer.o \
+ test-drm_damage_helper.o
+
+obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o
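+
+# test-drm_modeset-y above lists the objects that kbuild links together
+# into the single test-drm_modeset.ko module.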
diff --git a/drivers/gpu/drm/selftests/drm_helper_selftests.h b/drivers/gpu/drm/selftests/drm_helper_selftests.h
deleted file mode 100644
index 9771290ed228..000000000000
--- a/drivers/gpu/drm/selftests/drm_helper_selftests.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
- *
- * The name is used as both an enum and expanded as igt__name to create
- * a module parameter. It must be unique and legal for a C identifier.
- *
- * Tests are executed in order by igt/drm_selftests_helper
- */
-selftest(check_plane_state, igt_check_plane_state)
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
new file mode 100644
index 000000000000..464753746013
--- /dev/null
+++ b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as igt__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * Tests are executed in order by igt/drm_selftests_helper
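+ *
+ * For example, selftest(check_plane_state, igt_check_plane_state) is
+ * expanded into an enum entry and an igt__check_plane_state module
+ * parameter.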
+ */
+selftest(check_plane_state, igt_check_plane_state)
+selftest(check_drm_format_block_width, igt_check_drm_format_block_width)
+selftest(check_drm_format_block_height, igt_check_drm_format_block_height)
+selftest(check_drm_format_min_pitch, igt_check_drm_format_min_pitch)
+selftest(check_drm_framebuffer_create, igt_check_drm_framebuffer_create)
+selftest(damage_iter_no_damage, igt_damage_iter_no_damage)
+selftest(damage_iter_no_damage_fractional_src, igt_damage_iter_no_damage_fractional_src)
+selftest(damage_iter_no_damage_src_moved, igt_damage_iter_no_damage_src_moved)
+selftest(damage_iter_no_damage_fractional_src_moved, igt_damage_iter_no_damage_fractional_src_moved)
+selftest(damage_iter_no_damage_not_visible, igt_damage_iter_no_damage_not_visible)
+selftest(damage_iter_no_damage_no_crtc, igt_damage_iter_no_damage_no_crtc)
+selftest(damage_iter_no_damage_no_fb, igt_damage_iter_no_damage_no_fb)
+selftest(damage_iter_simple_damage, igt_damage_iter_simple_damage)
+selftest(damage_iter_single_damage, igt_damage_iter_single_damage)
+selftest(damage_iter_single_damage_intersect_src, igt_damage_iter_single_damage_intersect_src)
+selftest(damage_iter_single_damage_outside_src, igt_damage_iter_single_damage_outside_src)
+selftest(damage_iter_single_damage_fractional_src, igt_damage_iter_single_damage_fractional_src)
+selftest(damage_iter_single_damage_intersect_fractional_src, igt_damage_iter_single_damage_intersect_fractional_src)
+selftest(damage_iter_single_damage_outside_fractional_src, igt_damage_iter_single_damage_outside_fractional_src)
+selftest(damage_iter_single_damage_src_moved, igt_damage_iter_single_damage_src_moved)
+selftest(damage_iter_single_damage_fractional_src_moved, igt_damage_iter_single_damage_fractional_src_moved)
+selftest(damage_iter_damage, igt_damage_iter_damage)
+selftest(damage_iter_damage_one_intersect, igt_damage_iter_damage_one_intersect)
+selftest(damage_iter_damage_one_outside, igt_damage_iter_damage_one_outside)
+selftest(damage_iter_damage_src_moved, igt_damage_iter_damage_src_moved)
+selftest(damage_iter_damage_not_visible, igt_damage_iter_damage_not_visible)
diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
new file mode 100644
index 000000000000..a2f753205a3e
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test case for drm_damage_helper functions
+ */
+
+#define pr_fmt(fmt) "drm_damage_helper: " fmt
+
+#include <drm/drm_damage_helper.h>
+
+#include "test-drm_modeset_common.h"
+
+static void set_plane_src(struct drm_plane_state *state, int x1, int y1, int x2,
+ int y2)
+{
+ state->src.x1 = x1;
+ state->src.y1 = y1;
+ state->src.x2 = x2;
+ state->src.y2 = y2;
+}
+
+static void set_damage_clip(struct drm_mode_rect *r, int x1, int y1, int x2,
+ int y2)
+{
+ r->x1 = x1;
+ r->y1 = y1;
+ r->x2 = x2;
+ r->y2 = y2;
+}
+
+static void set_damage_blob(struct drm_property_blob *damage_blob,
+ struct drm_mode_rect *r, uint32_t size)
+{
+ damage_blob->length = size;
+ damage_blob->data = r;
+}
+
+static void set_plane_damage(struct drm_plane_state *state,
+ struct drm_property_blob *damage_blob)
+{
+ state->fb_damage_clips = damage_blob;
+}
+
+static bool check_damage_clip(struct drm_plane_state *state, struct drm_rect *r,
+ int x1, int y1, int x2, int y2)
+{
+ /*
+ * Round down x1/y1 and round up x2/y2: damage is not in 16.16 fixed
+ * point, so the src is rounded outward to catch all affected pixels.
+ */
+ int src_x1 = state->src.x1 >> 16;
+ int src_y1 = state->src.y1 >> 16;
+ int src_x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
+ int src_y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
+
+ if (x1 >= x2 || y1 >= y2) {
+ pr_err("Cannot have damage clip with no dimention.\n");
+ return false;
+ }
+
+ if (x1 < src_x1 || y1 < src_y1 || x2 > src_x2 || y2 > src_y2) {
+ pr_err("Damage cannot be outside rounded plane src.\n");
+ return false;
+ }
+
+ if (r->x1 != x1 || r->y1 != y1 || r->x2 != x2 || r->y2 != y2) {
+ pr_err("Damage = %d %d %d %d\n", r->x1, r->y1, r->x2, r->y2);
+ return false;
+ }
+
+ return true;
+}
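+
+/*
+ * Worked example of the rounding above: a coordinate of 0x3fffe in
+ * 16.16 fixed point is 3.99997..., so as x1/y1 it rounds down to 3,
+ * while x2 = 0x3fffe + (1024 << 16) (1027.99997...) rounds up to 1028.
+ */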
+
+int igt_damage_iter_no_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src same as fb size. */
+ set_plane_src(&old_state, 0, 0, fb.width << 16, fb.height << 16);
+ set_plane_src(&state, 0, 0, fb.width << 16, fb.height << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 2048, 2048));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_fractional_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part and it moved since old plane state. */
+ set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_not_visible(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = false,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_no_crtc(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = NULL,
+ .fb = &fb,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_no_fb(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = NULL,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_simple_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage set to plane src */
+ set_damage_clip(&damage, 0, 0, 1024, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 1024, 768));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ set_damage_clip(&damage, 256, 192, 768, 576);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 768, 576));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_intersect_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage intersect with plane src. */
+ set_damage_clip(&damage, 256, 192, 1360, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage clipped to src.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 1024, 768));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_outside_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage clip outside plane src */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_damage_clip(&damage, 10, 10, 256, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 256, 330));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage intersect with plane src. */
+ set_damage_clip(&damage, 10, 1, 1360, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage clipped to rounded off src.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 4, 1029, 330));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_outside_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage clip outside plane src */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ set_damage_clip(&damage, 20, 30, 256, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_fractional_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src with fractional part moved since old plane state. */
+ set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage intersect with plane src. */
+ set_damage_clip(&damage, 20, 30, 1360, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+
+ return 0;
+}
+
+int igt_damage_iter_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
+ if (num_hits == 1)
+ FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
+ num_hits++;
+ }
+
+ FAIL(num_hits != 2, "Should return damage when set.");
+
+ return 0;
+}
+
+int igt_damage_iter_damage_one_intersect(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* 2 damage clips, one intersect plane src. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 2, 2, 1360, 1360);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
+ if (num_hits == 1)
+ FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+ num_hits++;
+ }
+
+ FAIL(num_hits != 2, "Should return damage when set.");
+
+ return 0;
+}
+
+int igt_damage_iter_damage_one_outside(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
+
+ return 0;
+}
+
+int igt_damage_iter_damage_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
+
+ return 0;
+}
+
+int igt_damage_iter_damage_not_visible(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = false,
+ };
+
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should not return any damage.");
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/selftests/test-drm_format.c b/drivers/gpu/drm/selftests/test-drm_format.c
new file mode 100644
index 000000000000..c5e212afa27a
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_format.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_format functions
+ */
+
+#define pr_fmt(fmt) "drm_format: " fmt
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "test-drm_modeset_common.h"
+
+int igt_check_drm_format_block_width(void *ignored)
+{
+ const struct drm_format_info *info = NULL;
+
+ /* Test invalid arguments */
+ FAIL_ON(drm_format_info_block_width(info, 0) != 0);
+ FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+ FAIL_ON(drm_format_info_block_width(info, 1) != 0);
+
+ /* Test 1 plane format */
+ info = drm_format_info(DRM_FORMAT_XRGB4444);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_width(info, 0) != 1);
+ FAIL_ON(drm_format_info_block_width(info, 1) != 0);
+ FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+
+ /* Test 2 planes format */
+ info = drm_format_info(DRM_FORMAT_NV12);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_width(info, 0) != 1);
+ FAIL_ON(drm_format_info_block_width(info, 1) != 1);
+ FAIL_ON(drm_format_info_block_width(info, 2) != 0);
+ FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+
+ /* Test 3 planes format */
+ info = drm_format_info(DRM_FORMAT_YUV422);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_width(info, 0) != 1);
+ FAIL_ON(drm_format_info_block_width(info, 1) != 1);
+ FAIL_ON(drm_format_info_block_width(info, 2) != 1);
+ FAIL_ON(drm_format_info_block_width(info, 3) != 0);
+ FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+
+ /* Test a tiled format */
+ info = drm_format_info(DRM_FORMAT_X0L0);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_width(info, 0) != 2);
+ FAIL_ON(drm_format_info_block_width(info, 1) != 0);
+ FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+
+ return 0;
+}
+
+int igt_check_drm_format_block_height(void *ignored)
+{
+ const struct drm_format_info *info = NULL;
+
+ /* Test invalid arguments */
+ FAIL_ON(drm_format_info_block_height(info, 0) != 0);
+ FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+ FAIL_ON(drm_format_info_block_height(info, 1) != 0);
+
+ /* Test 1 plane format */
+ info = drm_format_info(DRM_FORMAT_XRGB4444);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_height(info, 0) != 1);
+ FAIL_ON(drm_format_info_block_height(info, 1) != 0);
+ FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+
+ /* Test 2 planes format */
+ info = drm_format_info(DRM_FORMAT_NV12);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_height(info, 0) != 1);
+ FAIL_ON(drm_format_info_block_height(info, 1) != 1);
+ FAIL_ON(drm_format_info_block_height(info, 2) != 0);
+ FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+
+ /* Test 3 planes format */
+ info = drm_format_info(DRM_FORMAT_YUV422);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_height(info, 0) != 1);
+ FAIL_ON(drm_format_info_block_height(info, 1) != 1);
+ FAIL_ON(drm_format_info_block_height(info, 2) != 1);
+ FAIL_ON(drm_format_info_block_height(info, 3) != 0);
+ FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+
+ /* Test a tiled format */
+ info = drm_format_info(DRM_FORMAT_X0L0);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_height(info, 0) != 2);
+ FAIL_ON(drm_format_info_block_height(info, 1) != 0);
+ FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+
+ return 0;
+}
+
+int igt_check_drm_format_min_pitch(void *ignored)
+{
+ const struct drm_format_info *info = NULL;
+
+ /* Test invalid arguments */
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+ /* Test 1 plane 8 bits per pixel format */
+ info = drm_format_info(DRM_FORMAT_RGB332);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
+ (uint64_t)(UINT_MAX - 1));
+
+ /* Test 1 plane 16 bits per pixel format */
+ info = drm_format_info(DRM_FORMAT_XRGB4444);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 4);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1280);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 2048);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 3840);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 8192);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 1342);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX * 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
+ (uint64_t)(UINT_MAX - 1) * 2);
+
+ /* Test 1 plane 24 bits per pixel format */
+ info = drm_format_info(DRM_FORMAT_RGB888);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 3);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 6);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1920);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 3072);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 5760);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 12288);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 2013);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX * 3);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
+ (uint64_t)(UINT_MAX - 1) * 3);
+
+ /* Test 1 plane 32 bits per pixel format */
+ info = drm_format_info(DRM_FORMAT_ABGR8888);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 4);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 8);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 2560);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 4096);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 7680);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 16384);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 2684);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX * 4);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
+ (uint64_t)(UINT_MAX - 1) * 4);
+
+ /* Test 2 planes format */
+ info = drm_format_info(DRM_FORMAT_NV12);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 320) != 640);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 512) != 1024);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 960) != 1920);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 2048) != 4096);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 336) != 672);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1) !=
+ (uint64_t)UINT_MAX + 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
+ (uint64_t)(UINT_MAX - 1));
+ FAIL_ON(drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2) !=
+ (uint64_t)(UINT_MAX - 1));
+
+ /* Test 3 planes 8 bits per pixel format */
+ info = drm_format_info(DRM_FORMAT_YUV422);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 3, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 1) != 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 2) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 2) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 320) != 320);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 320) != 320);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 512) != 512);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 512) != 512);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 960) != 960);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 960) != 960);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 2048) != 2048);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 2048) != 2048);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 336) != 336);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 336) != 336);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1) !=
+ (uint64_t)UINT_MAX / 2 + 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, UINT_MAX / 2 + 1) !=
+ (uint64_t)UINT_MAX / 2 + 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1) / 2) !=
+ (uint64_t)(UINT_MAX - 1) / 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2) !=
+ (uint64_t)(UINT_MAX - 1) / 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, (UINT_MAX - 1) / 2) !=
+ (uint64_t)(UINT_MAX - 1) / 2);
+
+ /* Test tiled format */
+ info = drm_format_info(DRM_FORMAT_X0L2);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 4);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1280);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 2048);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 3840);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 8192);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 1342);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX * 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
+ (uint64_t)(UINT_MAX - 1) * 2);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
new file mode 100644
index 000000000000..a04d02dacce2
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_framebuffer functions
+ */
+
+#include <drm/drmP.h>
+#include "../drm_crtc_internal.h"
+
+#include "test-drm_modeset_common.h"
+
+#define MIN_WIDTH 4
+#define MAX_WIDTH 4096
+#define MIN_HEIGHT 4
+#define MAX_HEIGHT 4096
+
+struct drm_framebuffer_test {
+ int buffer_created;
+ struct drm_mode_fb_cmd2 cmd;
+ const char *name;
+};
+
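+/*
+ * Table-driven cases: each entry feeds one drm_mode_fb_cmd2 into
+ * drm_internal_framebuffer_create() through the mock device below;
+ * buffer_created == 1 means core validation is expected to pass and
+ * reach the driver's .fb_create hook.
+ */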
+static struct drm_framebuffer_test createbuffer_tests[] = {
+{ .buffer_created = 1, .name = "ABGR8888 normal sizes",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * 600, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 max sizes",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 pitch greater than min required",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH + 1, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 pitch less than min required",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH - 1, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Invalid width",
+ .cmd = { .width = MAX_WIDTH + 1, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * (MAX_WIDTH + 1), 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Invalid buffer handle",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 0, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "No pixel format",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = 0,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Width 0",
+ .cmd = { .width = 0, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Height 0",
+ .cmd = { .width = MAX_WIDTH, .height = 0, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Out of bound height * pitch combination",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX - 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 Large buffer offset",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 Valid buffer modifier",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ .flags = DRM_MODE_FB_MODIFIERS, .modifier = { AFBC_FORMAT_MOD_YTR, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Invalid buffer modifier(DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 Extra pitches without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 4 * MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Extra pitches with DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .pitches = { 4 * MAX_WIDTH, 4 * MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "NV12 Normal sizes",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .pitches = { 600, 600, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "NV12 Max sizes",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Invalid pitch",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH - 1, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Invalid modifier/misssing DRM_MODE_FB_MODIFIERS flag",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 different modifier per-plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "NV12 with DRM_FORMAT_MOD_SAMSUNG_64_32_TILE",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Valid modifiers without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Modifier for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Handle for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS, .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "NV12 Handle for inexistent plane without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 1 }, .pitches = { 600, 600, 600 },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Normal sizes",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .pitches = { 600, 300, 300 },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 DRM_MODE_FB_MODIFIERS set without modifier",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .pitches = { 600, 300, 300 },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Max sizes",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2),
+ DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Invalid pitch",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) - 1,
+ DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Different pitches",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1,
+ DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Different buffer offsets/pitches",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .offsets = { MAX_WIDTH, MAX_WIDTH + MAX_WIDTH * MAX_HEIGHT,
+ MAX_WIDTH + 2 * MAX_WIDTH * MAX_HEIGHT },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1, DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier set just for plane 0, without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier set just for planes 0, 1, without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier set just for plane 0, 1, with DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Valid modifier",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Different modifiers per plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_YTR,
+ AFBC_FORMAT_MOD_SPARSE },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
+ AFBC_FORMAT_MOD_SPARSE },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Normal sizes",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 1200, 0, 0 }
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Max sizes",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH, 0, 0 }
+ }
+},
+{ .buffer_created = 0, .name = "X0L2 Invalid pitch",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH - 1, 0, 0 }
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Pitch greater than minimum required",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
+ }
+},
+{ .buffer_created = 0, .name = "X0L2 Handle for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Offset for inexistent plane, without DRM_MODE_FB_MODIFIERS set",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .offsets = { 0, 0, 3 },
+ .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
+ }
+},
+{ .buffer_created = 0, .name = "X0L2 Modifier without DRM_MODE_FB_MODIFIERS set",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Valid modifier",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ }
+},
+{ .buffer_created = 0, .name = "X0L2 Modifier for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT,
+ .pixel_format = DRM_FORMAT_X0L2, .handles = { 1, 0, 0 },
+ .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+ .flags = DRM_MODE_FB_MODIFIERS,
+ }
+},
+};
+
+static struct drm_framebuffer *fb_create_mock(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ int *buffer_created = dev->dev_private;
+ *buffer_created = 1;
+ return ERR_PTR(-EINVAL);
+}
+
+static struct drm_mode_config_funcs mock_config_funcs = {
+ .fb_create = fb_create_mock,
+};
+
+static struct drm_device mock_drm_device = {
+ .mode_config = {
+ .min_width = MIN_WIDTH,
+ .max_width = MAX_WIDTH,
+ .min_height = MIN_HEIGHT,
+ .max_height = MAX_HEIGHT,
+ .allow_fb_modifiers = true,
+ .funcs = &mock_config_funcs,
+ },
+};
+
+static int execute_drm_mode_fb_cmd2(struct drm_mode_fb_cmd2 *r)
+{
+ int buffer_created = 0;
+ struct drm_framebuffer *fb;
+
+ mock_drm_device.dev_private = &buffer_created;
+ fb = drm_internal_framebuffer_create(&mock_drm_device, r, NULL);
+ return buffer_created;
+}
+
+int igt_check_drm_framebuffer_create(void *ignored)
+{
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(createbuffer_tests); i++) {
+ FAIL(createbuffer_tests[i].buffer_created !=
+ execute_drm_mode_fb_cmd2(&createbuffer_tests[i].cmd),
+ "Test %d: \"%s\" failed\n", i, createbuffer_tests[i].name);
+ }
+
+ return 0;
+}
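
The mock above never allocates anything: fb_create_mock() only records that drm_internal_framebuffer_create()'s parameter validation made it all the way to the driver callback, then returns an error so there is no framebuffer object to clean up. A rough userspace sketch of the same table-driven pattern (all names below are invented for illustration, not kernel API):

/* Table-driven validation check: each case pairs inputs with the
 * expected outcome, mirroring createbuffer_tests[] above. */
#include <stdio.h>

struct case_desc {
	int expect_ok;		/* like .buffer_created above */
	const char *name;
	unsigned int width, height;
};

/* Stand-in for the validation under test. */
static int validate_cmd(unsigned int width, unsigned int height)
{
	return width >= 1 && width <= 2048 && height >= 1 && height <= 2048;
}

int main(void)
{
	static const struct case_desc cases[] = {
		{ 1, "smallest buffer", 1, 1 },
		{ 0, "zero width", 0, 600 },
		{ 0, "too wide", 4096, 600 },
	};
	unsigned int i;
	int failed = 0;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		if (validate_cmd(cases[i].width, cases[i].height) !=
		    cases[i].expect_ok) {
			printf("case %u: \"%s\" failed\n", i, cases[i].name);
			failed = 1;
		}
	}
	return failed;
}
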
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.c b/drivers/gpu/drm/selftests/test-drm_modeset_common.c
new file mode 100644
index 000000000000..2a7f93774006
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common file for modeset selftests.
+ */
+
+#include <linux/module.h>
+
+#include "test-drm_modeset_common.h"
+
+#define TESTS "drm_modeset_selftests.h"
+#include "drm_selftest.h"
+
+#include "drm_selftest.c"
+
+static int __init test_drm_modeset_init(void)
+{
+ int err;
+
+ err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
+
+ return err > 0 ? 0 : err;
+}
+
+static void __exit test_drm_modeset_exit(void)
+{
+}
+
+module_init(test_drm_modeset_init);
+module_exit(test_drm_modeset_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
new file mode 100644
index 000000000000..8c76f09c12d1
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TEST_DRM_MODESET_COMMON_H__
+#define __TEST_DRM_MODESET_COMMON_H__
+
+#define FAIL(test, msg, ...) \
+ do { \
+ if (test) { \
+ pr_err("%s/%u: " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")
+
+int igt_check_plane_state(void *ignored);
+int igt_check_drm_format_block_width(void *ignored);
+int igt_check_drm_format_block_height(void *ignored);
+int igt_check_drm_format_min_pitch(void *ignored);
+int igt_check_drm_framebuffer_create(void *ignored);
+int igt_damage_iter_no_damage(void *ignored);
+int igt_damage_iter_no_damage_fractional_src(void *ignored);
+int igt_damage_iter_no_damage_src_moved(void *ignored);
+int igt_damage_iter_no_damage_fractional_src_moved(void *ignored);
+int igt_damage_iter_no_damage_not_visible(void *ignored);
+int igt_damage_iter_no_damage_no_crtc(void *ignored);
+int igt_damage_iter_no_damage_no_fb(void *ignored);
+int igt_damage_iter_simple_damage(void *ignored);
+int igt_damage_iter_single_damage(void *ignored);
+int igt_damage_iter_single_damage_intersect_src(void *ignored);
+int igt_damage_iter_single_damage_outside_src(void *ignored);
+int igt_damage_iter_single_damage_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_outside_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_src_moved(void *ignored);
+int igt_damage_iter_single_damage_fractional_src_moved(void *ignored);
+int igt_damage_iter_damage(void *ignored);
+int igt_damage_iter_damage_one_intersect(void *ignored);
+int igt_damage_iter_damage_one_outside(void *ignored);
+int igt_damage_iter_damage_src_moved(void *ignored);
+int igt_damage_iter_damage_not_visible(void *ignored);
+
+#endif
diff --git a/drivers/gpu/drm/selftests/test-drm-helper.c b/drivers/gpu/drm/selftests/test-drm_plane_helper.c
index a015712b43e8..0a9553f51796 100644
--- a/drivers/gpu/drm/selftests/test-drm-helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_plane_helper.c
@@ -1,27 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Test cases for the drm_kms_helper functions
+ * Test cases for the drm_plane_helper functions
*/
-#define pr_fmt(fmt) "drm_kms_helper: " fmt
-
-#include <linux/module.h>
+#define pr_fmt(fmt) "drm_plane_helper: " fmt
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_modes.h>
-#define TESTS "drm_helper_selftests.h"
-#include "drm_selftest.h"
-
-#define FAIL(test, msg, ...) \
- do { \
- if (test) { \
- pr_err("%s/%u: " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
- return -EINVAL; \
- } \
- } while (0)
-
-#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")
+#include "test-drm_modeset_common.h"
static void set_src(struct drm_plane_state *plane_state,
unsigned src_x, unsigned src_y,
@@ -85,7 +73,7 @@ static bool check_crtc_eq(struct drm_plane_state *plane_state,
return true;
}
-static int igt_check_plane_state(void *ignored)
+int igt_check_plane_state(void *ignored)
{
int ret;
@@ -229,19 +217,3 @@ static int igt_check_plane_state(void *ignored)
return 0;
}
-
-#include "drm_selftest.c"
-
-static int __init test_drm_helper_init(void)
-{
- int err;
-
- err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
-
- return err > 0 ? 0 : err;
-}
-
-module_init(test_drm_helper_init);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 6ececad6f845..8554102a6ead 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -194,7 +194,7 @@ static int shmob_drm_remove(struct platform_device *pdev)
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
drm_irq_uninstall(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return 0;
}
@@ -290,7 +290,7 @@ err_modeset_cleanup:
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
err_free_drm_dev:
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return ret;
}
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 5824e6aca8f4..ed76e52eb213 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -40,6 +40,8 @@ static void sti_crtc_atomic_disable(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("\n");
mixer->status = STI_MIXER_DISABLING;
+
+ drm_crtc_wait_one_vblank(crtc);
}
static int
@@ -250,10 +252,8 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
struct sti_compositor *compo;
struct drm_crtc *crtc = data;
struct sti_mixer *mixer;
- struct sti_private *priv;
unsigned int pipe;
- priv = crtc->dev->dev_private;
pipe = drm_crtc_index(crtc);
compo = container_of(nb, struct sti_compositor, vtg_vblank_nb[pipe]);
mixer = compo->mixer[pipe];
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 57b870e1e696..bc908453ffb3 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -332,7 +332,6 @@ static void sti_cursor_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 6dced8abcf16..ac54e0f9caea 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -206,6 +206,8 @@ static void sti_cleanup(struct drm_device *ddev)
struct sti_private *private = ddev->dev_private;
drm_kms_helper_poll_fini(ddev);
+ drm_atomic_helper_shutdown(ddev);
+ drm_mode_config_cleanup(ddev);
component_unbind_all(ddev->dev, ddev);
kfree(private);
ddev->dev_private = NULL;
@@ -230,7 +232,7 @@ static int sti_bind(struct device *dev)
ret = drm_dev_register(ddev, 0);
if (ret)
- goto err_register;
+ goto err_cleanup;
drm_mode_config_reset(ddev);
@@ -238,8 +240,6 @@ static int sti_bind(struct device *dev)
return 0;
-err_register:
- drm_mode_config_cleanup(ddev);
err_cleanup:
sti_cleanup(ddev);
err_drm_dev_put:
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index c32de6cbf061..cff7b2b5ee9e 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -517,7 +517,7 @@ static void sti_gdp_init(struct sti_gdp *gdp)
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
- base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL | GFP_DMA);
+ base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL);
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -883,7 +883,6 @@ static void sti_gdp_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 03ac3b4a4469..23565f52dd71 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1260,7 +1260,6 @@ static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index f2021b23554d..8dec001b9d37 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -26,7 +26,6 @@
static const struct drm_mode_config_funcs drv_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -52,7 +51,6 @@ DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops);
static struct drm_driver drv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = drm_fb_helper_lastclose,
.name = "stm",
.desc = "STMicroelectronics SoC DRM",
.date = "20170330",
@@ -72,6 +70,8 @@ static struct drm_driver drv_driver = {
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .get_scanout_position = ltdc_crtc_scanoutpos,
+ .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
};
static int drv_load(struct drm_device *ddev)
@@ -108,12 +108,6 @@ static int drv_load(struct drm_device *ddev)
drm_mode_config_reset(ddev);
drm_kms_helper_poll_init(ddev);
- if (ddev->mode_config.num_connector) {
- ret = drm_fb_cma_fbdev_init(ddev, 16, 0);
- if (ret)
- DRM_DEBUG("Warning: fails to create fbdev\n");
- }
-
platform_set_drvdata(pdev, ddev);
return 0;
@@ -126,7 +120,6 @@ static void drv_unload(struct drm_device *ddev)
{
DRM_DEBUG("%s\n", __func__);
- drm_fb_cma_fbdev_fini(ddev);
drm_kms_helper_poll_fini(ddev);
ltdc_unload(ddev);
drm_mode_config_cleanup(ddev);
@@ -154,6 +147,8 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
if (ret)
goto err_put;
+ drm_fbdev_generic_setup(ddev, 16);
+
return 0;
err_put:
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 808d9fb627e9..61dd661aa0ac 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -148,6 +148,8 @@
#define IER_TERRIE BIT(2) /* Transfer ERRor Interrupt Enable */
#define IER_RRIE BIT(3) /* Register Reload Interrupt enable */
+#define CPSR_CYPOS GENMASK(15, 0) /* Current Y position */
+
#define ISR_LIF BIT(0) /* Line Interrupt Flag */
#define ISR_FUIF BIT(1) /* Fifo Underrun Interrupt Flag */
#define ISR_TERRIF BIT(2) /* Transfer ERRor Interrupt Flag */
@@ -626,6 +628,49 @@ static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
reg_clear(ldev->regs, LTDC_IER, IER_LIE);
}
+bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct ltdc_device *ldev = ddev->dev_private;
+ int line, vactive_start, vactive_end, vtotal;
+
+ if (stime)
+ *stime = ktime_get();
+
+ /* The active area starts after vsync + front porch and ends
+ * at vsync + front porch + display size.
+ * The total height also includes the back porch.
+ * We have 3 possible cases to handle:
+ * - line < vactive_start: vpos = line - vactive_start and will be
+ * negative
+ * - vactive_start < line < vactive_end: vpos = line - vactive_start
+ * and will be positive
+ * - line > vactive_end: vpos = line - vtotal - vactive_start
+ * and will be negative
+ *
+ * Computations for the first two cases are identical, so we can
+ * simplify the code and only test whether line > vactive_end.
+ */
+ line = reg_read(ldev->regs, LTDC_CPSR) & CPSR_CYPOS;
+ vactive_start = reg_read(ldev->regs, LTDC_BPCR) & BPCR_AVBP;
+ vactive_end = reg_read(ldev->regs, LTDC_AWCR) & AWCR_AAH;
+ vtotal = reg_read(ldev->regs, LTDC_TWCR) & TWCR_TOTALH;
+
+ if (line > vactive_end)
+ *vpos = line - vtotal - vactive_start;
+ else
+ *vpos = line - vactive_start;
+
+ *hpos = 0;
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
+}
+
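
The vpos mapping above can be sanity-checked in isolation; this small sketch replays the three cases from the comment with made-up 1080p-like timings (plain C, no LTDC register reads):

/* Illustrative stand-alone check of the vpos mapping described in the
 * comment above (timing values invented, not real register reads). */
#include <stdio.h>

static int ltdc_vpos(int line, int vactive_start, int vactive_end, int vtotal)
{
	/* Lines past the active area belong to the upcoming frame, so
	 * report them as a negative offset from the next active start. */
	if (line > vactive_end)
		return line - vtotal - vactive_start;
	return line - vactive_start;
}

int main(void)
{
	/* 1080p-like timing: active video on lines 42..1121, 1125 total. */
	printf("%d\n", ltdc_vpos(41, 42, 1121, 1125));   /* -1: in vblank */
	printf("%d\n", ltdc_vpos(600, 42, 1121, 1125));  /* 558: scanning out */
	printf("%d\n", ltdc_vpos(1123, 42, 1121, 1125)); /* -44: in back porch */
	return 0;
}
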
static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index d5afb8960867..e46f477a8494 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -38,6 +38,11 @@ struct ltdc_device {
struct fps_info plane_fpsi[LTDC_MAX_LAYER];
};
+bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
int ltdc_load(struct drm_device *ddev);
void ltdc_unload(struct drm_device *ddev);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index bf49c55b0f2c..9e9255ee59cd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -48,8 +48,12 @@ static const u32 sunxi_rgb2yuv_coef[12] = {
/*
* These coefficients are taken from the A33 BSP from Allwinner.
*
- * The formula is for each component, each coefficient being multiplied by
- * 1024 and each constant being multiplied by 16:
+ * The first three values of each row are coded as 13-bit signed fixed-point
+ * numbers, with 10 bits for the fractional part. The fourth value is a
+ * constant coded as a 14-bit signed fixed-point number with 4 bits for the
+ * fractional part.
+ *
+ * The values in table order give the following colorspace translation:
* G = 1.164 * Y - 0.391 * U - 0.813 * V + 135
* R = 1.164 * Y + 1.596 * V - 222
* B = 1.164 * Y + 2.018 * U + 276
@@ -155,6 +159,36 @@ static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
return 0;
}
+static const uint32_t sun4i_backend_formats[] = {
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+};
+
+bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
+{
+ unsigned int i;
+
+ if (modifier != DRM_FORMAT_MOD_LINEAR)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
+ if (sun4i_backend_formats[i] == fmt)
+ return true;
+
+ return false;
+}
+
int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
int layer, struct drm_plane *plane)
{
@@ -395,6 +429,15 @@ int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
return 0;
}
+void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
+ int layer)
+{
+ regmap_update_bits(backend->engine.regs,
+ SUN4I_BACKEND_ATTCTL_REG0(layer),
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
+}
+
static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
{
u16 src_h = state->src_h >> 16;
@@ -413,11 +456,50 @@ static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
struct sun4i_backend *backend = layer->backend;
+ uint32_t format = state->fb->format->format;
+ uint64_t modifier = state->fb->modifier;
if (IS_ERR(backend->frontend))
return false;
- return sun4i_backend_plane_uses_scaler(state);
+ if (!sun4i_frontend_format_is_supported(format, modifier))
+ return false;
+
+ if (!sun4i_backend_format_is_supported(format, modifier))
+ return true;
+
+ /*
+ * TODO: The backend alone allows 2x and 4x integer scaling, including
+ * support for an alpha component (which the frontend doesn't support).
+ * In that case the backend should be used directly instead of the
+ * frontend, which requires an additional check here that returns false.
+ */
+
+ if (sun4i_backend_plane_uses_scaler(state))
+ return true;
+
+ /*
+ * Here the format is supported by both the frontend and the backend
+ * and no frontend scaling is required, so use the backend directly.
+ */
+ return false;
+}
+
+static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
+ bool *uses_frontend)
+{
+ if (sun4i_backend_plane_uses_frontend(state)) {
+ *uses_frontend = true;
+ return true;
+ }
+
+ *uses_frontend = false;
+
+ /* Scaling is not supported without the frontend. */
+ if (sun4i_backend_plane_uses_scaler(state))
+ return false;
+
+ return true;
}
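
Taken together, the routing above reduces to a short decision over three predicates: can the frontend take the format, can the backend take it, and is scaling required. A standalone sketch of that logic (illustrative only; it ignores the backend-only scaling TODO):

/* Truth-table sketch of the plane routing decided above, mirroring
 * the checks in sun4i_backend_plane_uses_frontend(). */
#include <stdio.h>

static int uses_frontend(int fe_ok, int be_ok, int scaled)
{
	if (!fe_ok)
		return 0;	/* frontend can't take the format at all */
	if (!be_ok)
		return 1;	/* only the frontend supports it */
	return scaled;		/* both support it: frontend only if scaling */
}

int main(void)
{
	printf("%d\n", uses_frontend(1, 1, 0)); /* 0: backend direct */
	printf("%d\n", uses_frontend(1, 0, 0)); /* 1: frontend-only format */
	printf("%d\n", uses_frontend(1, 1, 1)); /* 1: scaling needs frontend */
	return 0;
}
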
static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
@@ -460,14 +542,19 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
struct drm_framebuffer *fb = plane_state->fb;
struct drm_format_name_buf format_name;
- if (sun4i_backend_plane_uses_frontend(plane_state)) {
+ if (!sun4i_backend_plane_is_supported(plane_state,
+ &layer_state->uses_frontend))
+ return -EINVAL;
+
+ if (layer_state->uses_frontend) {
DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
plane->index);
-
- layer_state->uses_frontend = true;
num_frontend_planes++;
} else {
- layer_state->uses_frontend = false;
+ if (fb->format->is_yuv) {
+ DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
+ num_yuv_planes++;
+ }
}
DRM_DEBUG_DRIVER("Plane FB format is %s\n",
@@ -476,11 +563,6 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
num_alpha_planes++;
- if (fb->format->is_yuv) {
- DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
- num_yuv_planes++;
- }
-
DRM_DEBUG_DRIVER("Plane zpos is %d\n",
plane_state->normalized_zpos);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index e3d4c6035eb2..01f66463271b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -198,6 +198,7 @@ engine_to_sun4i_backend(struct sunxi_engine *engine)
void sun4i_backend_layer_enable(struct sun4i_backend *backend,
int layer, bool enable);
+bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier);
int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
int layer, struct drm_plane *plane);
int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
@@ -208,5 +209,7 @@ int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
int layer, uint32_t in_fmt);
int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend,
int layer, struct drm_plane *plane);
+void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
+ int layer);
#endif /* _SUN4I_BACKEND_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 1e41c3f5fd6d..9e4c375ccc96 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -28,13 +28,22 @@
#include "sun4i_tcon.h"
#include "sun8i_tcon_top.h"
+static int drm_sun4i_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ /* The hardware only allows even pitches for YUV buffers. */
+ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 2);
+
+ return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
+
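
The pitch computed above is plain integer arithmetic: round the per-line byte count up, then align it to 2 so subsampled YUV planes stay addressable. A quick userspace check with ALIGN() and DIV_ROUND_UP() expanded by hand:

/* Quick check of the pitch rounding above (userspace arithmetic only). */
#include <stdio.h>

static unsigned int dumb_pitch(unsigned int width, unsigned int bpp)
{
	unsigned int bytes = (width * bpp + 7) / 8;	/* DIV_ROUND_UP */

	return (bytes + 1) & ~1u;			/* ALIGN(bytes, 2) */
}

int main(void)
{
	printf("%u\n", dumb_pitch(321, 8));	/* 321 -> 322, even pitch */
	printf("%u\n", dumb_pitch(640, 12));	/* NV12-style 12bpp: 960 */
	return 0;
}
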
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
static struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
/* Generic Operations */
- .lastclose = drm_fb_helper_lastclose,
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",
@@ -43,7 +52,7 @@ static struct drm_driver sun4i_drv_driver = {
.minor = 0,
/* GEM Operations */
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = drm_sun4i_gem_dumb_create,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -105,12 +114,7 @@ static int sun4i_drv_bind(struct device *dev)
/* Remove early framebuffers (ie. simplefb) */
drm_fb_helper_remove_conflicting_framebuffers(NULL, "sun4i-drm-fb", false);
- /* Create our framebuffer */
- ret = sun4i_framebuffer_init(drm);
- if (ret) {
- dev_err(drm->dev, "Couldn't create our framebuffer\n");
- goto cleanup_mode_config;
- }
+ sun4i_framebuffer_init(drm);
/* Enable connectors polling */
drm_kms_helper_poll_init(drm);
@@ -119,11 +123,12 @@ static int sun4i_drv_bind(struct device *dev)
if (ret)
goto finish_poll;
+ drm_fbdev_generic_setup(drm, 32);
+
return 0;
finish_poll:
drm_kms_helper_poll_fini(drm);
- sun4i_framebuffer_free(drm);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
@@ -138,7 +143,6 @@ static void sun4i_drv_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
- sun4i_framebuffer_free(drm);
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
drm_dev_put(drm);
@@ -406,6 +410,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ .compatible = "allwinner,sun9i-a80-display-engine" },
{ .compatible = "allwinner,sun50i-a64-display-engine" },
+ { .compatible = "allwinner,sun50i-h6-display-engine" },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 5f29850ef8ac..cb828028ae06 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -12,8 +12,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drmP.h>
@@ -37,7 +35,6 @@ static int sun4i_de_atomic_check(struct drm_device *dev,
}
static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = sun4i_de_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
@@ -47,7 +44,7 @@ static struct drm_mode_config_helper_funcs sun4i_de_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
-int sun4i_framebuffer_init(struct drm_device *drm)
+void sun4i_framebuffer_init(struct drm_device *drm)
{
drm_mode_config_reset(drm);
@@ -56,11 +53,4 @@ int sun4i_framebuffer_init(struct drm_device *drm)
drm->mode_config.funcs = &sun4i_de_mode_config_funcs;
drm->mode_config.helper_private = &sun4i_de_mode_config_helpers;
-
- return drm_fb_cma_fbdev_init(drm, 32, 0);
-}
-
-void sun4i_framebuffer_free(struct drm_device *drm)
-{
- drm_fb_cma_fbdev_fini(drm);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.h b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
index 7ef0aed8384c..6fe5bd8c4026 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
@@ -13,7 +13,6 @@
#ifndef _SUN4I_FRAMEBUFFER_H_
#define _SUN4I_FRAMEBUFFER_H_
-int sun4i_framebuffer_init(struct drm_device *drm);
-void sun4i_framebuffer_free(struct drm_device *drm);
+void sun4i_framebuffer_init(struct drm_device *drm);
#endif /* _SUN4I_FRAMEBUFFER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index ddf6cfa6dd23..1a7ebc45747e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -107,8 +107,34 @@ EXPORT_SYMBOL(sun4i_frontend_update_buffer);
static int sun4i_frontend_drm_format_to_input_fmt(uint32_t fmt, u32 *val)
{
switch (fmt) {
- case DRM_FORMAT_ARGB8888:
- *val = 5;
+ case DRM_FORMAT_XRGB8888:
+ *val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_RGB;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sun4i_frontend_drm_format_to_input_mode(uint32_t fmt, u32 *val)
+{
+ if (drm_format_num_planes(fmt) == 1)
+ *val = SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PACKED;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sun4i_frontend_drm_format_to_input_sequence(uint32_t fmt, u32 *val)
+{
+ switch (fmt) {
+ case DRM_FORMAT_BGRX8888:
+ *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_BGRX;
+ return 0;
+
+ case DRM_FORMAT_XRGB8888:
+ *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_XRGB;
return 0;
default:
@@ -119,9 +145,12 @@ static int sun4i_frontend_drm_format_to_input_fmt(uint32_t fmt, u32 *val)
static int sun4i_frontend_drm_format_to_output_fmt(uint32_t fmt, u32 *val)
{
switch (fmt) {
+ case DRM_FORMAT_BGRX8888:
+ *val = SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_BGRX8888;
+ return 0;
+
case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- *val = 2;
+ *val = SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_XRGB8888;
return 0;
default:
@@ -129,22 +158,54 @@ static int sun4i_frontend_drm_format_to_output_fmt(uint32_t fmt, u32 *val)
}
}
+static const uint32_t sun4i_frontend_formats[] = {
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XRGB8888,
+};
+
+bool sun4i_frontend_format_is_supported(uint32_t fmt, uint64_t modifier)
+{
+ unsigned int i;
+
+ if (modifier != DRM_FORMAT_MOD_LINEAR)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_frontend_formats); i++)
+ if (sun4i_frontend_formats[i] == fmt)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(sun4i_frontend_format_is_supported);
+
int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
struct drm_plane *plane, uint32_t out_fmt)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
+ uint32_t format = fb->format->format;
u32 out_fmt_val;
- u32 in_fmt_val;
+ u32 in_fmt_val, in_mod_val, in_ps_val;
int ret;
- ret = sun4i_frontend_drm_format_to_input_fmt(fb->format->format,
- &in_fmt_val);
+ ret = sun4i_frontend_drm_format_to_input_fmt(format, &in_fmt_val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid input format\n");
return ret;
}
+ ret = sun4i_frontend_drm_format_to_input_mode(format, &in_mod_val);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Invalid input mode\n");
+ return ret;
+ }
+
+ ret = sun4i_frontend_drm_format_to_input_sequence(format, &in_ps_val);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Invalid pixel sequence\n");
+ return ret;
+ }
+
ret = sun4i_frontend_drm_format_to_output_fmt(out_fmt, &out_fmt_val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid output format\n");
@@ -162,10 +223,12 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG, 0x400);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG, 0x400);
+ regmap_update_bits(frontend->regs, SUN4I_FRONTEND_BYPASS_REG,
+ SUN4I_FRONTEND_BYPASS_CSC_EN,
+ SUN4I_FRONTEND_BYPASS_CSC_EN);
+
regmap_write(frontend->regs, SUN4I_FRONTEND_INPUT_FMT_REG,
- SUN4I_FRONTEND_INPUT_FMT_DATA_MOD(1) |
- SUN4I_FRONTEND_INPUT_FMT_DATA_FMT(in_fmt_val) |
- SUN4I_FRONTEND_INPUT_FMT_PS(1));
+ in_mod_val | in_fmt_val | in_ps_val);
/*
* TODO: It looks like the A31 and A80 at least will need the
@@ -173,7 +236,7 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
* ARGB8888).
*/
regmap_write(frontend->regs, SUN4I_FRONTEND_OUTPUT_FMT_REG,
- SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT(out_fmt_val));
+ out_fmt_val);
return 0;
}
@@ -183,16 +246,24 @@ void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ uint32_t luma_width, luma_height;
+ uint32_t chroma_width, chroma_height;
/* Set height and width */
DRM_DEBUG_DRIVER("Frontend size W: %u H: %u\n",
state->crtc_w, state->crtc_h);
+
+ luma_width = state->src_w >> 16;
+ luma_height = state->src_h >> 16;
+
+ chroma_width = DIV_ROUND_UP(luma_width, fb->format->hsub);
+ chroma_height = DIV_ROUND_UP(luma_height, fb->format->vsub);
+
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_INSIZE_REG,
- SUN4I_FRONTEND_INSIZE(state->src_h >> 16,
- state->src_w >> 16));
+ SUN4I_FRONTEND_INSIZE(luma_height, luma_width));
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_INSIZE_REG,
- SUN4I_FRONTEND_INSIZE(state->src_h >> 16,
- state->src_w >> 16));
+ SUN4I_FRONTEND_INSIZE(chroma_height, chroma_width));
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_OUTSIZE_REG,
SUN4I_FRONTEND_OUTSIZE(state->crtc_h, state->crtc_w));
@@ -200,14 +271,14 @@ void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
SUN4I_FRONTEND_OUTSIZE(state->crtc_h, state->crtc_w));
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZFACT_REG,
- state->src_w / state->crtc_w);
+ (luma_width << 16) / state->crtc_w);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZFACT_REG,
- state->src_w / state->crtc_w);
+ (chroma_width << 16) / state->crtc_w);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTFACT_REG,
- state->src_h / state->crtc_h);
+ (luma_height << 16) / state->crtc_h);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTFACT_REG,
- state->src_h / state->crtc_h);
+ (chroma_height << 16) / state->crtc_h);
regmap_write_bits(frontend->regs, SUN4I_FRONTEND_FRM_CTRL_REG,
SUN4I_FRONTEND_FRM_CTRL_REG_RDY,
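
The HORZFACT/VERTFACT values written above are 16.16 fixed-point src/dst ratios, computed per channel so that subsampled chroma gets its own factor. A minimal arithmetic sketch (no hardware access; the sizes are made up):

/* Sketch of the 16.16 fixed-point scale factor programmed into
 * CH{0,1}_{HORZ,VERT}FACT above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t scale_factor(uint32_t src, uint32_t dst)
{
	return ((uint64_t)src << 16) / dst;
}

int main(void)
{
	/* Upscaling 640 luma pixels to 1920: factor is 1/3 in 16.16. */
	printf("0x%08x\n", scale_factor(640, 1920));	/* 0x00005555 */
	/* 4:2:0 chroma is half the luma width: 320 -> 1920. */
	printf("0x%08x\n", scale_factor(320, 1920));	/* 0x00002aaa */
	return 0;
}
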
@@ -339,10 +410,6 @@ static int sun4i_frontend_runtime_resume(struct device *dev)
SUN4I_FRONTEND_EN_EN,
SUN4I_FRONTEND_EN_EN);
- regmap_update_bits(frontend->regs, SUN4I_FRONTEND_BYPASS_REG,
- SUN4I_FRONTEND_BYPASS_CSC_EN,
- SUN4I_FRONTEND_BYPASS_CSC_EN);
-
sun4i_frontend_scaler_init(frontend);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.h b/drivers/gpu/drm/sun4i/sun4i_frontend.h
index 02661ce81f3e..ad146e8d8d70 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.h
@@ -26,12 +26,14 @@
#define SUN4I_FRONTEND_LINESTRD0_REG 0x040
#define SUN4I_FRONTEND_INPUT_FMT_REG 0x04c
-#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD(mod) ((mod) << 8)
-#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT(fmt) ((fmt) << 4)
-#define SUN4I_FRONTEND_INPUT_FMT_PS(ps) (ps)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PACKED (1 << 8)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_RGB (5 << 4)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_BGRX 0
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_XRGB 1
#define SUN4I_FRONTEND_OUTPUT_FMT_REG 0x05c
-#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT(fmt) (fmt)
+#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_BGRX8888 1
+#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_XRGB8888 2
#define SUN4I_FRONTEND_CH0_INSIZE_REG 0x100
#define SUN4I_FRONTEND_INSIZE(h, w) ((((h) - 1) << 16) | (((w) - 1)))
@@ -95,5 +97,6 @@ void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
struct drm_plane *plane);
int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
struct drm_plane *plane, uint32_t out_fmt);
+bool sun4i_frontend_format_is_supported(uint32_t fmt, uint64_t modifier);
#endif /* _SUN4I_FRONTEND_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index 3ecffa52c814..fb985ba1a176 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -35,7 +35,7 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
{
unsigned long best_rate = 0;
u8 best_m = 0, m;
- bool is_double;
+ bool is_double = false;
for (m = div_offset ?: 1; m < (16 + div_offset); m++) {
u8 d;
@@ -52,7 +52,7 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
(rate - tmp_rate) < (rate - best_rate)) {
best_rate = tmp_rate;
best_m = m;
- is_double = d;
+ is_double = (d == 2);
}
}
}
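
The surrounding loop searches divider settings for the rate closest to the target, which is why is_double must start out defined: if no candidate ever improves on the initial best rate, the variable is still consumed afterwards. A simplified standalone sketch of such a search (made-up rates; the real loop also handles a divider offset):

/* Divider search sketch: scan divider m and the optional doubler d,
 * keeping the closest rate not above the target (simplified). */
#include <stdio.h>

int main(void)
{
	unsigned long parent = 297000000, target = 40000000;
	unsigned long best_rate = 0;
	unsigned int best_m = 0, m, d;
	int is_double = 0;	/* must start defined, as in the fix above */

	for (m = 1; m < 16; m++) {
		for (d = 1; d <= 2; d++) {
			unsigned long tmp = parent / d / m;

			if (tmp > target)
				continue;
			if (tmp > best_rate) {
				best_rate = tmp;
				best_m = m;
				is_double = (d == 2);
			}
		}
	}
	printf("m=%u double=%d rate=%lu\n", best_m, is_double, best_rate);
	return 0;
}
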
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 78f77af8805a..29631e0efde3 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drmP.h>
#include "sun4i_backend.h"
@@ -92,14 +93,16 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
struct sun4i_backend *backend = layer->backend;
struct sun4i_frontend *frontend = backend->frontend;
+ sun4i_backend_cleanup_layer(backend, layer->id);
+
if (layer_state->uses_frontend) {
sun4i_frontend_init(frontend);
sun4i_frontend_update_coord(frontend, plane);
sun4i_frontend_update_buffer(frontend, plane);
sun4i_frontend_update_formats(frontend, plane,
- DRM_FORMAT_ARGB8888);
+ DRM_FORMAT_XRGB8888);
sun4i_backend_update_layer_frontend(backend, layer->id,
- DRM_FORMAT_ARGB8888);
+ DRM_FORMAT_XRGB8888);
sun4i_frontend_enable(frontend);
} else {
sun4i_backend_update_layer_formats(backend, layer->id, plane);
@@ -112,6 +115,7 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_disable = sun4i_backend_layer_atomic_disable,
.atomic_update = sun4i_backend_layer_atomic_update,
};
@@ -125,10 +129,11 @@ static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
.update_plane = drm_atomic_helper_update_plane,
};
-static const uint32_t sun4i_backend_layer_formats[] = {
+static const uint32_t sun4i_layer_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGB888,
@@ -154,8 +159,8 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun4i_backend_layer_funcs,
- sun4i_backend_layer_formats,
- ARRAY_SIZE(sun4i_backend_layer_formats),
+ sun4i_layer_formats,
+ ARRAY_SIZE(sun4i_layer_formats),
NULL, type, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index af7dcb6da351..e7eb0d1e17be 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -75,7 +75,7 @@ static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling LVDS output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_prepare(tcon->panel);
drm_panel_enable(tcon->panel);
}
@@ -88,7 +88,7 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling LVDS output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_disable(tcon->panel);
drm_panel_unprepare(tcon->panel);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index bf068da6b12e..f4a22689eb54 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -135,7 +135,7 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling RGB output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_prepare(tcon->panel);
drm_panel_enable(tcon->panel);
}
@@ -148,7 +148,7 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling RGB output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_disable(tcon->panel);
drm_panel_unprepare(tcon->panel);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index c78cd35a1294..0420f5c978b9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -478,8 +478,11 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
}
static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
+ struct drm_connector *connector = sun4i_tcon_get_connector(encoder);
+ struct drm_display_info display_info = connector->display_info;
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
@@ -491,7 +494,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
sun4i_tcon0_mode_set_common(tcon, mode);
/* Set dithering if needed */
- sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
+ sun4i_tcon0_mode_set_dithering(tcon, connector);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
@@ -540,6 +543,9 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+ if (display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
+ val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
+
/*
* On A20 and similar SoCs, the only way to achieve a positive edge
* (rising edge) is to set the dclk clock phase to 2/3 (240°).
@@ -555,20 +561,16 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
* The following code is a way to avoid quirks all around the TCON
* and DOTCLOCK drivers.
*/
- if (!IS_ERR(tcon->panel)) {
- struct drm_panel *panel = tcon->panel;
- struct drm_connector *connector = panel->connector;
- struct drm_display_info display_info = connector->display_info;
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ clk_set_phase(tcon->dclk, 240);
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
- clk_set_phase(tcon->dclk, 240);
-
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
- clk_set_phase(tcon->dclk, 0);
- }
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ clk_set_phase(tcon->dclk, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
- SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
+ SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
+ SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
+ SUN4I_TCON0_IO_POL_DE_NEGATIVE,
val);
/* Map output pins to channel 0 */
@@ -683,7 +685,7 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
break;
case DRM_MODE_ENCODER_NONE:
- sun4i_tcon0_mode_set_rgb(tcon, mode);
+ sun4i_tcon0_mode_set_rgb(tcon, encoder, mode);
sun4i_tcon_set_mux(tcon, 0, encoder);
break;
case DRM_MODE_ENCODER_TVDAC:
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 3d492c8be1fc..b5214d71610f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -116,6 +116,7 @@
#define SUN4I_TCON0_IO_POL_REG 0x88
#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
+#define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27)
#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
#define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24)
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index b14925b40ccf..e7608a72f26f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -34,6 +34,41 @@ static const u32 yvu2rgb[] = {
0x000004A8, 0x00000000, 0x00000813, 0xFFFBAC4A,
};
+/*
+ * DE3 has slightly different CSC units. Factors are in two's complement
+ * format. The first three factors in a row are multiplication factors with
+ * 17 bits for the fractional part. The fourth value in a row is comprised of
+ * two factors: the upper 16 bits represent the difference, which is
+ * subtracted from the input value before multiplication, and the lower
+ * 16 bits represent the constant, which is added at the end.
+ *
+ * x' = c00 * (x + d0) + c01 * (y + d1) + c02 * (z + d2) + const0
+ * y' = c10 * (x + d0) + c11 * (y + d1) + c12 * (z + d2) + const1
+ * z' = c20 * (x + d0) + c21 * (y + d1) + c22 * (z + d2) + const2
+ *
+ * Note that the above formula holds only for the blender CSC. Other DE3
+ * CSC units take only a positive value for the difference. From what can
+ * be deduced from the BSP driver code, those units probably assume
+ * automatically that the difference has to be subtracted.
+ *
+ * Layout of factors in table:
+ * c00 c01 c02 [d0 const0]
+ * c10 c11 c12 [d1 const1]
+ * c20 c21 c22 [d2 const2]
+ */
+
+static const u32 yuv2rgb_de3[] = {
+ 0x0002542a, 0x00000000, 0x0003312a, 0xffc00000,
+ 0x0002542a, 0xffff376b, 0xfffe5fc3, 0xfe000000,
+ 0x0002542a, 0x000408d3, 0x00000000, 0xfe000000,
+};
+
+static const u32 yvu2rgb_de3[] = {
+ 0x0002542a, 0x0003312a, 0x00000000, 0xffc00000,
+ 0x0002542a, 0xfffe5fc3, 0xffff376b, 0xfe000000,
+ 0x0002542a, 0x00000000, 0x000408d3, 0xfe000000,
+};
+
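
The encoding described above can be reproduced with ordinary arithmetic; this sketch derives two of the table entries from their nominal BT.601 coefficients (coefficient values assumed here, checked against 0x0002542a and 0xffff376b in the tables):

/* Sketch: encode a CSC multiplication factor in the DE3 two's-complement
 * fixed-point format (17 fractional bits) described above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t de3_csc_factor(double coef)
{
	int32_t fixed = (int32_t)(coef * (1 << 17) + (coef >= 0 ? 0.5 : -0.5));

	return (uint32_t)fixed;
}

int main(void)
{
	printf("0x%08x\n", de3_csc_factor(255.0 / 219.0)); /* luma: 0x0002542a */
	printf("0x%08x\n", de3_csc_factor(-0.391762));     /* U->G: 0xffff376b */
	return 0;
}
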
static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
enum sun8i_csc_mode mode)
{
@@ -61,6 +96,28 @@ static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
}
}
+static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
+ enum sun8i_csc_mode mode)
+{
+ const u32 *table;
+ u32 base_reg;
+
+ switch (mode) {
+ case SUN8I_CSC_MODE_YUV2RGB:
+ table = yuv2rgb_de3;
+ break;
+ case SUN8I_CSC_MODE_YVU2RGB:
+ table = yvu2rgb_de3;
+ break;
+ default:
+ DRM_WARN("Wrong CSC mode specified.\n");
+ return;
+ }
+
+ base_reg = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0, 0);
+ regmap_bulk_write(map, base_reg, table, 12);
+}
+
static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
{
u32 val;
@@ -73,11 +130,32 @@ static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
regmap_update_bits(map, SUN8I_CSC_CTRL(base), SUN8I_CSC_CTRL_EN, val);
}
+static void sun8i_de3_ccsc_enable(struct regmap *map, int layer, bool enable)
+{
+ u32 val, mask;
+
+ mask = SUN50I_MIXER_BLEND_CSC_CTL_EN(layer);
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ regmap_update_bits(map, SUN50I_MIXER_BLEND_CSC_CTL(DE3_BLD_BASE),
+ mask, val);
+}
+
void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
enum sun8i_csc_mode mode)
{
u32 base;
+ if (mixer->cfg->is_de3) {
+ sun8i_de3_ccsc_set_coefficients(mixer->engine.regs,
+ layer, mode);
+ return;
+ }
+
base = ccsc_base[mixer->cfg->ccsc][layer];
sun8i_csc_set_coefficients(mixer->engine.regs, base, mode);
@@ -87,6 +165,11 @@ void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable)
{
u32 base;
+ if (mixer->cfg->is_de3) {
+ sun8i_de3_ccsc_enable(mixer->engine.regs, layer, enable);
+ return;
+ }
+
base = ccsc_base[mixer->cfg->ccsc][layer];
sun8i_csc_enable(mixer->engine.regs, base, enable);
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index ed2983770e9c..dc47720c99ba 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -5,6 +5,7 @@
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <drm/drm_of.h>
@@ -20,7 +21,8 @@ static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
{
struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder);
- clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
+ if (hdmi->quirks->set_rate)
+ clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
}
static const struct drm_encoder_helper_funcs
@@ -33,8 +35,8 @@ static const struct drm_encoder_funcs sun8i_dw_hdmi_encoder_funcs = {
};
static enum drm_mode_status
-sun8i_dw_hdmi_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
+sun8i_dw_hdmi_mode_valid_a83t(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
{
if (mode->clock > 297000)
return MODE_CLOCK_HIGH;
@@ -42,6 +44,17 @@ sun8i_dw_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static enum drm_mode_status
+sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ /* This is the maximum for HDMI 2.0b (4K@60Hz) */
+ if (mode->clock > 594000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
static bool sun8i_dw_hdmi_node_is_tcon_top(struct device_node *node)
{
return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
@@ -102,6 +115,8 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = &pdev->dev;
encoder = &hdmi->encoder;
+ hdmi->quirks = of_device_get_match_data(dev);
+
encoder->possible_crtcs =
sun8i_dw_hdmi_find_possible_crtcs(drm, dev->of_node);
/*
@@ -168,10 +183,8 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
sun8i_hdmi_phy_init(hdmi->phy);
- plat_data->mode_valid = &sun8i_dw_hdmi_mode_valid;
- plat_data->phy_ops = sun8i_hdmi_phy_get_ops();
- plat_data->phy_name = "sun8i_dw_hdmi_phy";
- plat_data->phy_data = hdmi->phy;
+ plat_data->mode_valid = hdmi->quirks->mode_valid;
+ sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
platform_set_drvdata(pdev, hdmi);
@@ -230,8 +243,24 @@ static int sun8i_dw_hdmi_remove(struct platform_device *pdev)
return 0;
}
+static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
+ .mode_valid = sun8i_dw_hdmi_mode_valid_a83t,
+ .set_rate = true,
+};
+
+static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
+ .mode_valid = sun8i_dw_hdmi_mode_valid_h6,
+};
+
static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
- { .compatible = "allwinner,sun8i-a83t-dw-hdmi" },
+ {
+ .compatible = "allwinner,sun8i-a83t-dw-hdmi",
+ .data = &sun8i_a83t_quirks,
+ },
+ {
+ .compatible = "allwinner,sun50i-h6-dw-hdmi",
+ .data = &sun50i_h6_quirks,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids);
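
Quirk selection follows the usual OF match-data pattern: the compatible string that probed the device picks the quirks struct at bind time. A userspace sketch of that lookup (structure simplified; max_clock_khz is an illustrative field standing in for the mode_valid callback):

/* Match-data lookup sketch, as of_device_get_match_data() does for
 * the driver above. */
#include <stdio.h>
#include <string.h>

struct quirks {
	unsigned int max_clock_khz;	/* illustrative, not the real field */
	int set_rate;
};

static const struct { const char *compatible; struct quirks q; } table[] = {
	{ "allwinner,sun8i-a83t-dw-hdmi", { 297000, 1 } },
	{ "allwinner,sun50i-h6-dw-hdmi", { 594000, 0 } },
};

static const struct quirks *get_match_data(const char *compatible)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!strcmp(table[i].compatible, compatible))
			return &table[i].q;
	return NULL;
}

int main(void)
{
	const struct quirks *q = get_match_data("allwinner,sun50i-h6-dw-hdmi");

	if (q)
		printf("max clock: %u kHz, set_rate: %d\n",
		       q->max_clock_khz, q->set_rate);
	return 0;
}
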
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index 7fdc1ecd2892..720c5aa8adc1 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -150,6 +150,10 @@ struct sun8i_hdmi_phy;
struct sun8i_hdmi_phy_variant {
bool has_phy_clk;
bool has_second_pll;
+ unsigned int is_custom_phy : 1;
+ const struct dw_hdmi_curr_ctrl *cur_ctr;
+ const struct dw_hdmi_mpll_config *mpll_cfg;
+ const struct dw_hdmi_phy_config *phy_cfg;
void (*phy_init)(struct sun8i_hdmi_phy *phy);
void (*phy_disable)(struct dw_hdmi *hdmi,
struct sun8i_hdmi_phy *phy);
@@ -170,6 +174,12 @@ struct sun8i_hdmi_phy {
struct sun8i_hdmi_phy_variant *variant;
};
+struct sun8i_dw_hdmi_quirks {
+ enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+ const struct drm_display_mode *mode);
+ unsigned int set_rate : 1;
+};
+
struct sun8i_dw_hdmi {
struct clk *clk_tmds;
struct device *dev;
@@ -178,6 +188,7 @@ struct sun8i_dw_hdmi {
struct sun8i_hdmi_phy *phy;
struct dw_hdmi_plat_data plat_data;
struct regulator *regulator;
+ const struct sun8i_dw_hdmi_quirks *quirks;
struct reset_control *rst_ctrl;
};
@@ -191,7 +202,8 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
-const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void);
+void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
+ struct dw_hdmi_plat_data *plat_data);
int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
bool second_parent);
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 471993097ced..66ea3a902e36 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -14,6 +14,122 @@
*/
#define I2C_ADDR 0x69
+static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = {
+ {
+ 30666000, {
+ { 0x00b3, 0x0000 },
+ { 0x2153, 0x0000 },
+ { 0x40f3, 0x0000 },
+ },
+ }, {
+ 36800000, {
+ { 0x00b3, 0x0000 },
+ { 0x2153, 0x0000 },
+ { 0x40a2, 0x0001 },
+ },
+ }, {
+ 46000000, {
+ { 0x00b3, 0x0000 },
+ { 0x2142, 0x0001 },
+ { 0x40a2, 0x0001 },
+ },
+ }, {
+ 61333000, {
+ { 0x0072, 0x0001 },
+ { 0x2142, 0x0001 },
+ { 0x40a2, 0x0001 },
+ },
+ }, {
+ 73600000, {
+ { 0x0072, 0x0001 },
+ { 0x2142, 0x0001 },
+ { 0x4061, 0x0002 },
+ },
+ }, {
+ 92000000, {
+ { 0x0072, 0x0001 },
+ { 0x2145, 0x0002 },
+ { 0x4061, 0x0002 },
+ },
+ }, {
+ 122666000, {
+ { 0x0051, 0x0002 },
+ { 0x2145, 0x0002 },
+ { 0x4061, 0x0002 },
+ },
+ }, {
+ 147200000, {
+ { 0x0051, 0x0002 },
+ { 0x2145, 0x0002 },
+ { 0x4064, 0x0003 },
+ },
+ }, {
+ 184000000, {
+ { 0x0051, 0x0002 },
+ { 0x214c, 0x0003 },
+ { 0x4064, 0x0003 },
+ },
+ }, {
+ 226666000, {
+ { 0x0040, 0x0003 },
+ { 0x214c, 0x0003 },
+ { 0x4064, 0x0003 },
+ },
+ }, {
+ 272000000, {
+ { 0x0040, 0x0003 },
+ { 0x214c, 0x0003 },
+ { 0x5a64, 0x0003 },
+ },
+ }, {
+ 340000000, {
+ { 0x0040, 0x0003 },
+ { 0x3b4c, 0x0003 },
+ { 0x5a64, 0x0003 },
+ },
+ }, {
+ 594000000, {
+ { 0x1a40, 0x0003 },
+ { 0x3b4c, 0x0003 },
+ { 0x5a64, 0x0003 },
+ },
+ }, {
+ ~0UL, {
+ { 0x0000, 0x0000 },
+ { 0x0000, 0x0000 },
+ { 0x0000, 0x0000 },
+ },
+ }
+};
+
+static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = {
+ /* pixelclk bpp8 bpp10 bpp12 */
+ { 25175000, { 0x0000, 0x0000, 0x0000 }, },
+ { 27000000, { 0x0012, 0x0000, 0x0000 }, },
+ { 59400000, { 0x0008, 0x0008, 0x0008 }, },
+ { 72000000, { 0x0008, 0x0008, 0x001b }, },
+ { 74250000, { 0x0013, 0x0013, 0x0013 }, },
+ { 90000000, { 0x0008, 0x001a, 0x001b }, },
+ { 118800000, { 0x001b, 0x001a, 0x001b }, },
+ { 144000000, { 0x001b, 0x001a, 0x0034 }, },
+ { 180000000, { 0x001b, 0x0033, 0x0034 }, },
+ { 216000000, { 0x0036, 0x0033, 0x0034 }, },
+ { 237600000, { 0x0036, 0x0033, 0x001b }, },
+ { 288000000, { 0x0036, 0x001b, 0x001b }, },
+ { 297000000, { 0x0019, 0x001b, 0x0019 }, },
+ { 330000000, { 0x0036, 0x001b, 0x001b }, },
+ { 594000000, { 0x003f, 0x001b, 0x001b }, },
+ { ~0UL, { 0x0000, 0x0000, 0x0000 }, }
+};
+
+static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
+ /*pixelclk symbol term vlev*/
+ { 74250000, 0x8009, 0x0004, 0x0232},
+ { 148500000, 0x8029, 0x0004, 0x0273},
+ { 594000000, 0x8039, 0x0004, 0x014a},
+ { ~0UL, 0x0000, 0x0000, 0x0000}
+};
+
static int sun8i_hdmi_phy_config_a83t(struct dw_hdmi *hdmi,
struct sun8i_hdmi_phy *phy,
unsigned int clk_rate)
@@ -279,8 +395,31 @@ static const struct dw_hdmi_phy_ops sun8i_hdmi_phy_ops = {
.setup_hpd = &dw_hdmi_phy_setup_hpd,
};
+static void sun8i_hdmi_phy_unlock(struct sun8i_hdmi_phy *phy)
+{
+ /* enable read access to HDMI controller */
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_READ_EN_REG,
+ SUN8I_HDMI_PHY_READ_EN_MAGIC);
+
+ /* unscramble register offsets */
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_UNSCRAMBLE_REG,
+ SUN8I_HDMI_PHY_UNSCRAMBLE_MAGIC);
+}
+
+static void sun50i_hdmi_phy_init_h6(struct sun8i_hdmi_phy *phy)
+{
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
+ SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN,
+ SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN);
+
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
+ 0xffff0000, 0x80c00000);
+}
+
static void sun8i_hdmi_phy_init_a83t(struct sun8i_hdmi_phy *phy)
{
+ sun8i_hdmi_phy_unlock(phy);
+
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_DBG_CTRL_REG,
SUN8I_HDMI_PHY_DBG_CTRL_PX_LOCK,
SUN8I_HDMI_PHY_DBG_CTRL_PX_LOCK);
@@ -298,6 +437,8 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
{
unsigned int val;
+ sun8i_hdmi_phy_unlock(phy);
+
regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG, 0);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_ENBI,
@@ -372,20 +513,23 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
{
- /* enable read access to HDMI controller */
- regmap_write(phy->regs, SUN8I_HDMI_PHY_READ_EN_REG,
- SUN8I_HDMI_PHY_READ_EN_MAGIC);
-
- /* unscramble register offsets */
- regmap_write(phy->regs, SUN8I_HDMI_PHY_UNSCRAMBLE_REG,
- SUN8I_HDMI_PHY_UNSCRAMBLE_MAGIC);
-
phy->variant->phy_init(phy);
}
-const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void)
+void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
+ struct dw_hdmi_plat_data *plat_data)
{
- return &sun8i_hdmi_phy_ops;
+ struct sun8i_hdmi_phy_variant *variant = phy->variant;
+
+ if (variant->is_custom_phy) {
+ plat_data->phy_ops = &sun8i_hdmi_phy_ops;
+ plat_data->phy_name = "sun8i_dw_hdmi_phy";
+ plat_data->phy_data = phy;
+ } else {
+ plat_data->mpll_cfg = variant->mpll_cfg;
+ plat_data->cur_ctr = variant->cur_ctr;
+ plat_data->phy_config = variant->phy_cfg;
+ }
}
static struct regmap_config sun8i_hdmi_phy_regmap_config = {
@@ -396,14 +540,8 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
.name = "phy"
};
-static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
- .has_phy_clk = true,
- .phy_init = &sun8i_hdmi_phy_init_h3,
- .phy_disable = &sun8i_hdmi_phy_disable_h3,
- .phy_config = &sun8i_hdmi_phy_config_h3,
-};
-
static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
+ .is_custom_phy = true,
.phy_init = &sun8i_hdmi_phy_init_a83t,
.phy_disable = &sun8i_hdmi_phy_disable_a83t,
.phy_config = &sun8i_hdmi_phy_config_a83t,
@@ -411,6 +549,7 @@ static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
.has_phy_clk = true,
+ .is_custom_phy = true,
.phy_init = &sun8i_hdmi_phy_init_h3,
.phy_disable = &sun8i_hdmi_phy_disable_h3,
.phy_config = &sun8i_hdmi_phy_config_h3,
@@ -419,17 +558,29 @@ static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
static const struct sun8i_hdmi_phy_variant sun8i_r40_hdmi_phy = {
.has_phy_clk = true,
.has_second_pll = true,
+ .is_custom_phy = true,
+ .phy_init = &sun8i_hdmi_phy_init_h3,
+ .phy_disable = &sun8i_hdmi_phy_disable_h3,
+ .phy_config = &sun8i_hdmi_phy_config_h3,
+};
+
+static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
+ .has_phy_clk = true,
+ .is_custom_phy = true,
.phy_init = &sun8i_hdmi_phy_init_h3,
.phy_disable = &sun8i_hdmi_phy_disable_h3,
.phy_config = &sun8i_hdmi_phy_config_h3,
};
+static const struct sun8i_hdmi_phy_variant sun50i_h6_hdmi_phy = {
+ .cur_ctr = sun50i_h6_cur_ctr,
+ .mpll_cfg = sun50i_h6_mpll_cfg,
+ .phy_cfg = sun50i_h6_phy_config,
+ .phy_init = &sun50i_hdmi_phy_init_h6,
+};
+
static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
{
- .compatible = "allwinner,sun50i-a64-hdmi-phy",
- .data = &sun50i_a64_hdmi_phy,
- },
- {
.compatible = "allwinner,sun8i-a83t-hdmi-phy",
.data = &sun8i_a83t_hdmi_phy,
},
@@ -441,6 +592,14 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
.compatible = "allwinner,sun8i-r40-hdmi-phy",
.data = &sun8i_r40_hdmi_phy,
},
+ {
+ .compatible = "allwinner,sun50i-a64-hdmi-phy",
+ .data = &sun50i_a64_hdmi_phy,
+ },
+ {
+ .compatible = "allwinner,sun50i-h6-hdmi-phy",
+ .data = &sun50i_h6_hdmi_phy,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 8b3d02b146b7..44a9ba7d8433 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -368,6 +368,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
struct sun8i_mixer *mixer;
struct resource *res;
void __iomem *regs;
+ unsigned int base;
int plane_cnt;
int i, ret;
@@ -456,33 +457,60 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
list_add_tail(&mixer->engine.list, &drv->engine_list);
- /* Reset the registers */
- for (i = 0x0; i < 0x20000; i += 4)
- regmap_write(mixer->engine.regs, i, 0);
+ base = sun8i_blender_base(mixer);
+
+ /* Reset registers and disable unused sub-engines */
+ if (mixer->cfg->is_de3) {
+ for (i = 0; i < DE3_MIXER_UNIT_SIZE; i += 4)
+ regmap_write(mixer->engine.regs, i, 0);
+
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_FCE_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_PEAK_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_LCTI_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_BLS_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_FCC_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_DNS_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_DRC_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_FMT_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC0_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC1_EN, 0);
+ } else {
+ for (i = 0; i < DE2_MIXER_UNIT_SIZE; i += 4)
+ regmap_write(mixer->engine.regs, i, 0);
+
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_FCE_EN, 0);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BWS_EN, 0);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_LTI_EN, 0);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_PEAK_EN, 0);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_ASE_EN, 0);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_FCC_EN, 0);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_DCSC_EN, 0);
+ }
/* Enable the mixer */
regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_CTL,
SUN8I_MIXER_GLOBAL_CTL_RT_EN);
/* Set background color to black */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_BKCOLOR,
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_BKCOLOR(base),
SUN8I_MIXER_BLEND_COLOR_BLACK);
/*
* Set fill color of bottom plane to black. Generally not needed
* except when VI plane is at bottom (zpos = 0) and enabled.
*/
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(0),
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, 0),
SUN8I_MIXER_BLEND_COLOR_BLACK);
plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
for (i = 0; i < plane_cnt; i++)
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_MODE(i),
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_MODE(base, i),
SUN8I_MIXER_BLEND_MODE_DEF);
- regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
+ regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
return 0;
@@ -585,6 +613,15 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
.vi_num = 1,
};
+static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
+ .ccsc = 0,
+ .is_de3 = true,
+ .mod_rate = 600000000,
+ .scaler_mask = 0xf,
+ .ui_num = 3,
+ .vi_num = 1,
+};
+
static const struct of_device_id sun8i_mixer_of_table[] = {
{
.compatible = "allwinner,sun8i-a83t-de2-mixer-0",
@@ -618,6 +655,10 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.compatible = "allwinner,sun50i-a64-de2-mixer-1",
.data = &sun50i_a64_mixer1_cfg,
},
+ {
+ .compatible = "allwinner,sun50i-h6-de3-mixer-0",
+ .data = &sun50i_h6_mixer0_cfg,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
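Note on the reset rework above: instead of blindly zeroing the whole 128 KiB register window (the old 0x0..0x20000 loop), the bind path now zeroes only the mixer unit itself and then gates each unused sub-engine off through its EN register, with separate lists for DE2 and DE3. A minimal sketch of the common pattern, factored into one hypothetical helper (not part of the patch):

	/* Zero the mixer unit, then gate off the given sub-engines;
	 * 'en_regs' would carry the per-generation EN register list.
	 */
	static void mixer_reset_sketch(struct sun8i_mixer *mixer, u32 unit_size,
				       const u32 *en_regs, int count)
	{
		unsigned int i;

		for (i = 0; i < unit_size; i += 4)
			regmap_write(mixer->engine.regs, i, 0);

		for (i = 0; i < count; i++)
			regmap_write(mixer->engine.regs, en_regs[i], 0);
	}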
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 406c42e752d7..913d14ce68b0 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -29,24 +29,41 @@
#define SUN8I_MIXER_GLOBAL_DBUFF_ENABLE BIT(0)
-#define SUN8I_MIXER_BLEND_PIPE_CTL 0x1000
-#define SUN8I_MIXER_BLEND_ATTR_FCOLOR(x) (0x1004 + 0x10 * (x) + 0x0)
-#define SUN8I_MIXER_BLEND_ATTR_INSIZE(x) (0x1004 + 0x10 * (x) + 0x4)
-#define SUN8I_MIXER_BLEND_ATTR_COORD(x) (0x1004 + 0x10 * (x) + 0x8)
-#define SUN8I_MIXER_BLEND_ROUTE 0x1080
-#define SUN8I_MIXER_BLEND_PREMULTIPLY 0x1084
-#define SUN8I_MIXER_BLEND_BKCOLOR 0x1088
-#define SUN8I_MIXER_BLEND_OUTSIZE 0x108c
-#define SUN8I_MIXER_BLEND_MODE(x) (0x1090 + 0x04 * (x))
-#define SUN8I_MIXER_BLEND_CK_CTL 0x10b0
-#define SUN8I_MIXER_BLEND_CK_CFG 0x10b4
-#define SUN8I_MIXER_BLEND_CK_MAX(x) (0x10c0 + 0x04 * (x))
-#define SUN8I_MIXER_BLEND_CK_MIN(x) (0x10e0 + 0x04 * (x))
-#define SUN8I_MIXER_BLEND_OUTCTL 0x10fc
+#define DE2_MIXER_UNIT_SIZE 0x6000
+#define DE3_MIXER_UNIT_SIZE 0x3000
+
+#define DE2_BLD_BASE 0x1000
+#define DE2_CH_BASE 0x2000
+#define DE2_CH_SIZE 0x1000
+
+#define DE3_BLD_BASE 0x0800
+#define DE3_CH_BASE 0x1000
+#define DE3_CH_SIZE 0x0800
+
+#define SUN8I_MIXER_BLEND_PIPE_CTL(base) ((base) + 0)
+#define SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, x) ((base) + 0x4 + 0x10 * (x))
+#define SUN8I_MIXER_BLEND_ATTR_INSIZE(base, x) ((base) + 0x8 + 0x10 * (x))
+#define SUN8I_MIXER_BLEND_ATTR_COORD(base, x) ((base) + 0xc + 0x10 * (x))
+#define SUN8I_MIXER_BLEND_ROUTE(base) ((base) + 0x80)
+#define SUN8I_MIXER_BLEND_PREMULTIPLY(base) ((base) + 0x84)
+#define SUN8I_MIXER_BLEND_BKCOLOR(base) ((base) + 0x88)
+#define SUN8I_MIXER_BLEND_OUTSIZE(base) ((base) + 0x8c)
+#define SUN8I_MIXER_BLEND_MODE(base, x) ((base) + 0x90 + 0x04 * (x))
+#define SUN8I_MIXER_BLEND_CK_CTL(base) ((base) + 0xb0)
+#define SUN8I_MIXER_BLEND_CK_CFG(base) ((base) + 0xb4)
+#define SUN8I_MIXER_BLEND_CK_MAX(base, x) ((base) + 0xc0 + 0x04 * (x))
+#define SUN8I_MIXER_BLEND_CK_MIN(base, x) ((base) + 0xe0 + 0x04 * (x))
+#define SUN8I_MIXER_BLEND_OUTCTL(base) ((base) + 0xfc)
+#define SUN50I_MIXER_BLEND_CSC_CTL(base) ((base) + 0x100)
+#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x, y) \
+ ((base) + 0x110 + (layer) * 0x30 + (x) * 0x10 + 4 * (y))
+#define SUN50I_MIXER_BLEND_CSC_CONST(base, layer, i) \
+ ((base) + 0x110 + (layer) * 0x30 + (i) * 0x10 + 0x0c)
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK GENMASK(12, 8)
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe) BIT(8 + pipe)
#define SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(pipe) BIT(pipe)
+
/* colors are always in AARRGGBB format */
#define SUN8I_MIXER_BLEND_COLOR_BLACK 0xff000000
/* The following numbers are some still unknown magic numbers */
@@ -57,6 +74,9 @@
#define SUN8I_MIXER_BLEND_OUTCTL_INTERLACED BIT(1)
+#define SUN50I_MIXER_BLEND_CSC_CTL_EN(ch) BIT(ch)
+#define SUN50I_MIXER_BLEND_CSC_CONST_VAL(d, c) (((d) << 16) | ((c) & 0xffff))
+
#define SUN8I_MIXER_FBFMT_ARGB8888 0
#define SUN8I_MIXER_FBFMT_ABGR8888 1
#define SUN8I_MIXER_FBFMT_RGBA8888 2
@@ -95,8 +115,8 @@
#define SUN8I_MIXER_FBFMT_YUV411 14
/*
- * These sub-engines are still unknown now, the EN registers are here only to
- * be used to disable these sub-engines.
+ * Sub-engines listed below are unused for now. The EN registers are here only
+ * to be used to disable these sub-engines.
*/
#define SUN8I_MIXER_FCE_EN 0xa0000
#define SUN8I_MIXER_BWS_EN 0xa2000
@@ -106,6 +126,17 @@
#define SUN8I_MIXER_FCC_EN 0xaa000
#define SUN8I_MIXER_DCSC_EN 0xb0000
+#define SUN50I_MIXER_FCE_EN 0x70000
+#define SUN50I_MIXER_PEAK_EN 0x70800
+#define SUN50I_MIXER_LCTI_EN 0x71000
+#define SUN50I_MIXER_BLS_EN 0x71800
+#define SUN50I_MIXER_FCC_EN 0x72000
+#define SUN50I_MIXER_DNS_EN 0x80000
+#define SUN50I_MIXER_DRC_EN 0xa0000
+#define SUN50I_MIXER_FMT_EN 0xa8000
+#define SUN50I_MIXER_CDC0_EN 0xd0000
+#define SUN50I_MIXER_CDC1_EN 0xd8000
+
struct de2_fmt_info {
u32 drm_fmt;
u32 de2_fmt;
@@ -127,6 +158,7 @@ struct de2_fmt_info {
* are invalid.
* @mod_rate: module clock rate that needs to be set in order to have
* a functional block.
+ * @is_de3: true if this is the next-gen Display Engine 3.0, false otherwise.
*/
struct sun8i_mixer_cfg {
int vi_num;
@@ -134,6 +166,7 @@ struct sun8i_mixer_cfg {
int scaler_mask;
int ccsc;
unsigned long mod_rate;
+ unsigned int is_de3 : 1;
};
struct sun8i_mixer {
@@ -153,5 +186,20 @@ engine_to_sun8i_mixer(struct sunxi_engine *engine)
return container_of(engine, struct sun8i_mixer, engine);
}
+static inline u32
+sun8i_blender_base(struct sun8i_mixer *mixer)
+{
+ return mixer->cfg->is_de3 ? DE3_BLD_BASE : DE2_BLD_BASE;
+}
+
+static inline u32
+sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
+{
+ if (mixer->cfg->is_de3)
+ return DE3_CH_BASE + channel * DE3_CH_SIZE;
+ else
+ return DE2_CH_BASE + channel * DE2_CH_SIZE;
+}
+
const struct de2_fmt_info *sun8i_mixer_format_info(u32 format);
#endif /* _SUN8I_MIXER_H_ */
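After this header rework, every blender register macro takes a runtime base instead of hard-coding the DE2 layout, and the two new inline helpers pick the base from the is_de3 flag. A quick sketch of how they compose, using only definitions from this header:

	/* Resolve an absolute offset for either generation: on DE2 this is
	 * 0x1000 + 0x88 = 0x1088 (the old absolute SUN8I_MIXER_BLEND_BKCOLOR
	 * value); on DE3 it becomes 0x0800 + 0x88 = 0x0888.
	 */
	static u32 bkcolor_reg_sketch(struct sun8i_mixer *mixer)
	{
		return SUN8I_MIXER_BLEND_BKCOLOR(sun8i_blender_base(mixer));
	}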
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 3040a79f298f..fc36e0c10a37 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -9,11 +9,17 @@
#include <linux/component.h>
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include "sun8i_tcon_top.h"
+struct sun8i_tcon_top_quirks {
+ bool has_tcon_tv1;
+ bool has_dsi;
+};
+
static bool sun8i_tcon_top_node_is_tcon_top(struct device_node *node)
{
return !!of_match_node(sun8i_tcon_top_of_table, node);
@@ -121,10 +127,13 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct clk_hw_onecell_data *clk_data;
struct sun8i_tcon_top *tcon_top;
+ const struct sun8i_tcon_top_quirks *quirks;
struct resource *res;
void __iomem *regs;
int ret, i;
+ quirks = of_device_get_match_data(&pdev->dev);
+
tcon_top = devm_kzalloc(dev, sizeof(*tcon_top), GFP_KERNEL);
if (!tcon_top)
return -ENOMEM;
@@ -168,6 +177,13 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
}
/*
+ * At least on H6, some registers have some bits set by default
+ * which may cause issues. Clear them here.
+ */
+ writel(0, regs + TCON_TOP_PORT_SEL_REG);
+ writel(0, regs + TCON_TOP_GATE_SRC_REG);
+
+ /*
* TCON TOP has two muxes, which select parent clock for each TCON TV
* channel clock. Parent could be either TCON TV or TVE clock. For now
* we leave this fixed to TCON TV, since TVE driver for R40 is not yet
@@ -180,15 +196,17 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
&tcon_top->reg_lock,
TCON_TOP_TCON_TV0_GATE, 0);
- clk_data->hws[CLK_TCON_TOP_TV1] =
- sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs,
- &tcon_top->reg_lock,
- TCON_TOP_TCON_TV1_GATE, 1);
+ if (quirks->has_tcon_tv1)
+ clk_data->hws[CLK_TCON_TOP_TV1] =
+ sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_TV1_GATE, 1);
- clk_data->hws[CLK_TCON_TOP_DSI] =
- sun8i_tcon_top_register_gate(dev, "dsi", regs,
- &tcon_top->reg_lock,
- TCON_TOP_TCON_DSI_GATE, 2);
+ if (quirks->has_dsi)
+ clk_data->hws[CLK_TCON_TOP_DSI] =
+ sun8i_tcon_top_register_gate(dev, "dsi", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_DSI_GATE, 2);
for (i = 0; i < CLK_NUM; i++)
if (IS_ERR(clk_data->hws[i])) {
@@ -250,9 +268,25 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
return 0;
}
+const struct sun8i_tcon_top_quirks sun8i_r40_tcon_top_quirks = {
+ .has_tcon_tv1 = true,
+ .has_dsi = true,
+};
+
+const struct sun8i_tcon_top_quirks sun50i_h6_tcon_top_quirks = {
+ /* Nothing special */
+};
+
/* sun4i_drv uses this list to check if a device node is a TCON TOP */
const struct of_device_id sun8i_tcon_top_of_table[] = {
- { .compatible = "allwinner,sun8i-r40-tcon-top" },
+ {
+ .compatible = "allwinner,sun8i-r40-tcon-top",
+ .data = &sun8i_r40_tcon_top_quirks
+ },
+ {
+ .compatible = "allwinner,sun50i-h6-tcon-top",
+ .data = &sun50i_h6_tcon_top_quirks
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
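The TCON TOP change is the usual per-compatible quirk pattern: the OF match table carries a quirks struct in .data, and bind() fetches it with of_device_get_match_data(), so optional blocks (TCON TV1, DSI) are only registered on SoCs that have them. Sketched usage (the NULL check is a defensive addition, not in the patch):

	static int tcon_top_bind_sketch(struct platform_device *pdev)
	{
		const struct sun8i_tcon_top_quirks *quirks;

		quirks = of_device_get_match_data(&pdev->dev);
		if (!quirks)
			return -EINVAL;

		if (quirks->has_dsi) {
			/* register the DSI gate clock, as done above */
		}

		return 0;
	}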
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 28c15c6ef1ef..18534263a05d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drmP.h>
@@ -30,7 +31,10 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
int overlay, bool enable, unsigned int zpos,
unsigned int old_zpos)
{
- u32 val;
+ u32 val, bld_base, ch_base;
+
+ bld_base = sun8i_blender_base(mixer);
+ ch_base = sun8i_channel_base(mixer, channel);
DRM_DEBUG_DRIVER("%sabling channel %d overlay %d\n",
enable ? "En" : "Dis", channel, overlay);
@@ -41,17 +45,17 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
val = 0;
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN, val);
if (!enable || zpos != old_zpos) {
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
0);
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE(bld_base),
SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
0);
}
@@ -60,12 +64,13 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+ SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
+ val, val);
val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE(bld_base),
SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
val);
}
@@ -77,12 +82,16 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
{
struct drm_plane_state *state = plane->state;
u32 src_w, src_h, dst_w, dst_h;
+ u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
DRM_DEBUG_DRIVER("Updating UI channel %d overlay %d\n",
channel, overlay);
+ bld_base = sun8i_blender_base(mixer);
+ ch_base = sun8i_channel_base(mixer, channel);
+
src_w = drm_rect_width(&state->src) >> 16;
src_h = drm_rect_height(&state->src) >> 16;
dst_w = drm_rect_width(&state->dst);
@@ -103,8 +112,8 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
regmap_write(mixer->engine.regs,
SUN8I_MIXER_GLOBAL_SIZE,
outsize);
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_OUTSIZE,
- outsize);
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_OUTSIZE(bld_base), outsize);
if (state->crtc)
interlaced = state->crtc->state->adjusted_mode.flags
@@ -116,7 +125,7 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
val = 0;
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_OUTCTL,
+ SUN8I_MIXER_BLEND_OUTCTL(bld_base),
SUN8I_MIXER_BLEND_OUTCTL_INTERLACED,
val);
@@ -129,10 +138,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
state->src.x1 >> 16, state->src.y1 >> 16);
DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_SIZE(channel, overlay),
+ SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch_base, overlay),
insize);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_OVL_SIZE(channel),
+ SUN8I_MIXER_CHAN_UI_OVL_SIZE(ch_base),
insize);
if (insize != outsize || hphase || vphase) {
@@ -156,10 +165,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
+ SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
return 0;
@@ -170,7 +179,9 @@ static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
{
struct drm_plane_state *state = plane->state;
const struct de2_fmt_info *fmt_info;
- u32 val;
+ u32 val, ch_base;
+
+ ch_base = sun8i_channel_base(mixer, channel);
fmt_info = sun8i_mixer_format_info(state->fb->format->format);
if (!fmt_info || !fmt_info->rgb) {
@@ -180,7 +191,7 @@ static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
return 0;
@@ -193,8 +204,11 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *gem;
dma_addr_t paddr;
+ u32 ch_base;
int bpp;
+ ch_base = sun8i_channel_base(mixer, channel);
+
/* Get the physical address of the buffer in memory */
gem = drm_fb_cma_get_gem_obj(fb, 0);
@@ -211,13 +225,13 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
/* Set the line width */
DRM_DEBUG_DRIVER("Layer line width: %d bytes\n", fb->pitches[0]);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_PITCH(channel, overlay),
+ SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch_base, overlay),
fb->pitches[0]);
DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(channel, overlay),
+ SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch_base, overlay),
lower_32_bits(paddr));
return 0;
@@ -287,6 +301,7 @@ static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
}
static struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = sun8i_ui_layer_atomic_check,
.atomic_disable = sun8i_ui_layer_atomic_disable,
.atomic_update = sun8i_ui_layer_atomic_update,
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
index 123b15ea9918..f4389cf0ba20 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
@@ -18,23 +18,26 @@
#include <drm/drm_plane.h>
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x0)
-#define SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x4)
-#define SUN8I_MIXER_CHAN_UI_LAYER_COORD(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x8)
-#define SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0xc)
-#define SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x10)
-#define SUN8I_MIXER_CHAN_UI_LAYER_BOT_LADDR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x14)
-#define SUN8I_MIXER_CHAN_UI_LAYER_FCOLOR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x18)
-#define SUN8I_MIXER_CHAN_UI_TOP_HADDR(ch) (0x2000 + 0x1000 * (ch) + 0x80)
-#define SUN8I_MIXER_CHAN_UI_BOT_HADDR(ch) (0x2000 + 0x1000 * (ch) + 0x84)
-#define SUN8I_MIXER_CHAN_UI_OVL_SIZE(ch) (0x2000 + 0x1000 * (ch) + 0x88)
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR(base, layer) \
+ ((base) + 0x20 * (layer) + 0x0)
+#define SUN8I_MIXER_CHAN_UI_LAYER_SIZE(base, layer) \
+ ((base) + 0x20 * (layer) + 0x4)
+#define SUN8I_MIXER_CHAN_UI_LAYER_COORD(base, layer) \
+ ((base) + 0x20 * (layer) + 0x8)
+#define SUN8I_MIXER_CHAN_UI_LAYER_PITCH(base, layer) \
+ ((base) + 0x20 * (layer) + 0xc)
+#define SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(base, layer) \
+ ((base) + 0x20 * (layer) + 0x10)
+#define SUN8I_MIXER_CHAN_UI_LAYER_BOT_LADDR(base, layer) \
+ ((base) + 0x20 * (layer) + 0x14)
+#define SUN8I_MIXER_CHAN_UI_LAYER_FCOLOR(base, layer) \
+ ((base) + 0x20 * (layer) + 0x18)
+#define SUN8I_MIXER_CHAN_UI_TOP_HADDR(base) \
+ ((base) + 0x80)
+#define SUN8I_MIXER_CHAN_UI_BOT_HADDR(base) \
+ ((base) + 0x84)
+#define SUN8I_MIXER_CHAN_UI_OVL_SIZE(base) \
+ ((base) + 0x88)
#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN BIT(0)
#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK GENMASK(2, 1)
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
index 6bb2aa164c8e..ae0806bccac7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
@@ -10,6 +10,7 @@
*/
#include "sun8i_ui_scaler.h"
+#include "sun8i_vi_scaler.h"
static const u32 lan2coefftab16[240] = {
0x00004000, 0x00033ffe, 0x00063efc, 0x000a3bfb,
@@ -88,6 +89,20 @@ static const u32 lan2coefftab16[240] = {
0x0b1c1603, 0x0d1c1502, 0x0e1d1401, 0x0f1d1301,
};
+static u32 sun8i_ui_scaler_base(struct sun8i_mixer *mixer, int channel)
+{
+ int vi_num = mixer->cfg->vi_num;
+
+ if (mixer->cfg->is_de3)
+ return DE3_VI_SCALER_UNIT_BASE +
+ DE3_VI_SCALER_UNIT_SIZE * vi_num +
+ DE3_UI_SCALER_UNIT_SIZE * (channel - vi_num);
+ else
+ return DE2_VI_SCALER_UNIT_BASE +
+ DE2_VI_SCALER_UNIT_SIZE * vi_num +
+ DE2_UI_SCALER_UNIT_SIZE * (channel - vi_num);
+}
+
static int sun8i_ui_scaler_coef_index(unsigned int step)
{
unsigned int scale, int_part, float_part;
@@ -114,33 +129,35 @@ static int sun8i_ui_scaler_coef_index(unsigned int step)
void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
{
- int vi_cnt = mixer->cfg->vi_num;
- u32 val;
+ u32 val, base;
- if (WARN_ON(layer < vi_cnt))
+ if (WARN_ON(layer < mixer->cfg->vi_num))
return;
+ base = sun8i_ui_scaler_base(mixer, layer);
+
if (enable)
val = SUN8I_SCALER_GSU_CTRL_EN |
SUN8I_SCALER_GSU_CTRL_COEFF_RDY;
else
val = 0;
- regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_CTRL(vi_cnt, layer - vi_cnt), val);
+ regmap_write(mixer->engine.regs, SUN8I_SCALER_GSU_CTRL(base), val);
}
void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
u32 hscale, u32 vscale, u32 hphase, u32 vphase)
{
- int vi_cnt = mixer->cfg->vi_num;
u32 insize, outsize;
int i, offset;
+ u32 base;
- if (WARN_ON(layer < vi_cnt))
+ if (WARN_ON(layer < mixer->cfg->vi_num))
return;
+ base = sun8i_ui_scaler_base(mixer, layer);
+
hphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16;
vphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16;
hscale <<= SUN8I_UI_SCALER_SCALE_FRAC - 16;
@@ -149,24 +166,22 @@ void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
insize = SUN8I_UI_SCALER_SIZE(src_w, src_h);
outsize = SUN8I_UI_SCALER_SIZE(dst_w, dst_h);
- layer -= vi_cnt;
-
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_OUTSIZE(vi_cnt, layer), outsize);
+ SUN8I_SCALER_GSU_OUTSIZE(base), outsize);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_INSIZE(vi_cnt, layer), insize);
+ SUN8I_SCALER_GSU_INSIZE(base), insize);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_HSTEP(vi_cnt, layer), hscale);
+ SUN8I_SCALER_GSU_HSTEP(base), hscale);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_VSTEP(vi_cnt, layer), vscale);
+ SUN8I_SCALER_GSU_VSTEP(base), vscale);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_HPHASE(vi_cnt, layer), hphase);
+ SUN8I_SCALER_GSU_HPHASE(base), hphase);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_VPHASE(vi_cnt, layer), vphase);
+ SUN8I_SCALER_GSU_VPHASE(base), vphase);
offset = sun8i_ui_scaler_coef_index(hscale) *
SUN8I_UI_SCALER_COEFF_COUNT;
for (i = 0; i < SUN8I_UI_SCALER_COEFF_COUNT; i++)
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_HCOEFF(vi_cnt, layer, i),
+ SUN8I_SCALER_GSU_HCOEFF(base, i),
lan2coefftab16[offset + i]);
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
index 86295be8be78..1ef4bd6f2718 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
@@ -11,6 +11,9 @@
#include "sun8i_mixer.h"
+#define DE2_UI_SCALER_UNIT_SIZE 0x10000
+#define DE3_UI_SCALER_UNIT_SIZE 0x08000
+
/* these two macros assume 16 fractional bits which is standard in DRM */
#define SUN8I_UI_SCALER_SCALE_MIN 1
#define SUN8I_UI_SCALER_SCALE_MAX ((1UL << 20) - 1)
@@ -20,23 +23,14 @@
#define SUN8I_UI_SCALER_COEFF_COUNT 16
#define SUN8I_UI_SCALER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
-#define SUN8I_SCALER_GSU_CTRL(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x0)
-#define SUN8I_SCALER_GSU_OUTSIZE(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x40)
-#define SUN8I_SCALER_GSU_INSIZE(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x80)
-#define SUN8I_SCALER_GSU_HSTEP(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x88)
-#define SUN8I_SCALER_GSU_VSTEP(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x8c)
-#define SUN8I_SCALER_GSU_HPHASE(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x90)
-#define SUN8I_SCALER_GSU_VPHASE(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x98)
-#define SUN8I_SCALER_GSU_HCOEFF(vi_cnt, ui_idx, index) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x200 + \
- 0x4 * (index))
+#define SUN8I_SCALER_GSU_CTRL(base) ((base) + 0x0)
+#define SUN8I_SCALER_GSU_OUTSIZE(base) ((base) + 0x40)
+#define SUN8I_SCALER_GSU_INSIZE(base) ((base) + 0x80)
+#define SUN8I_SCALER_GSU_HSTEP(base) ((base) + 0x88)
+#define SUN8I_SCALER_GSU_VSTEP(base) ((base) + 0x8c)
+#define SUN8I_SCALER_GSU_HPHASE(base) ((base) + 0x90)
+#define SUN8I_SCALER_GSU_VPHASE(base) ((base) + 0x98)
+#define SUN8I_SCALER_GSU_HCOEFF(base, index) ((base) + 0x200 + 0x4 * (index))
#define SUN8I_SCALER_GSU_CTRL_EN BIT(0)
#define SUN8I_SCALER_GSU_CTRL_COEFF_RDY BIT(4)
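Given the unit sizes above and sun8i_ui_scaler_base() in the .c file, the absolute bases work out as follows for a mixer with vi_num = 1 (UI scaler units are packed after the VI scaler units):

	/* Worked example, UI channel 1, vi_num == 1:
	 * DE2: 0x20000 + 0x20000 * 1 + 0x10000 * (1 - 1) = 0x40000
	 * DE3: 0x20000 + 0x08000 * 1 + 0x08000 * (1 - 1) = 0x28000
	 * so SUN8I_SCALER_GSU_CTRL(base) lands at 0x40000 resp. 0x28000.
	 */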
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index f4fe97813f94..87be898f9b7a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -13,6 +13,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drmP.h>
@@ -24,7 +25,10 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
int overlay, bool enable, unsigned int zpos,
unsigned int old_zpos)
{
- u32 val;
+ u32 val, bld_base, ch_base;
+
+ bld_base = sun8i_blender_base(mixer);
+ ch_base = sun8i_channel_base(mixer, channel);
DRM_DEBUG_DRIVER("%sabling VI channel %d overlay %d\n",
enable ? "En" : "Dis", channel, overlay);
@@ -35,17 +39,17 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
val = 0;
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN, val);
if (!enable || zpos != old_zpos) {
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
0);
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE(bld_base),
SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
0);
}
@@ -54,12 +58,13 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+ SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
+ val, val);
val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE(bld_base),
SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
val);
}
@@ -72,6 +77,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
struct drm_plane_state *state = plane->state;
const struct drm_format_info *format = state->fb->format;
u32 src_w, src_h, dst_w, dst_h;
+ u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
bool subsampled;
@@ -79,6 +85,9 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n",
channel, overlay);
+ bld_base = sun8i_blender_base(mixer);
+ ch_base = sun8i_channel_base(mixer, channel);
+
src_w = drm_rect_width(&state->src) >> 16;
src_h = drm_rect_height(&state->src) >> 16;
dst_w = drm_rect_width(&state->dst);
@@ -115,10 +124,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
(state->src.y1 >> 16) & ~(format->vsub - 1));
DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_SIZE(channel, overlay),
+ SUN8I_MIXER_CHAN_VI_LAYER_SIZE(ch_base, overlay),
insize);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_OVL_SIZE(channel),
+ SUN8I_MIXER_CHAN_VI_OVL_SIZE(ch_base),
insize);
/*
@@ -149,10 +158,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
+ SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
return 0;
@@ -163,7 +172,9 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
{
struct drm_plane_state *state = plane->state;
const struct de2_fmt_info *fmt_info;
- u32 val;
+ u32 val, ch_base;
+
+ ch_base = sun8i_channel_base(mixer, channel);
fmt_info = sun8i_mixer_format_info(state->fb->format->format);
if (!fmt_info) {
@@ -173,7 +184,7 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
if (fmt_info->csc != SUN8I_CSC_MODE_OFF) {
@@ -189,9 +200,17 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
val = 0;
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE, val);
+	/* It seems that YUV formats use the global alpha setting. */
+ if (mixer->cfg->is_de3)
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base,
+ overlay),
+ SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK,
+ SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(0xff));
+
return 0;
}
@@ -204,8 +223,11 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
struct drm_gem_cma_object *gem;
u32 dx, dy, src_x, src_y;
dma_addr_t paddr;
+ u32 ch_base;
int i;
+ ch_base = sun8i_channel_base(mixer, channel);
+
/* Adjust x and y to be divisible by the subsampling factor */
src_x = (state->src.x1 >> 16) & ~(format->hsub - 1);
src_y = (state->src.y1 >> 16) & ~(format->vsub - 1);
@@ -235,17 +257,17 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
DRM_DEBUG_DRIVER("Layer %d. line width: %d bytes\n",
i + 1, fb->pitches[i]);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_PITCH(channel,
+ SUN8I_MIXER_CHAN_VI_LAYER_PITCH(ch_base,
overlay, i),
- fb->pitches[i]);
+ fb->pitches[i]);
DRM_DEBUG_DRIVER("Setting %d. buffer address to %pad\n",
i + 1, &paddr);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(channel,
+ SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch_base,
overlay, i),
- lower_32_bits(paddr));
+ lower_32_bits(paddr));
}
return 0;
@@ -315,6 +337,7 @@ static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
}
static struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = sun8i_vi_layer_atomic_check,
.atomic_disable = sun8i_vi_layer_atomic_disable,
.atomic_update = sun8i_vi_layer_atomic_update,
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
index 6996627a0a76..8a5e6d01c85d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
@@ -12,23 +12,26 @@
#include <drm/drm_plane.h>
-#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x0)
-#define SUN8I_MIXER_CHAN_VI_LAYER_SIZE(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x4)
-#define SUN8I_MIXER_CHAN_VI_LAYER_COORD(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x8)
-#define SUN8I_MIXER_CHAN_VI_LAYER_PITCH(ch, layer, plane) \
- (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0xc + 4 * (plane))
-#define SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch, layer, plane) \
- (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x18 + 4 * (plane))
-#define SUN8I_MIXER_CHAN_VI_OVL_SIZE(ch) (0x2000 + 0x1000 * (ch) + 0xe8)
+#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR(base, layer) \
+ ((base) + 0x30 * (layer) + 0x0)
+#define SUN8I_MIXER_CHAN_VI_LAYER_SIZE(base, layer) \
+ ((base) + 0x30 * (layer) + 0x4)
+#define SUN8I_MIXER_CHAN_VI_LAYER_COORD(base, layer) \
+ ((base) + 0x30 * (layer) + 0x8)
+#define SUN8I_MIXER_CHAN_VI_LAYER_PITCH(base, layer, plane) \
+ ((base) + 0x30 * (layer) + 0xc + 4 * (plane))
+#define SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(base, layer, plane) \
+ ((base) + 0x30 * (layer) + 0x18 + 4 * (plane))
+#define SUN8I_MIXER_CHAN_VI_OVL_SIZE(base) \
+ ((base) + 0xe8)
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN BIT(0)
/* RGB mode should be set for RGB formats and cleared for YCbCr */
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE BIT(15)
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET 8
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK GENMASK(12, 8)
+#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24)
+#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(x) ((x) << 24)
struct sun8i_mixer;
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
index d3f1acb234b7..7ba75011adf9 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
@@ -833,6 +833,16 @@ static const u32 bicubic4coefftab32[480] = {
0x1012110d, 0x1012110d, 0x1013110c, 0x1013110c,
};
+static u32 sun8i_vi_scaler_base(struct sun8i_mixer *mixer, int channel)
+{
+ if (mixer->cfg->is_de3)
+ return DE3_VI_SCALER_UNIT_BASE +
+ DE3_VI_SCALER_UNIT_SIZE * channel;
+ else
+ return DE2_VI_SCALER_UNIT_BASE +
+ DE2_VI_SCALER_UNIT_SIZE * channel;
+}
+
static int sun8i_vi_scaler_coef_index(unsigned int step)
{
unsigned int scale, int_part, float_part;
@@ -857,7 +867,7 @@ static int sun8i_vi_scaler_coef_index(unsigned int step)
}
}
-static void sun8i_vi_scaler_set_coeff(struct regmap *map, int layer,
+static void sun8i_vi_scaler_set_coeff(struct regmap *map, u32 base,
u32 hstep, u32 vstep,
const struct drm_format_info *format)
{
@@ -877,29 +887,31 @@ static void sun8i_vi_scaler_set_coeff(struct regmap *map, int layer,
offset = sun8i_vi_scaler_coef_index(hstep) *
SUN8I_VI_SCALER_COEFF_COUNT;
for (i = 0; i < SUN8I_VI_SCALER_COEFF_COUNT; i++) {
- regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF0(layer, i),
+ regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF0(base, i),
lan3coefftab32_left[offset + i]);
- regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF1(layer, i),
+ regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF1(base, i),
lan3coefftab32_right[offset + i]);
- regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF0(layer, i),
+ regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF0(base, i),
ch_left[offset + i]);
- regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF1(layer, i),
+ regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF1(base, i),
ch_right[offset + i]);
}
offset = sun8i_vi_scaler_coef_index(hstep) *
SUN8I_VI_SCALER_COEFF_COUNT;
for (i = 0; i < SUN8I_VI_SCALER_COEFF_COUNT; i++) {
- regmap_write(map, SUN8I_SCALER_VSU_YVCOEFF(layer, i),
+ regmap_write(map, SUN8I_SCALER_VSU_YVCOEFF(base, i),
lan2coefftab32[offset + i]);
- regmap_write(map, SUN8I_SCALER_VSU_CVCOEFF(layer, i),
+ regmap_write(map, SUN8I_SCALER_VSU_CVCOEFF(base, i),
cy[offset + i]);
}
}
void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
{
- u32 val;
+ u32 val, base;
+
+ base = sun8i_vi_scaler_base(mixer, layer);
if (enable)
val = SUN8I_SCALER_VSU_CTRL_EN |
@@ -907,7 +919,8 @@ void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
else
val = 0;
- regmap_write(mixer->engine.regs, SUN8I_SCALER_VSU_CTRL(layer), val);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_CTRL(base), val);
}
void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
@@ -917,6 +930,9 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
{
u32 chphase, cvphase;
u32 insize, outsize;
+ u32 base;
+
+ base = sun8i_vi_scaler_base(mixer, layer);
hphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16;
vphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16;
@@ -940,32 +956,44 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
cvphase = vphase;
}
+ if (mixer->cfg->is_de3) {
+ u32 val;
+
+ if (format->hsub == 1 && format->vsub == 1)
+ val = SUN50I_SCALER_VSU_SCALE_MODE_UI;
+ else
+ val = SUN50I_SCALER_VSU_SCALE_MODE_NORMAL;
+
+ regmap_write(mixer->engine.regs,
+ SUN50I_SCALER_VSU_SCALE_MODE(base), val);
+ }
+
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_OUTSIZE(layer), outsize);
+ SUN8I_SCALER_VSU_OUTSIZE(base), outsize);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_YINSIZE(layer), insize);
+ SUN8I_SCALER_VSU_YINSIZE(base), insize);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_YHSTEP(layer), hscale);
+ SUN8I_SCALER_VSU_YHSTEP(base), hscale);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_YVSTEP(layer), vscale);
+ SUN8I_SCALER_VSU_YVSTEP(base), vscale);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_YHPHASE(layer), hphase);
+ SUN8I_SCALER_VSU_YHPHASE(base), hphase);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_YVPHASE(layer), vphase);
+ SUN8I_SCALER_VSU_YVPHASE(base), vphase);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_CINSIZE(layer),
+ SUN8I_SCALER_VSU_CINSIZE(base),
SUN8I_VI_SCALER_SIZE(src_w / format->hsub,
src_h / format->vsub));
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_CHSTEP(layer),
+ SUN8I_SCALER_VSU_CHSTEP(base),
hscale / format->hsub);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_CVSTEP(layer),
+ SUN8I_SCALER_VSU_CVSTEP(base),
vscale / format->vsub);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_CHPHASE(layer), chphase);
+ SUN8I_SCALER_VSU_CHPHASE(base), chphase);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_CVPHASE(layer), cvphase);
- sun8i_vi_scaler_set_coeff(mixer->engine.regs, layer,
+ SUN8I_SCALER_VSU_CVPHASE(base), cvphase);
+ sun8i_vi_scaler_set_coeff(mixer->engine.regs, base,
hscale, vscale, format);
}
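One DE3-only detail in the setup path above: the DE3 VSU has an explicit scale-mode register, chosen from the format's chroma subsampling. Condensed, the decision is (constants from sun8i_vi_scaler.h):

	/* Non-subsampled formats (hsub == vsub == 1) use the simpler UI
	 * mode; subsampled YUV uses NORMAL. ED_SCALE (edge-directed) is
	 * defined in the header but left unused by the patch.
	 */
	static u32 vsu_scale_mode_sketch(const struct drm_format_info *format)
	{
		if (format->hsub == 1 && format->vsub == 1)
			return SUN50I_SCALER_VSU_SCALE_MODE_UI;

		return SUN50I_SCALER_VSU_SCALE_MODE_NORMAL;
	}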
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
index a595ab643a5a..68f6593b369a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
@@ -12,6 +12,12 @@
#include <drm/drm_fourcc.h>
#include "sun8i_mixer.h"
+#define DE2_VI_SCALER_UNIT_BASE 0x20000
+#define DE2_VI_SCALER_UNIT_SIZE 0x20000
+
+#define DE3_VI_SCALER_UNIT_BASE 0x20000
+#define DE3_VI_SCALER_UNIT_SIZE 0x08000
+
/* these two macros assume 16 fractional bits which is standard in DRM */
#define SUN8I_VI_SCALER_SCALE_MIN 1
#define SUN8I_VI_SCALER_SCALE_MAX ((1UL << 20) - 1)
@@ -21,34 +27,48 @@
#define SUN8I_VI_SCALER_COEFF_COUNT 32
#define SUN8I_VI_SCALER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
-#define SUN8I_SCALER_VSU_CTRL(ch) (0x20000 + 0x20000 * (ch) + 0x0)
-#define SUN8I_SCALER_VSU_OUTSIZE(ch) (0x20000 + 0x20000 * (ch) + 0x40)
-#define SUN8I_SCALER_VSU_YINSIZE(ch) (0x20000 + 0x20000 * (ch) + 0x80)
-#define SUN8I_SCALER_VSU_YHSTEP(ch) (0x20000 + 0x20000 * (ch) + 0x88)
-#define SUN8I_SCALER_VSU_YVSTEP(ch) (0x20000 + 0x20000 * (ch) + 0x8c)
-#define SUN8I_SCALER_VSU_YHPHASE(ch) (0x20000 + 0x20000 * (ch) + 0x90)
-#define SUN8I_SCALER_VSU_YVPHASE(ch) (0x20000 + 0x20000 * (ch) + 0x98)
-#define SUN8I_SCALER_VSU_CINSIZE(ch) (0x20000 + 0x20000 * (ch) + 0xc0)
-#define SUN8I_SCALER_VSU_CHSTEP(ch) (0x20000 + 0x20000 * (ch) + 0xc8)
-#define SUN8I_SCALER_VSU_CVSTEP(ch) (0x20000 + 0x20000 * (ch) + 0xcc)
-#define SUN8I_SCALER_VSU_CHPHASE(ch) (0x20000 + 0x20000 * (ch) + 0xd0)
-#define SUN8I_SCALER_VSU_CVPHASE(ch) (0x20000 + 0x20000 * (ch) + 0xd8)
-#define SUN8I_SCALER_VSU_YHCOEFF0(ch, i) \
- (0x20000 + 0x20000 * (ch) + 0x200 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_YHCOEFF1(ch, i) \
- (0x20000 + 0x20000 * (ch) + 0x300 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_YVCOEFF(ch, i) \
- (0x20000 + 0x20000 * (ch) + 0x400 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_CHCOEFF0(ch, i) \
- (0x20000 + 0x20000 * (ch) + 0x600 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_CHCOEFF1(ch, i) \
- (0x20000 + 0x20000 * (ch) + 0x700 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_CVCOEFF(ch, i) \
- (0x20000 + 0x20000 * (ch) + 0x800 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CTRL(base) ((base) + 0x0)
+#define SUN50I_SCALER_VSU_SCALE_MODE(base) ((base) + 0x10)
+#define SUN50I_SCALER_VSU_DIR_THR(base) ((base) + 0x20)
+#define SUN50I_SCALER_VSU_EDGE_THR(base) ((base) + 0x24)
+#define SUN50I_SCALER_VSU_EDSCL_CTRL(base) ((base) + 0x28)
+#define SUN50I_SCALER_VSU_ANGLE_THR(base) ((base) + 0x2c)
+#define SUN8I_SCALER_VSU_OUTSIZE(base) ((base) + 0x40)
+#define SUN8I_SCALER_VSU_YINSIZE(base) ((base) + 0x80)
+#define SUN8I_SCALER_VSU_YHSTEP(base) ((base) + 0x88)
+#define SUN8I_SCALER_VSU_YVSTEP(base) ((base) + 0x8c)
+#define SUN8I_SCALER_VSU_YHPHASE(base) ((base) + 0x90)
+#define SUN8I_SCALER_VSU_YVPHASE(base) ((base) + 0x98)
+#define SUN8I_SCALER_VSU_CINSIZE(base) ((base) + 0xc0)
+#define SUN8I_SCALER_VSU_CHSTEP(base) ((base) + 0xc8)
+#define SUN8I_SCALER_VSU_CVSTEP(base) ((base) + 0xcc)
+#define SUN8I_SCALER_VSU_CHPHASE(base) ((base) + 0xd0)
+#define SUN8I_SCALER_VSU_CVPHASE(base) ((base) + 0xd8)
+#define SUN8I_SCALER_VSU_YHCOEFF0(base, i) ((base) + 0x200 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_YHCOEFF1(base, i) ((base) + 0x300 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_YVCOEFF(base, i) ((base) + 0x400 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CHCOEFF0(base, i) ((base) + 0x600 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CHCOEFF1(base, i) ((base) + 0x700 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CVCOEFF(base, i) ((base) + 0x800 + 0x4 * (i))
#define SUN8I_SCALER_VSU_CTRL_EN BIT(0)
#define SUN8I_SCALER_VSU_CTRL_COEFF_RDY BIT(4)
+#define SUN50I_SCALER_VSU_SUB_ZERO_DIR_THR(x) (((x) & 0xFF) << 24)
+#define SUN50I_SCALER_VSU_ZERO_DIR_THR(x) (((x) & 0xFF) << 16)
+#define SUN50I_SCALER_VSU_HORZ_DIR_THR(x) (((x) & 0xFF) << 8)
+#define SUN50I_SCALER_VSU_VERT_DIR_THR(x) ((x) & 0xFF)
+
+#define SUN50I_SCALER_VSU_SCALE_MODE_UI 0
+#define SUN50I_SCALER_VSU_SCALE_MODE_NORMAL 1
+#define SUN50I_SCALER_VSU_SCALE_MODE_ED_SCALE 2
+
+#define SUN50I_SCALER_VSU_EDGE_SHIFT(x) (((x) & 0xF) << 16)
+#define SUN50I_SCALER_VSU_EDGE_OFFSET(x) ((x) & 0xFF)
+
+#define SUN50I_SCALER_VSU_ANGLE_SHIFT(x) (((x) & 0xF) << 16)
+#define SUN50I_SCALER_VSU_ANGLE_OFFSET(x) ((x) & 0xFF)
+
void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable);
void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 33e533268488..3dac08b24140 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -140,7 +140,6 @@ static int tilcdc_commit(struct drm_device *dev,
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = tilcdc_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = tilcdc_atomic_check,
.atomic_commit = tilcdc_commit,
};
@@ -191,9 +190,6 @@ static void tilcdc_fini(struct drm_device *dev)
drm_dev_unregister(dev);
drm_kms_helper_poll_fini(dev);
-
- drm_fb_cma_fbdev_fini(dev);
-
drm_irq_uninstall(dev);
drm_mode_config_cleanup(dev);
tilcdc_remove_external_device(dev);
@@ -396,16 +392,14 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
drm_mode_config_reset(ddev);
- ret = drm_fb_cma_fbdev_init(ddev, bpp, 0);
- if (ret)
- goto init_failed;
-
drm_kms_helper_poll_init(ddev);
ret = drm_dev_register(ddev, 0);
if (ret)
goto init_failed;
+ drm_fbdev_generic_setup(ddev, bpp);
+
priv->is_registered = true;
return 0;
@@ -519,7 +513,6 @@ DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver tilcdc_driver = {
.driver_features = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
DRIVER_PRIME | DRIVER_ATOMIC),
- .lastclose = drm_fb_helper_lastclose,
.irq_handler = tilcdc_irq,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_print_info = drm_gem_cma_print_info,
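The tilcdc hunks are a conversion to generic fbdev emulation: drm_fbdev_generic_setup() is called once after drm_dev_register() and registers an internal DRM client, which is why the explicit drm_fb_cma_fbdev_init()/_fini() calls and the .lastclose/.output_poll_changed fb-helper hooks can all be dropped. The converted init path reduces to:

	/* Sketch of the new sequence (error handling elided); the client
	 * restores fbdev on lastclose and follows hotplug events itself,
	 * and bpp selects the preferred fbdev depth.
	 */
	drm_mode_config_reset(ddev);
	drm_kms_helper_poll_init(ddev);

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto init_failed;

	drm_fbdev_generic_setup(ddev, bpp);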
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
index 16f4b5c91f1b..2c408ac1a900 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tinydrm/Kconfig
@@ -10,6 +10,17 @@ menuconfig DRM_TINYDRM
config TINYDRM_MIPI_DBI
tristate
+config TINYDRM_HX8357D
+ tristate "DRM support for HX8357D display panels"
+ depends on DRM_TINYDRM && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
+ help
+ DRM driver for the following HX8357D panels:
+	  * YX350HV15-T 3.5" 320x480 TFT (Adafruit 3.5")
+
+ If M is selected the module will be called hx8357d.
+
config TINYDRM_ILI9225
tristate "DRM support for ILI9225 display panels"
depends on DRM_TINYDRM && SPI
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile
index 14d99080665a..f823066f7743 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tinydrm/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_TINYDRM) += core/
obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o
# Displays
+obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 255341ee4eb9..01a6f2d42440 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -36,77 +36,6 @@
* and registers the DRM device using devm_tinydrm_register().
*/
-/**
- * tinydrm_gem_cma_prime_import_sg_table - Produce a CMA GEM object from
- * another driver's scatter/gather table of pinned pages
- * @drm: DRM device to import into
- * @attach: DMA-BUF attachment
- * @sgt: Scatter/gather table of pinned pages
- *
- * This function imports a scatter/gather table exported via DMA-BUF by
- * another driver using drm_gem_cma_prime_import_sg_table(). It sets the
- * kernel virtual address on the CMA object. Drivers should use this as their
- * &drm_driver->gem_prime_import_sg_table callback if they need the virtual
- * address. tinydrm_gem_cma_free_object() should be used in combination with
- * this function.
- *
- * Returns:
- * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
- * error code on failure.
- */
-struct drm_gem_object *
-tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct drm_gem_cma_object *cma_obj;
- struct drm_gem_object *obj;
- void *vaddr;
-
- vaddr = dma_buf_vmap(attach->dmabuf);
- if (!vaddr) {
- DRM_ERROR("Failed to vmap PRIME buffer\n");
- return ERR_PTR(-ENOMEM);
- }
-
- obj = drm_gem_cma_prime_import_sg_table(drm, attach, sgt);
- if (IS_ERR(obj)) {
- dma_buf_vunmap(attach->dmabuf, vaddr);
- return obj;
- }
-
- cma_obj = to_drm_gem_cma_obj(obj);
- cma_obj->vaddr = vaddr;
-
- return obj;
-}
-EXPORT_SYMBOL(tinydrm_gem_cma_prime_import_sg_table);
-
-/**
- * tinydrm_gem_cma_free_object - Free resources associated with a CMA GEM
- * object
- * @gem_obj: GEM object to free
- *
- * This function frees the backing memory of the CMA GEM object, cleans up the
- * GEM object state and frees the memory used to store the object itself using
- * drm_gem_cma_free_object(). It also handles PRIME buffers which has the kernel
- * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers
- * can use this as their &drm_driver->gem_free_object_unlocked callback.
- */
-void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj)
-{
- if (gem_obj->import_attach) {
- struct drm_gem_cma_object *cma_obj;
-
- cma_obj = to_drm_gem_cma_obj(gem_obj);
- dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
- cma_obj->vaddr = NULL;
- }
-
- drm_gem_cma_free_object(gem_obj);
-}
-EXPORT_SYMBOL_GPL(tinydrm_gem_cma_free_object);
-
static struct drm_framebuffer *
tinydrm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -146,6 +75,7 @@ static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
drm->dev_private = tdev;
drm_mode_config_init(drm);
drm->mode_config.funcs = &tinydrm_mode_config_funcs;
+ drm->mode_config.allow_fb_modifiers = true;
return 0;
}
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index dcd390163a4a..bf6bfbc5d412 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -9,12 +9,18 @@
#include <linux/backlight.h>
#include <linux/dma-buf.h>
+#include <linux/module.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/swab.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/tinydrm/tinydrm.h>
#include <drm/tinydrm/tinydrm-helpers.h>
+#include <uapi/drm/drm.h>
static unsigned int spi_max;
module_param(spi_max, uint, 0400);
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index 7e8e24d0b7a7..eacfc0ec8ff1 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -184,6 +184,10 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev,
struct drm_display_mode mode_copy;
struct drm_connector *connector;
int ret;
+ static const uint64_t modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+ };
drm_mode_copy(&mode_copy, mode);
ret = tinydrm_rotate_mode(&mode_copy, rotation);
@@ -202,6 +206,6 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev,
return PTR_ERR(connector);
return drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats,
- format_count, NULL, connector);
+ format_count, modifiers, connector);
}
EXPORT_SYMBOL(tinydrm_display_pipe_init);
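Note the convention for the new modifiers array: lists passed to drm_simple_display_pipe_init() must be terminated with DRM_FORMAT_MOD_INVALID, and they only take effect because tinydrm_init() now sets mode_config.allow_fb_modifiers (see the tinydrm-core.c hunk above). Minimal form:

	/* Advertise linear-only framebuffers; the INVALID entry is the
	 * mandatory list terminator, not a supported modifier.
	 */
	static const uint64_t linear_only[] = {
		DRM_FORMAT_MOD_LINEAR,
		DRM_FORMAT_MOD_INVALID
	};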
diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tinydrm/hx8357d.c
new file mode 100644
index 000000000000..81a2bbeb25d4
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/hx8357d.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for the HX8357D LCD controller
+ *
+ * Copyright 2018 Broadcom
+ * Copyright 2018 David Lechner <david@lechnology.com>
+ * Copyright 2016 Noralf Trønnes
+ * Copyright (C) 2015 Adafruit Industries
+ * Copyright (C) 2013 Christian Vogelgsang
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <video/mipi_display.h>
+
+#define HX8357D_SETOSC 0xb0
+#define HX8357D_SETPOWER 0xb1
+#define HX8357D_SETRGB 0xb3
+#define HX8357D_SETCYC 0xb4
+#define HX8357D_SETCOM 0xb6
+#define HX8357D_SETEXTC 0xb9
+#define HX8357D_SETSTBA 0xc0
+#define HX8357D_SETPANEL 0xcc
+#define HX8357D_SETGAMMA 0xe0
+
+#define HX8357D_MADCTL_MY 0x80
+#define HX8357D_MADCTL_MX 0x40
+#define HX8357D_MADCTL_MV 0x20
+#define HX8357D_MADCTL_ML 0x10
+#define HX8357D_MADCTL_RGB 0x00
+#define HX8357D_MADCTL_BGR 0x08
+#define HX8357D_MADCTL_MH 0x04
+
+static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ u8 addr_mode;
+ int ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ ret = mipi_dbi_poweron_conditional_reset(mipi);
+ if (ret < 0)
+ return;
+ if (ret == 1)
+ goto out_enable;
+
+ /* setextc */
+ mipi_dbi_command(mipi, HX8357D_SETEXTC, 0xFF, 0x83, 0x57);
+ msleep(150);
+
+ /* setRGB which also enables SDO */
+ mipi_dbi_command(mipi, HX8357D_SETRGB, 0x00, 0x00, 0x06, 0x06);
+
+ /* -1.52V */
+ mipi_dbi_command(mipi, HX8357D_SETCOM, 0x25);
+
+	/* Normal mode 70 Hz, idle mode 55 Hz */
+ mipi_dbi_command(mipi, HX8357D_SETOSC, 0x68);
+
+ /* Set Panel - BGR, Gate direction swapped */
+ mipi_dbi_command(mipi, HX8357D_SETPANEL, 0x05);
+
+ mipi_dbi_command(mipi, HX8357D_SETPOWER,
+ 0x00, /* Not deep standby */
+ 0x15, /* BT */
+ 0x1C, /* VSPR */
+ 0x1C, /* VSNR */
+ 0x83, /* AP */
+ 0xAA); /* FS */
+
+ mipi_dbi_command(mipi, HX8357D_SETSTBA,
+ 0x50, /* OPON normal */
+ 0x50, /* OPON idle */
+ 0x01, /* STBA */
+ 0x3C, /* STBA */
+ 0x1E, /* STBA */
+ 0x08); /* GEN */
+
+ mipi_dbi_command(mipi, HX8357D_SETCYC,
+ 0x02, /* NW 0x02 */
+ 0x40, /* RTN */
+ 0x00, /* DIV */
+ 0x2A, /* DUM */
+ 0x2A, /* DUM */
+ 0x0D, /* GDON */
+ 0x78); /* GDOFF */
+
+ mipi_dbi_command(mipi, HX8357D_SETGAMMA,
+ 0x02,
+ 0x0A,
+ 0x11,
+ 0x1d,
+ 0x23,
+ 0x35,
+ 0x41,
+ 0x4b,
+ 0x4b,
+ 0x42,
+ 0x3A,
+ 0x27,
+ 0x1B,
+ 0x08,
+ 0x09,
+ 0x03,
+ 0x02,
+ 0x0A,
+ 0x11,
+ 0x1d,
+ 0x23,
+ 0x35,
+ 0x41,
+ 0x4b,
+ 0x4b,
+ 0x42,
+ 0x3A,
+ 0x27,
+ 0x1B,
+ 0x08,
+ 0x09,
+ 0x03,
+ 0x00,
+ 0x01);
+
+ /* 16 bit */
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT,
+ MIPI_DCS_PIXEL_FMT_16BIT);
+
+	/* TE on, v-blank info only (SET_TEAR_ON mode 0) */
+ mipi_dbi_command(mipi, MIPI_DCS_SET_TEAR_ON, 0x00);
+
+ /* tear line */
+ mipi_dbi_command(mipi, MIPI_DCS_SET_TEAR_SCANLINE, 0x00, 0x02);
+
+ /* Exit Sleep */
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(150);
+
+ /* display on */
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ usleep_range(5000, 7000);
+
+out_enable:
+ switch (mipi->rotation) {
+ default:
+ addr_mode = HX8357D_MADCTL_MX | HX8357D_MADCTL_MY;
+ break;
+ case 90:
+ addr_mode = HX8357D_MADCTL_MV | HX8357D_MADCTL_MY;
+ break;
+ case 180:
+ addr_mode = 0;
+ break;
+ case 270:
+ addr_mode = HX8357D_MADCTL_MV | HX8357D_MADCTL_MX;
+ break;
+ }
+ mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+}
+
+static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
+ .enable = yx240qv29_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = tinydrm_display_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode yx350hv15_mode = {
+ TINYDRM_MODE(320, 480, 60, 75),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
+
+static struct drm_driver hx8357d_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+ .fops = &hx8357d_fops,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "hx8357d",
+ .desc = "HX8357D",
+ .date = "20181023",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id hx8357d_of_match[] = {
+ { .compatible = "adafruit,yx350hv15" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hx8357d_of_match);
+
+static const struct spi_device_id hx8357d_id[] = {
+ { "yx350hv15", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, hx8357d_id);
+
+static int hx8357d_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi *mipi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ mipi->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(mipi->backlight))
+ return PTR_ERR(mipi->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, mipi, dc);
+ if (ret)
+ return ret;
+
+ ret = mipi_dbi_init(&spi->dev, mipi, &hx8357d_pipe_funcs,
+ &hx8357d_driver, &yx350hv15_mode, rotation);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, mipi);
+
+ return devm_tinydrm_register(&mipi->tinydrm);
+}
+
+static void hx8357d_shutdown(struct spi_device *spi)
+{
+ struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+ tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver hx8357d_spi_driver = {
+ .driver = {
+ .name = "hx8357d",
+ .of_match_table = hx8357d_of_match,
+ },
+ .id_table = hx8357d_id,
+ .probe = hx8357d_probe,
+ .shutdown = hx8357d_shutdown,
+};
+module_spi_driver(hx8357d_spi_driver);
+
+MODULE_DESCRIPTION("HX8357D DRM driver");
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_LICENSE("GPL");
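Two behaviours of the new driver worth noting: mipi_dbi_poweron_conditional_reset() returns 1 when the controller kept its state (if memory of its kerneldoc serves), in which case the long init sequence is skipped and only the address mode and flush are reapplied; and the rotation switch maps degrees to MADCTL mirror/exchange bits, with the default case covering rotation 0 and any unrecognised value. The mapping, condensed:

	/* MV exchanges rows/columns, MX/MY mirror each axis. */
	static u8 hx8357d_addr_mode_sketch(u32 rotation)
	{
		switch (rotation) {
		case 90:
			return HX8357D_MADCTL_MV | HX8357D_MADCTL_MY;
		case 180:
			return 0;
		case 270:
			return HX8357D_MADCTL_MV | HX8357D_MADCTL_MX;
		default:
			return HX8357D_MADCTL_MX | HX8357D_MADCTL_MY;
		}
	}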
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 455fefe012f5..78f7c2d1b449 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -20,7 +20,8 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -367,7 +368,7 @@ static struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &ili9225_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "ili9225",
.desc = "Ilitek ILI9225",
.date = "20171106",
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
index 6701037749a7..51395bdc6ca2 100644
--- a/drivers/gpu/drm/tinydrm/ili9341.c
+++ b/drivers/gpu/drm/tinydrm/ili9341.c
@@ -15,7 +15,7 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
@@ -144,7 +144,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
static struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
.fops = &ili9341_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index d7bb4c5e6657..3fa62e77c30b 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -17,9 +17,9 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_modeset_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
#include <video/mipi_display.h>
@@ -153,7 +153,7 @@ static struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index cb3441e51d5f..3a05e56f9b0d 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -9,15 +9,19 @@
* (at your option) any later version.
*/
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <uapi/drm/drm.h>
#include <video/mipi_display.h>
#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
@@ -240,10 +244,10 @@ static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
(clip.x1 >> 8) & 0xFF, clip.x1 & 0xFF,
- (clip.x2 >> 8) & 0xFF, (clip.x2 - 1) & 0xFF);
+ ((clip.x2 - 1) >> 8) & 0xFF, (clip.x2 - 1) & 0xFF);
mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
(clip.y1 >> 8) & 0xFF, clip.y1 & 0xFF,
- (clip.y2 >> 8) & 0xFF, (clip.y2 - 1) & 0xFF);
+ ((clip.y2 - 1) >> 8) & 0xFF, (clip.y2 - 1) & 0xFF);
ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, tr,
(clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
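
The two hunks above fix an off-by-one in the DCS window setup: the set_column/set_page end coordinate is inclusive, so both bytes must derive from clip.x2 - 1 (and clip.y2 - 1), not just the low byte. A minimal stand-alone sketch of the corrected encoding, using a hypothetical encode_window() helper rather than the kernel API:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the corrected encoding: the DCS
     * window end coordinate is inclusive, so both bytes come from
     * x2 - 1. */
    static void encode_window(uint16_t x1, uint16_t x2, uint8_t out[4])
    {
        uint16_t end = x2 - 1;          /* inclusive end column */

        out[0] = (x1 >> 8) & 0xFF;      /* start, high byte */
        out[1] = x1 & 0xFF;             /* start, low byte */
        out[2] = (end >> 8) & 0xFF;     /* end, high byte (the fixed part) */
        out[3] = end & 0xFF;            /* end, low byte */
    }

    int main(void)
    {
        uint8_t buf[4];

        encode_window(0, 320, buf);     /* columns 0..319 */
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;                       /* prints: 00 00 01 3f */
    }
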
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 50a1d4216ce7..54d6fe0f37ce 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -26,6 +26,8 @@
#include <linux/spi/spi.h>
#include <linux/thermal.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/tinydrm.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -106,12 +108,11 @@ static int repaper_spi_transfer(struct spi_device *spi, u8 header,
/* Stack allocated tx? */
if (tx && len <= 32) {
- txbuf = kmalloc(len, GFP_KERNEL);
+ txbuf = kmemdup(tx, len, GFP_KERNEL);
if (!txbuf) {
ret = -ENOMEM;
goto out_free;
}
- memcpy(txbuf, tx, len);
}
if (rx) {
@@ -882,7 +883,7 @@ static struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &repaper_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
.date = "20170405",
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 2fcbc3067d71..a6a8a1081b73 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,7 +17,8 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -303,7 +304,7 @@ static struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &st7586_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 3081bc57c116..b39779e0dcd8 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -14,7 +14,7 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -119,7 +119,7 @@ static struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &st7735r_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 26b889f86670..d87935bf8e30 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,6 +45,14 @@
static void ttm_bo_global_kobj_release(struct kobject *kobj);
+/**
+ * ttm_global_mutex - protecting the global BO state
+ */
+DEFINE_MUTEX(ttm_global_mutex);
+struct ttm_bo_global ttm_bo_glob = {
+ .use_count = 0
+};
+
static struct attribute ttm_bo_count = {
.name = "bo_count",
.mode = S_IRUGO
@@ -872,7 +880,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
if (fence) {
reservation_object_add_shared_fence(bo->resv, fence);
- ret = reservation_object_reserve_shared(bo->resv);
+ ret = reservation_object_reserve_shared(bo->resv, 1);
if (unlikely(ret))
return ret;
@@ -977,7 +985,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool has_erestartsys = false;
int i, ret;
- ret = reservation_object_reserve_shared(bo->resv);
+ ret = reservation_object_reserve_shared(bo->resv, 1);
if (unlikely(ret))
return ret;
@@ -1519,35 +1527,45 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
container_of(kobj, struct ttm_bo_global, kobj);
__free_page(glob->dummy_read_page);
- kfree(glob);
}
-void ttm_bo_global_release(struct drm_global_reference *ref)
+static void ttm_bo_global_release(void)
{
- struct ttm_bo_global *glob = ref->object;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
+
+ mutex_lock(&ttm_global_mutex);
+ if (--glob->use_count > 0)
+ goto out;
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
+ ttm_mem_global_release(&ttm_mem_glob);
+out:
+ mutex_unlock(&ttm_global_mutex);
}
-EXPORT_SYMBOL(ttm_bo_global_release);
-int ttm_bo_global_init(struct drm_global_reference *ref)
+static int ttm_bo_global_init(void)
{
- struct ttm_bo_global_ref *bo_ref =
- container_of(ref, struct ttm_bo_global_ref, ref);
- struct ttm_bo_global *glob = ref->object;
- int ret;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
+ int ret = 0;
unsigned i;
- mutex_init(&glob->device_list_mutex);
+ mutex_lock(&ttm_global_mutex);
+ if (++glob->use_count > 1)
+ goto out;
+
+ ret = ttm_mem_global_init(&ttm_mem_glob);
+ if (ret)
+ goto out;
+
spin_lock_init(&glob->lru_lock);
- glob->mem_glob = bo_ref->mem_glob;
+ glob->mem_glob = &ttm_mem_glob;
glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
ret = -ENOMEM;
- goto out_no_drp;
+ goto out;
}
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
@@ -1559,13 +1577,10 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
if (unlikely(ret != 0))
kobject_put(&glob->kobj);
- return ret;
-out_no_drp:
- kfree(glob);
+out:
+ mutex_unlock(&ttm_global_mutex);
return ret;
}
-EXPORT_SYMBOL(ttm_bo_global_init);
-
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
@@ -1587,9 +1602,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
}
}
- mutex_lock(&glob->device_list_mutex);
+ mutex_lock(&ttm_global_mutex);
list_del(&bdev->device_list);
- mutex_unlock(&glob->device_list_mutex);
+ mutex_unlock(&ttm_global_mutex);
cancel_delayed_work_sync(&bdev->wq);
@@ -1604,18 +1619,25 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
drm_vma_offset_manager_destroy(&bdev->vma_manager);
+ if (!ret)
+ ttm_bo_global_release();
+
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
- struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset,
bool need_dma32)
{
- int ret = -EINVAL;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
+ int ret;
+
+ ret = ttm_bo_global_init();
+ if (ret)
+ return ret;
bdev->driver = driver;
@@ -1636,12 +1658,13 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev->dev_mapping = mapping;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
- mutex_lock(&glob->device_list_mutex);
+ mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
- mutex_unlock(&glob->device_list_mutex);
+ mutex_unlock(&ttm_global_mutex);
return 0;
out_no_sys:
+ ttm_bo_global_release();
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
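
The ttm_bo.c changes above replace the per-driver drm_global_reference machinery with a single mutex-protected, reference-counted instance: the first ttm_bo_device_init() brings up the globals (including ttm_mem_glob), and the last ttm_bo_device_release() tears them down. A user-space sketch of that pattern, with hypothetical names and pthreads standing in for the kernel mutex:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int use_count;

    /* First caller performs the one-time setup; later callers only
     * bump the count. Mirrors ttm_bo_global_init() above. */
    static int global_init(void)
    {
        int ret = 0;

        pthread_mutex_lock(&global_mutex);
        if (++use_count > 1)
            goto out;
        printf("one-time setup\n");
    out:
        pthread_mutex_unlock(&global_mutex);
        return ret;
    }

    /* Last caller performs the one-time teardown. */
    static void global_release(void)
    {
        pthread_mutex_lock(&global_mutex);
        if (--use_count == 0)
            printf("one-time teardown\n");
        pthread_mutex_unlock(&global_mutex);
    }

    int main(void)
    {
        global_init();      /* device A: setup runs */
        global_init();      /* device B: count only */
        global_release();   /* device B */
        global_release();   /* device A: teardown runs */
        return 0;
    }
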
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ba80150d1052..895d77d799e4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (!fbo)
return -ENOMEM;
- ttm_bo_get(bo);
fbo->base = *bo;
+ fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
+
+ ttm_bo_get(bo);
fbo->bo = bo;
/**
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index e73ae0d22897..93860346c426 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -126,10 +126,11 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
}
if (!ret) {
- if (!entry->shared)
+ if (!entry->num_shared)
continue;
- ret = reservation_object_reserve_shared(bo->resv);
+ ret = reservation_object_reserve_shared(bo->resv,
+ entry->num_shared);
if (!ret)
continue;
}
@@ -150,8 +151,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
}
}
- if (!ret && entry->shared)
- ret = reservation_object_reserve_shared(bo->resv);
+ if (!ret && entry->num_shared)
+ ret = reservation_object_reserve_shared(bo->resv,
+ entry->num_shared);
if (unlikely(ret != 0)) {
if (ret == -EINTR)
@@ -187,21 +189,19 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
- struct ttm_bo_driver *driver;
if (list_empty(list))
return;
bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
bdev = bo->bdev;
- driver = bdev->driver;
glob = bo->bdev->glob;
spin_lock(&glob->lru_lock);
list_for_each_entry(entry, list, head) {
bo = entry->bo;
- if (entry->shared)
+ if (entry->num_shared)
reservation_object_add_shared_fence(bo->resv, fence);
else
reservation_object_add_excl_fence(bo->resv, fence);
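
The reservation API change threaded through the TTM hunks splits fence bookkeeping into two phases: callers first declare how many shared-fence slots they will need (reservation_object_reserve_shared(resv, n), driven here by entry->num_shared), so that adding the fences later cannot fail. A rough user-space sketch of the reserve-then-add split, with a hypothetical fence_list type:

    #include <stdio.h>
    #include <stdlib.h>

    struct fence_list {
        int *slots;
        int count, reserved;
    };

    /* Only this step may fail; it is called while errors are still
     * recoverable. */
    static int reserve_shared(struct fence_list *l, int num)
    {
        int *tmp = realloc(l->slots, (l->reserved + num) * sizeof(*tmp));

        if (!tmp)
            return -1;
        l->slots = tmp;
        l->reserved += num;
        return 0;
    }

    /* Cannot fail: a slot was reserved up front. */
    static void add_shared_fence(struct fence_list *l, int fence)
    {
        l->slots[l->count++] = fence;
    }

    int main(void)
    {
        struct fence_list l = { 0 };

        if (reserve_shared(&l, 2))
            return 1;
        add_shared_fence(&l, 42);
        add_shared_fence(&l, 43);
        printf("%d fences in %d reserved slots\n", l.count, l.reserved);
        free(l.slots);
        return 0;
    }
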
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 450387c92b63..f1567c353b54 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -41,6 +41,9 @@
#define TTM_MEMORY_ALLOC_RETRIES 4
+struct ttm_mem_global ttm_mem_glob;
+EXPORT_SYMBOL(ttm_mem_glob);
+
struct ttm_mem_zone {
struct kobject kobj;
struct ttm_mem_global *glob;
@@ -216,14 +219,6 @@ static ssize_t ttm_mem_global_store(struct kobject *kobj,
return size;
}
-static void ttm_mem_global_kobj_release(struct kobject *kobj)
-{
- struct ttm_mem_global *glob =
- container_of(kobj, struct ttm_mem_global, kobj);
-
- kfree(glob);
-}
-
static struct attribute *ttm_mem_global_attrs[] = {
&ttm_mem_global_lower_mem_limit,
NULL
@@ -235,7 +230,6 @@ static const struct sysfs_ops ttm_mem_global_ops = {
};
static struct kobj_type ttm_mem_glob_kobj_type = {
- .release = &ttm_mem_global_kobj_release,
.sysfs_ops = &ttm_mem_global_ops,
.default_attrs = ttm_mem_global_attrs,
};
@@ -464,7 +458,6 @@ out_no_zone:
ttm_mem_global_release(glob);
return ret;
}
-EXPORT_SYMBOL(ttm_mem_global_init);
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
@@ -486,7 +479,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
-EXPORT_SYMBOL(ttm_mem_global_release);
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 72efcecb44f7..28e2d03c0ccf 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -249,7 +249,7 @@ static int tve200_probe(struct platform_device *pdev)
clk_disable:
clk_disable_unprepare(priv->pclk);
dev_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -263,7 +263,7 @@ static int tve200_remove(struct platform_device *pdev)
drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
clk_disable_unprepare(priv->pclk);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index f455f095a146..1b014d92855b 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -350,15 +350,10 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto err;
- ret = drm_vblank_init(dev, 1);
- if (ret)
- goto err_fb;
-
drm_kms_helper_poll_init(dev);
return 0;
-err_fb:
- udl_fbdev_cleanup(dev);
+
err:
if (udl->urbs.count)
udl_free_urb_list(dev);
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 54d96518a131..a08766d39eab 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -293,6 +293,7 @@ v3d_prime_import_sg_table(struct drm_device *dev,
bo->resv = attach->dmabuf->resv;
bo->sgt = sgt;
+ obj->import_attach = attach;
v3d_bo_get_pages(bo);
v3d_mmu_insert_ptes(bo);
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 4db62c545748..eb2b2d2f8553 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -71,10 +71,13 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
V3D_READ(v3d_hub_reg_defs[i].reg));
}
- for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg,
- V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+ if (v3d->ver < 41) {
+ for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ v3d_gca_reg_defs[i].name,
+ v3d_gca_reg_defs[i].reg,
+ V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+ }
}
for (core = 0; core < v3d->cores; core++) {
@@ -176,9 +179,44 @@ static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused)
return 0;
}
+static int v3d_measure_clock(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ uint32_t cycles;
+ int core = 0;
+ int measure_ms = 1000;
+
+ if (v3d->ver >= 40) {
+ V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
+ V3D_SET_FIELD(V3D_PCTR_CYCLE_COUNT,
+ V3D_PCTR_S0));
+ V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1);
+ V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1);
+ } else {
+ V3D_CORE_WRITE(core, V3D_V3_PCTR_0_PCTRS0,
+ V3D_PCTR_CYCLE_COUNT);
+ V3D_CORE_WRITE(core, V3D_V3_PCTR_0_CLR, 1);
+ V3D_CORE_WRITE(core, V3D_V3_PCTR_0_EN,
+ V3D_V3_PCTR_0_EN_ENABLE |
+ 1);
+ }
+ msleep(measure_ms);
+ cycles = V3D_CORE_READ(core, V3D_PCTR_0_PCTR0);
+
+ seq_printf(m, "cycles: %d (%d.%d MHz)\n",
+ cycles,
+ cycles / (measure_ms * 1000),
+ (cycles / (measure_ms * 100)) % 10);
+
+ return 0;
+}
+
static const struct drm_info_list v3d_debugfs_list[] = {
{"v3d_ident", v3d_v3d_debugfs_ident, 0},
{"v3d_regs", v3d_v3d_debugfs_regs, 0},
+ {"measure_clock", v3d_measure_clock, 0},
{"bo_stats", v3d_debugfs_bo_stats, 0},
};
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 2a85fa68ffea..f0afcec72c34 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -112,10 +112,15 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
return 0;
}
- /* Any params that aren't just register reads would go here. */
- DRM_DEBUG("Unknown parameter %d\n", args->param);
- return -EINVAL;
+ switch (args->param) {
+ case DRM_V3D_PARAM_SUPPORTS_TFU:
+ args->value = 1;
+ return 0;
+ default:
+ DRM_DEBUG("Unknown parameter %d\n", args->param);
+ return -EINVAL;
+ }
}
static int
@@ -170,7 +175,8 @@ static const struct file_operations v3d_drm_fops = {
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
 * protection between clients. Note that render nodes would be
* able to submit CLs that could access BOs from clients authenticated
- * with the master node.
+ * with the master node. The TFU doesn't use the GMP, so it would
+ * need to stay DRM_AUTH until we do buffer size/offset validation.
*/
static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
@@ -179,6 +185,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
};
static const struct vm_operations_struct v3d_vm_ops = {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index e6fed696ad86..dcb772a19191 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -7,19 +7,18 @@
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
+#include "uapi/drm/v3d_drm.h"
#define GMP_GRANULARITY (128 * 1024)
-/* Enum for each of the V3D queues. We maintain various queue
- * tracking as an array because at some point we'll want to support
- * the TFU (texture formatting unit) as another queue.
- */
+/* Enum for each of the V3D queues. */
enum v3d_queue {
V3D_BIN,
V3D_RENDER,
+ V3D_TFU,
};
-#define V3D_MAX_QUEUES (V3D_RENDER + 1)
+#define V3D_MAX_QUEUES (V3D_TFU + 1)
struct v3d_queue_state {
struct drm_gpu_scheduler sched;
@@ -68,6 +67,7 @@ struct v3d_dev {
struct v3d_exec_info *bin_job;
struct v3d_exec_info *render_job;
+ struct v3d_tfu_job *tfu_job;
struct v3d_queue_state queue[V3D_MAX_QUEUES];
@@ -198,6 +198,11 @@ struct v3d_exec_info {
*/
struct dma_fence *bin_done_fence;
+ /* Fence for when the scheduler considers the render to be
+ * done, for when the BOs reservations should be complete.
+ */
+ struct dma_fence *render_done_fence;
+
struct kref refcount;
/* This is the array of BOs that were looked up at the start of exec. */
@@ -213,6 +218,25 @@ struct v3d_exec_info {
u32 qma, qms, qts;
};
+struct v3d_tfu_job {
+ struct drm_sched_job base;
+
+ struct drm_v3d_submit_tfu args;
+
+ /* An optional fence userspace can pass in for the job to depend on. */
+ struct dma_fence *in_fence;
+
+ /* v3d fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ struct v3d_dev *v3d;
+
+ struct kref refcount;
+
+ /* This is the array of BOs that were looked up at the start of exec. */
+ struct v3d_bo *bo[4];
+};
+
/**
* _wait_for - magic (register) wait macro
*
@@ -276,9 +300,12 @@ int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void v3d_exec_put(struct v3d_exec_info *exec);
+void v3d_tfu_job_put(struct v3d_tfu_job *exec);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_flush_caches(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index 50bfcf9a8a1a..b0a2a1ae2eb1 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -29,10 +29,16 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
{
struct v3d_fence *f = to_v3d_fence(fence);
- if (f->queue == V3D_BIN)
+ switch (f->queue) {
+ case V3D_BIN:
return "v3d-bin";
- else
+ case V3D_RENDER:
return "v3d-render";
+ case V3D_TFU:
+ return "v3d-tfu";
+ default:
+ return NULL;
+ }
}
const struct dma_fence_ops v3d_fence_ops = {
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 70c54774400b..05ca6319065e 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -207,32 +207,26 @@ v3d_flush_caches(struct v3d_dev *v3d)
}
static void
-v3d_attach_object_fences(struct v3d_exec_info *exec)
+v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
+ struct dma_fence *fence)
{
- struct dma_fence *out_fence = &exec->render.base.s_fence->finished;
- struct v3d_bo *bo;
int i;
- for (i = 0; i < exec->bo_count; i++) {
- bo = to_v3d_bo(&exec->bo[i]->base);
-
+ for (i = 0; i < bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- reservation_object_add_excl_fence(bo->resv, out_fence);
+ reservation_object_add_excl_fence(bos[i]->resv, fence);
}
}
static void
-v3d_unlock_bo_reservations(struct drm_device *dev,
- struct v3d_exec_info *exec,
+v3d_unlock_bo_reservations(struct v3d_bo **bos,
+ int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
int i;
- for (i = 0; i < exec->bo_count; i++) {
- struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base);
-
- ww_mutex_unlock(&bo->resv->lock);
- }
+ for (i = 0; i < bo_count; i++)
+ ww_mutex_unlock(&bos[i]->resv->lock);
ww_acquire_fini(acquire_ctx);
}
@@ -245,19 +239,19 @@ v3d_unlock_bo_reservations(struct drm_device *dev,
* to v3d, so we don't attach dma-buf fences to them.
*/
static int
-v3d_lock_bo_reservations(struct drm_device *dev,
- struct v3d_exec_info *exec,
+v3d_lock_bo_reservations(struct v3d_bo **bos,
+ int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
int contended_lock = -1;
int i, ret;
- struct v3d_bo *bo;
ww_acquire_init(acquire_ctx, &reservation_ww_class);
retry:
if (contended_lock != -1) {
- bo = to_v3d_bo(&exec->bo[contended_lock]->base);
+ struct v3d_bo *bo = bos[contended_lock];
+
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
acquire_ctx);
if (ret) {
@@ -266,23 +260,20 @@ retry:
}
}
- for (i = 0; i < exec->bo_count; i++) {
+ for (i = 0; i < bo_count; i++) {
if (i == contended_lock)
continue;
- bo = to_v3d_bo(&exec->bo[i]->base);
-
- ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
+ ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock,
+ acquire_ctx);
if (ret) {
int j;
- for (j = 0; j < i; j++) {
- bo = to_v3d_bo(&exec->bo[j]->base);
- ww_mutex_unlock(&bo->resv->lock);
- }
+ for (j = 0; j < i; j++)
+ ww_mutex_unlock(&bos[j]->resv->lock);
if (contended_lock != -1 && contended_lock >= i) {
- bo = to_v3d_bo(&exec->bo[contended_lock]->base);
+ struct v3d_bo *bo = bos[contended_lock];
ww_mutex_unlock(&bo->resv->lock);
}
@@ -302,12 +293,11 @@ retry:
/* Reserve space for our shared (read-only) fence references,
* before we commit the CL to the hardware.
*/
- for (i = 0; i < exec->bo_count; i++) {
- bo = to_v3d_bo(&exec->bo[i]->base);
-
- ret = reservation_object_reserve_shared(bo->resv);
+ for (i = 0; i < bo_count; i++) {
+ ret = reservation_object_reserve_shared(bos[i]->resv, 1);
if (ret) {
- v3d_unlock_bo_reservations(dev, exec, acquire_ctx);
+ v3d_unlock_bo_reservations(bos, bo_count,
+ acquire_ctx);
return ret;
}
}
@@ -409,6 +399,7 @@ v3d_exec_cleanup(struct kref *ref)
dma_fence_put(exec->render.done_fence);
dma_fence_put(exec->bin_done_fence);
+ dma_fence_put(exec->render_done_fence);
for (i = 0; i < exec->bo_count; i++)
drm_gem_object_put_unlocked(&exec->bo[i]->base);
@@ -429,6 +420,33 @@ void v3d_exec_put(struct v3d_exec_info *exec)
kref_put(&exec->refcount, v3d_exec_cleanup);
}
+static void
+v3d_tfu_job_cleanup(struct kref *ref)
+{
+ struct v3d_tfu_job *job = container_of(ref, struct v3d_tfu_job,
+ refcount);
+ struct v3d_dev *v3d = job->v3d;
+ unsigned int i;
+
+ dma_fence_put(job->in_fence);
+ dma_fence_put(job->done_fence);
+
+ for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
+ if (job->bo[i])
+ drm_gem_object_put_unlocked(&job->bo[i]->base);
+ }
+
+ pm_runtime_mark_last_busy(v3d->dev);
+ pm_runtime_put_autosuspend(v3d->dev);
+
+ kfree(job);
+}
+
+void v3d_tfu_job_put(struct v3d_tfu_job *job)
+{
+ kref_put(&job->refcount, v3d_tfu_job_cleanup);
+}
+
int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -503,6 +521,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_syncobj *sync_out;
int ret = 0;
+ trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
+
if (args->pad != 0) {
DRM_INFO("pad must be zero: %d\n", args->pad);
return -EINVAL;
@@ -521,12 +541,12 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
kref_init(&exec->refcount);
ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
- 0, &exec->bin.in_fence);
+ 0, 0, &exec->bin.in_fence);
if (ret == -EINVAL)
goto fail;
ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
- 0, &exec->render.in_fence);
+ 0, 0, &exec->render.in_fence);
if (ret == -EINVAL)
goto fail;
@@ -546,7 +566,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (ret)
goto fail;
- ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx);
+ ret = v3d_lock_bo_reservations(exec->bo, exec->bo_count,
+ &acquire_ctx);
if (ret)
goto fail;
@@ -572,20 +593,23 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (ret)
goto fail_unreserve;
+ exec->render_done_fence =
+ dma_fence_get(&exec->render.base.s_fence->finished);
+
kref_get(&exec->refcount); /* put by scheduler job completion */
drm_sched_entity_push_job(&exec->render.base,
&v3d_priv->sched_entity[V3D_RENDER]);
mutex_unlock(&v3d->sched_lock);
- v3d_attach_object_fences(exec);
+ v3d_attach_object_fences(exec->bo, exec->bo_count,
+ exec->render_done_fence);
- v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+ v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
 /* Update the return sync object for the submit. */
sync_out = drm_syncobj_find(file_priv, args->out_sync);
if (sync_out) {
- drm_syncobj_replace_fence(sync_out, 0,
- &exec->render.base.s_fence->finished);
+ drm_syncobj_replace_fence(sync_out, exec->render_done_fence);
drm_syncobj_put(sync_out);
}
@@ -595,13 +619,121 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
- v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+ v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
fail:
v3d_exec_put(exec);
return ret;
}
+/**
+ * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the TFU, which we don't
+ * need to validate since the TFU is behind the MMU.
+ */
+int
+v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_submit_tfu *args = data;
+ struct v3d_tfu_job *job;
+ struct ww_acquire_ctx acquire_ctx;
+ struct drm_syncobj *sync_out;
+ struct dma_fence *sched_done_fence;
+ int ret = 0;
+ int bo_count;
+
+ trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
+
+ job = kcalloc(1, sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ ret = pm_runtime_get_sync(v3d->dev);
+ if (ret < 0) {
+ kfree(job);
+ return ret;
+ }
+
+ kref_init(&job->refcount);
+
+ ret = drm_syncobj_find_fence(file_priv, args->in_sync,
+ 0, 0, &job->in_fence);
+ if (ret == -EINVAL)
+ goto fail;
+
+ job->args = *args;
+ job->v3d = v3d;
+
+ spin_lock(&file_priv->table_lock);
+ for (bo_count = 0; bo_count < ARRAY_SIZE(job->bo); bo_count++) {
+ struct drm_gem_object *bo;
+
+ if (!args->bo_handles[bo_count])
+ break;
+
+ bo = idr_find(&file_priv->object_idr,
+ args->bo_handles[bo_count]);
+ if (!bo) {
+ DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
+ bo_count, args->bo_handles[bo_count]);
+ ret = -ENOENT;
+ spin_unlock(&file_priv->table_lock);
+ goto fail;
+ }
+ drm_gem_object_get(bo);
+ job->bo[bo_count] = to_v3d_bo(bo);
+ }
+ spin_unlock(&file_priv->table_lock);
+
+ ret = v3d_lock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ mutex_lock(&v3d->sched_lock);
+ ret = drm_sched_job_init(&job->base,
+ &v3d_priv->sched_entity[V3D_TFU],
+ v3d_priv);
+ if (ret)
+ goto fail_unreserve;
+
+ sched_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ kref_get(&job->refcount); /* put by scheduler job completion */
+ drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[V3D_TFU]);
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_object_fences(job->bo, bo_count, sched_done_fence);
+
+ v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+
+ /* Update the return sync object */
+ sync_out = drm_syncobj_find(file_priv, args->out_sync);
+ if (sync_out) {
+ drm_syncobj_replace_fence(sync_out, sched_done_fence);
+ drm_syncobj_put(sync_out);
+ }
+ dma_fence_put(sched_done_fence);
+
+ v3d_tfu_job_put(job);
+
+ return 0;
+
+fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
+ v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+fail:
+ v3d_tfu_job_put(job);
+
+ return ret;
+}
+
int
v3d_gem_init(struct drm_device *dev)
{
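
v3d_submit_tfu_ioctl() above uses a simple BO-handle convention: a fixed four-entry array where the first zero handle terminates the list. A user-space sketch of the same gather loop, with assumed handle values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t bo_handles[4] = { 7, 9, 0, 0 };    /* assumed handles */
        int bo_count;

        /* Stop at the first zero entry, as the ioctl does. */
        for (bo_count = 0; bo_count < 4; bo_count++) {
            if (!bo_handles[bo_count])
                break;
            printf("would look up BO handle %u\n", bo_handles[bo_count]);
        }
        printf("job references %d BOs\n", bo_count);
        return 0;
    }
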
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index e07514eb11b5..69338da70ddc 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -4,8 +4,8 @@
/**
* DOC: Interrupt management for the V3D engine
*
- * When we take a binning or rendering flush done interrupt, we need
- * to signal the fence for that job so that the scheduler can queue up
+ * When we take a bin, render, or TFU done interrupt, we need to
+ * signal the fence for that job so that the scheduler can queue up
* the next one and unblock any waiters.
*
* When we take the binner out of memory interrupt, we need to
@@ -15,6 +15,7 @@
#include "v3d_drv.h"
#include "v3d_regs.h"
+#include "v3d_trace.h"
#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \
V3D_INT_FLDONE | \
@@ -23,7 +24,8 @@
#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \
V3D_HUB_INT_MMU_PTI | \
- V3D_HUB_INT_MMU_CAP))
+ V3D_HUB_INT_MMU_CAP | \
+ V3D_HUB_INT_TFUC))
static void
v3d_overflow_mem_work(struct work_struct *work)
@@ -87,12 +89,20 @@ v3d_irq(int irq, void *arg)
}
if (intsts & V3D_INT_FLDONE) {
- dma_fence_signal(v3d->bin_job->bin.done_fence);
+ struct v3d_fence *fence =
+ to_v3d_fence(v3d->bin_job->bin.done_fence);
+
+ trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_FRDONE) {
- dma_fence_signal(v3d->render_job->render.done_fence);
+ struct v3d_fence *fence =
+ to_v3d_fence(v3d->render_job->render.done_fence);
+
+ trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
@@ -117,6 +127,15 @@ v3d_hub_irq(int irq, void *arg)
/* Acknowledge the interrupts we're handling here. */
V3D_WRITE(V3D_HUB_INT_CLR, intsts);
+ if (intsts & V3D_HUB_INT_TFUC) {
+ struct v3d_fence *fence =
+ to_v3d_fence(v3d->tfu_job->done_fence);
+
+ trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
+ status = IRQ_HANDLED;
+ }
+
if (intsts & (V3D_HUB_INT_MMU_WRV |
V3D_HUB_INT_MMU_PTI |
V3D_HUB_INT_MMU_CAP)) {
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 854046565989..6ccdee9d47bd 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -86,6 +86,55 @@
# define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c
# define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0)
+#define V3D_TFU_CS 0x00400
+/* Stops current job, empties input fifo. */
+# define V3D_TFU_CS_TFURST BIT(31)
+# define V3D_TFU_CS_CVTCT_MASK V3D_MASK(23, 16)
+# define V3D_TFU_CS_CVTCT_SHIFT 16
+# define V3D_TFU_CS_NFREE_MASK V3D_MASK(13, 8)
+# define V3D_TFU_CS_NFREE_SHIFT 8
+# define V3D_TFU_CS_BUSY BIT(0)
+
+#define V3D_TFU_SU 0x00404
+/* Interrupt when FINTTHR input slots are free (0 = disabled) */
+# define V3D_TFU_SU_FINTTHR_MASK V3D_MASK(13, 8)
+# define V3D_TFU_SU_FINTTHR_SHIFT 8
+/* Skips resetting the CRC at the start of CRC generation. */
+# define V3D_TFU_SU_CRCCHAIN BIT(4)
+/* Skips writes, computes CRC of the image. Miplevels must be 0. */
+# define V3D_TFU_SU_CRC BIT(3)
+# define V3D_TFU_SU_THROTTLE_MASK V3D_MASK(1, 0)
+# define V3D_TFU_SU_THROTTLE_SHIFT 0
+
+#define V3D_TFU_ICFG 0x00408
+/* Interrupt when the conversion is complete. */
+# define V3D_TFU_ICFG_IOC BIT(0)
+
+/* Input Image Address */
+#define V3D_TFU_IIA 0x0040c
+/* Input Chroma Address */
+#define V3D_TFU_ICA 0x00410
+/* Input Image Stride */
+#define V3D_TFU_IIS 0x00414
+/* Input Image U-Plane Address */
+#define V3D_TFU_IUA 0x00418
+/* Output Image Address */
+#define V3D_TFU_IOA 0x0041c
+/* Image Output Size */
+#define V3D_TFU_IOS 0x00420
+/* TFU YUV Coefficient 0 */
+#define V3D_TFU_COEF0 0x00424
+/* Use these regs instead of the defaults. */
+# define V3D_TFU_COEF0_USECOEF BIT(31)
+/* TFU YUV Coefficient 1 */
+#define V3D_TFU_COEF1 0x00428
+/* TFU YUV Coefficient 2 */
+#define V3D_TFU_COEF2 0x0042c
+/* TFU YUV Coefficient 3 */
+#define V3D_TFU_COEF3 0x00430
+
+#define V3D_TFU_CRC 0x00434
+
/* Per-MMU registers. */
#define V3D_MMUC_CONTROL 0x01000
@@ -267,6 +316,36 @@
# define V3D_PTB_BXCF_RWORDERDISA BIT(1)
# define V3D_PTB_BXCF_CLIPDISA BIT(0)
+#define V3D_V3_PCTR_0_EN 0x00674
+#define V3D_V3_PCTR_0_EN_ENABLE BIT(31)
+#define V3D_V4_PCTR_0_EN 0x00650
+/* When a bit is set, resets the counter to 0. */
+#define V3D_V3_PCTR_0_CLR 0x00670
+#define V3D_V4_PCTR_0_CLR 0x00654
+#define V3D_PCTR_0_OVERFLOW 0x00658
+
+#define V3D_V3_PCTR_0_PCTRS0 0x00684
+#define V3D_V3_PCTR_0_PCTRS15 0x00660
+#define V3D_V3_PCTR_0_PCTRSX(x) (V3D_V3_PCTR_0_PCTRS0 + \
+ 4 * (x))
+/* Each src reg muxes four counters each. */
+#define V3D_V4_PCTR_0_SRC_0_3 0x00660
+#define V3D_V4_PCTR_0_SRC_28_31 0x0067c
+# define V3D_PCTR_S0_MASK V3D_MASK(6, 0)
+# define V3D_PCTR_S0_SHIFT 0
+# define V3D_PCTR_S1_MASK V3D_MASK(14, 8)
+# define V3D_PCTR_S1_SHIFT 8
+# define V3D_PCTR_S2_MASK V3D_MASK(22, 16)
+# define V3D_PCTR_S2_SHIFT 16
+# define V3D_PCTR_S3_MASK V3D_MASK(30, 24)
+# define V3D_PCTR_S3_SHIFT 24
+# define V3D_PCTR_CYCLE_COUNT 32
+
+/* Output values of the counters. */
+#define V3D_PCTR_0_PCTR0 0x00680
+#define V3D_PCTR_0_PCTR31 0x006fc
+#define V3D_PCTR_0_PCTRX(x) (V3D_PCTR_0_PCTR0 + \
+ 4 * (x))
#define V3D_GMP_STATUS 0x00800
# define V3D_GMP_STATUS_GMPRST BIT(31)
# define V3D_GMP_STATUS_WR_COUNT_MASK V3D_MASK(30, 24)
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 9243dea6e6ad..f7508e907536 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -30,16 +30,34 @@ to_v3d_job(struct drm_sched_job *sched_job)
return container_of(sched_job, struct v3d_job, base);
}
+static struct v3d_tfu_job *
+to_tfu_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct v3d_tfu_job, base);
+}
+
static void
v3d_job_free(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
+ drm_sched_job_cleanup(sched_job);
+
v3d_exec_put(job->exec);
}
+static void
+v3d_tfu_job_free(struct drm_sched_job *sched_job)
+{
+ struct v3d_tfu_job *job = to_tfu_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ v3d_tfu_job_put(job);
+}
+
/**
- * Returns the fences that the bin job depends on, one by one.
+ * Returns the fences that the bin or render job depends on, one by one.
* v3d_job_run() won't be called until all of them have been signaled.
*/
static struct dma_fence *
@@ -76,6 +94,27 @@ v3d_job_dependency(struct drm_sched_job *sched_job,
return fence;
}
+/**
+ * Returns the fences that the TFU job depends on, one by one.
+ * v3d_tfu_job_run() won't be called until all of them have been
+ * signaled.
+ */
+static struct dma_fence *
+v3d_tfu_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
+{
+ struct v3d_tfu_job *job = to_tfu_job(sched_job);
+ struct dma_fence *fence;
+
+ fence = job->in_fence;
+ if (fence) {
+ job->in_fence = NULL;
+ return fence;
+ }
+
+ return NULL;
+}
+
static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
@@ -147,31 +186,47 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
return fence;
}
-static void
-v3d_job_timedout(struct drm_sched_job *sched_job)
+static struct dma_fence *
+v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
- struct v3d_job *job = to_v3d_job(sched_job);
- struct v3d_exec_info *exec = job->exec;
- struct v3d_dev *v3d = exec->v3d;
- enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
- enum v3d_queue q;
- u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
- u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+ struct v3d_tfu_job *job = to_tfu_job(sched_job);
+ struct v3d_dev *v3d = job->v3d;
+ struct drm_device *dev = &v3d->drm;
+ struct dma_fence *fence;
- /* If the current address or return address have changed, then
- * the GPU has probably made progress and we should delay the
- * reset. This could fail if the GPU got in an infinite loop
- * in the CL, but that is pretty unlikely outside of an i-g-t
- * testcase.
- */
- if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
- job->timedout_ctca = ctca;
- job->timedout_ctra = ctra;
+ fence = v3d_fence_create(v3d, V3D_TFU);
+ if (IS_ERR(fence))
+ return NULL;
- schedule_delayed_work(&job->base.sched->work_tdr,
- job->base.sched->timeout);
- return;
+ v3d->tfu_job = job;
+ if (job->done_fence)
+ dma_fence_put(job->done_fence);
+ job->done_fence = dma_fence_get(fence);
+
+ trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
+
+ V3D_WRITE(V3D_TFU_IIA, job->args.iia);
+ V3D_WRITE(V3D_TFU_IIS, job->args.iis);
+ V3D_WRITE(V3D_TFU_ICA, job->args.ica);
+ V3D_WRITE(V3D_TFU_IUA, job->args.iua);
+ V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
+ V3D_WRITE(V3D_TFU_IOS, job->args.ios);
+ V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
+ if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
+ V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
+ V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
+ V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
}
+ /* ICFG kicks off the job. */
+ V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);
+
+ return fence;
+}
+
+static void
+v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
+{
+ enum v3d_queue q;
mutex_lock(&v3d->reset_lock);
@@ -196,6 +251,39 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
mutex_unlock(&v3d->reset_lock);
}
+static void
+v3d_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct v3d_job *job = to_v3d_job(sched_job);
+ struct v3d_exec_info *exec = job->exec;
+ struct v3d_dev *v3d = exec->v3d;
+ enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
+ u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
+ u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+
+ /* If the current address or return address has changed, then
+ * the GPU has probably made progress and we should delay the
+ * reset. This could fail if the GPU got in an infinite loop
+ * in the CL, but that is pretty unlikely outside of an i-g-t
+ * testcase.
+ */
+ if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
+ job->timedout_ctca = ctca;
+ job->timedout_ctra = ctra;
+ return;
+ }
+
+ v3d_gpu_reset_for_timeout(v3d, sched_job);
+}
+
+static void
+v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct v3d_tfu_job *job = to_tfu_job(sched_job);
+
+ v3d_gpu_reset_for_timeout(job->v3d, sched_job);
+}
+
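
The refactored timeout path preserves v3d's progress heuristic: a job is reset only when two consecutive timeouts observe identical current/return addresses (ctca/ctra). A stand-alone sketch of that heuristic, with a hypothetical should_reset() helper:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct watchdog {
        uint32_t last_ctca, last_ctra;
    };

    static bool should_reset(struct watchdog *w, uint32_t ctca, uint32_t ctra)
    {
        if (w->last_ctca != ctca || w->last_ctra != ctra) {
            /* The GPU moved since the last timeout: record and defer. */
            w->last_ctca = ctca;
            w->last_ctra = ctra;
            return false;
        }
        return true;    /* no progress across two timeouts: reset */
    }

    int main(void)
    {
        struct watchdog w = { 0, 0 };

        printf("%d\n", should_reset(&w, 0x100, 0x200)); /* 0: progressed */
        printf("%d\n", should_reset(&w, 0x100, 0x200)); /* 1: stuck */
        return 0;
    }
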
static const struct drm_sched_backend_ops v3d_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_job_run,
@@ -203,6 +291,13 @@ static const struct drm_sched_backend_ops v3d_sched_ops = {
.free_job = v3d_job_free
};
+static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
+ .dependency = v3d_tfu_job_dependency,
+ .run_job = v3d_tfu_job_run,
+ .timedout_job = v3d_tfu_job_timedout,
+ .free_job = v3d_tfu_job_free
+};
+
int
v3d_sched_init(struct v3d_dev *v3d)
{
@@ -233,6 +328,19 @@ v3d_sched_init(struct v3d_dev *v3d)
return ret;
}
+ ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
+ &v3d_tfu_sched_ops,
+ hw_jobs_limit, job_hang_limit,
+ msecs_to_jiffies(hang_limit_ms),
+ "v3d_tfu");
+ if (ret) {
+ dev_err(v3d->dev, "Failed to create TFU scheduler: %d.",
+ ret);
+ drm_sched_fini(&v3d->queue[V3D_RENDER].sched);
+ drm_sched_fini(&v3d->queue[V3D_BIN].sched);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h
index 85dd351e1e09..edd984afa33f 100644
--- a/drivers/gpu/drm/v3d/v3d_trace.h
+++ b/drivers/gpu/drm/v3d/v3d_trace.h
@@ -12,6 +12,28 @@
#define TRACE_SYSTEM v3d
#define TRACE_INCLUDE_FILE v3d_trace
+TRACE_EVENT(v3d_submit_cl_ioctl,
+ TP_PROTO(struct drm_device *dev, u32 ct1qba, u32 ct1qea),
+ TP_ARGS(dev, ct1qba, ct1qea),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, ct1qba)
+ __field(u32, ct1qea)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->ct1qba = ct1qba;
+ __entry->ct1qea = ct1qea;
+ ),
+
+ TP_printk("dev=%u, RCL 0x%08x..0x%08x",
+ __entry->dev,
+ __entry->ct1qba,
+ __entry->ct1qea)
+);
+
TRACE_EVENT(v3d_submit_cl,
TP_PROTO(struct drm_device *dev, bool is_render,
uint64_t seqno,
@@ -42,6 +64,105 @@ TRACE_EVENT(v3d_submit_cl,
__entry->ctnqea)
);
+TRACE_EVENT(v3d_bcl_irq,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
+TRACE_EVENT(v3d_rcl_irq,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
+TRACE_EVENT(v3d_tfu_irq,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
+TRACE_EVENT(v3d_submit_tfu_ioctl,
+ TP_PROTO(struct drm_device *dev, u32 iia),
+ TP_ARGS(dev, iia),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, iia)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->iia = iia;
+ ),
+
+ TP_printk("dev=%u, IIA 0x%08x",
+ __entry->dev,
+ __entry->iia)
+);
+
+TRACE_EVENT(v3d_submit_tfu,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
TRACE_EVENT(v3d_reset_begin,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 1f1780ccdbdf..f6f5cd80c04d 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -33,6 +33,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_atomic_helper.h>
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
@@ -308,6 +309,8 @@ static void vc4_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
+
drm_mode_config_cleanup(drm);
drm_atomic_private_obj_fini(&vc4->ctm_manager);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index bd6ef1f31822..4f87b03f837d 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -338,6 +338,7 @@ struct vc4_plane_state {
u32 pos0_offset;
u32 pos2_offset;
u32 ptr0_offset;
+ u32 lbm_offset;
/* Offset where the plane's dlist was last stored in the
* hardware at vc4_crtc_atomic_flush() time.
@@ -369,6 +370,11 @@ struct vc4_plane_state {
* to enable background color fill.
*/
bool needs_bg_fill;
+
+ /* Mark the dlist as initialized. Useful to avoid initializing it twice
+ * when async update is not possible.
+ */
+ bool dlist_initialized;
};
static inline struct vc4_plane_state *
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 5b22e996af6c..aea2b8dfec17 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -635,7 +635,7 @@ retry:
for (i = 0; i < exec->bo_count; i++) {
bo = to_vc4_bo(&exec->bo[i]->base);
- ret = reservation_object_reserve_shared(bo->resv);
+ ret = reservation_object_reserve_shared(bo->resv, 1);
if (ret) {
vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
return ret;
@@ -681,7 +681,7 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
exec->fence = &fence->base;
if (out_sync)
- drm_syncobj_replace_fence(out_sync, 0, exec->fence);
+ drm_syncobj_replace_fence(out_sync, exec->fence);
vc4_update_bo_seqnos(exec, seqno);
@@ -1173,7 +1173,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
if (args->in_sync) {
ret = drm_syncobj_find_fence(file_priv, args->in_sync,
- 0, &in_fence);
+ 0, 0, &in_fence);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 127468785f74..1f94b9affe4b 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -214,6 +214,12 @@ static int vc4_atomic_commit(struct drm_device *dev,
return 0;
}
+ /* We know for sure we don't want an async update here. Set
+ * state->legacy_cursor_update to false to prevent
+ * drm_atomic_helper_setup_commit() from auto-completing
+ * commit->flip_done.
+ */
+ state->legacy_cursor_update = false;
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 9dc3fcbd290b..75db62cbe468 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -129,12 +129,12 @@ static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
{
- if (dst > src)
+ if (dst == src)
+ return VC4_SCALING_NONE;
+ if (3 * dst >= 2 * src)
return VC4_SCALING_PPF;
- else if (dst < src)
- return VC4_SCALING_TPZ;
else
- return VC4_SCALING_NONE;
+ return VC4_SCALING_TPZ;
}
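
The rewritten vc4_get_scaling_mode() distinguishes three cases: an exact 1:1 ratio needs no scaler, PPF covers upscaling and mild downscaling, and TPZ takes over once dst/src drops below 2/3 (the 3 * dst >= 2 * src test). A stand-alone mirror of the decision, with hypothetical enum names:

    #include <stdio.h>

    enum scaling_mode { SCALING_NONE, SCALING_PPF, SCALING_TPZ };

    static enum scaling_mode get_scaling_mode(unsigned src, unsigned dst)
    {
        if (dst == src)
            return SCALING_NONE;
        if (3 * dst >= 2 * src)     /* dst/src >= 2/3 */
            return SCALING_PPF;
        return SCALING_TPZ;
    }

    int main(void)
    {
        printf("%d\n", get_scaling_mode(300, 200)); /* ratio 2/3: PPF */
        printf("%d\n", get_scaling_mode(300, 199)); /* below 2/3: TPZ */
        return 0;
    }
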
static bool plane_enabled(struct drm_plane_state *state)
@@ -154,6 +154,7 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane
return NULL;
memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
+ vc4_state->dlist_initialized = 0;
__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
@@ -259,37 +260,51 @@ static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
- struct drm_plane *plane = state->plane;
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
u32 subpixel_src_mask = (1 << 16) - 1;
u32 format = fb->format->format;
int num_planes = fb->format->num_planes;
- u32 h_subsample = 1;
- u32 v_subsample = 1;
- int i;
+ struct drm_crtc_state *crtc_state;
+ u32 h_subsample, v_subsample;
+ int i, ret;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ if (!crtc_state) {
+ DRM_DEBUG_KMS("Invalid crtc state\n");
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state, 1,
+ INT_MAX, true, true);
+ if (ret)
+ return ret;
+
+ h_subsample = drm_format_horz_chroma_subsampling(format);
+ v_subsample = drm_format_vert_chroma_subsampling(format);
for (i = 0; i < num_planes; i++)
vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
/* We don't support subpixel source positioning for scaling. */
- if ((state->src_x & subpixel_src_mask) ||
- (state->src_y & subpixel_src_mask) ||
- (state->src_w & subpixel_src_mask) ||
- (state->src_h & subpixel_src_mask)) {
+ if ((state->src.x1 & subpixel_src_mask) ||
+ (state->src.x2 & subpixel_src_mask) ||
+ (state->src.y1 & subpixel_src_mask) ||
+ (state->src.y2 & subpixel_src_mask)) {
return -EINVAL;
}
- vc4_state->src_x = state->src_x >> 16;
- vc4_state->src_y = state->src_y >> 16;
- vc4_state->src_w[0] = state->src_w >> 16;
- vc4_state->src_h[0] = state->src_h >> 16;
+ vc4_state->src_x = state->src.x1 >> 16;
+ vc4_state->src_y = state->src.y1 >> 16;
+ vc4_state->src_w[0] = (state->src.x2 - state->src.x1) >> 16;
+ vc4_state->src_h[0] = (state->src.y2 - state->src.y1) >> 16;
- vc4_state->crtc_x = state->crtc_x;
- vc4_state->crtc_y = state->crtc_y;
- vc4_state->crtc_w = state->crtc_w;
- vc4_state->crtc_h = state->crtc_h;
+ vc4_state->crtc_x = state->dst.x1;
+ vc4_state->crtc_y = state->dst.y1;
+ vc4_state->crtc_w = state->dst.x2 - state->dst.x1;
+ vc4_state->crtc_h = state->dst.y2 - state->dst.y1;
vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
vc4_state->crtc_w);
@@ -302,8 +317,6 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
if (num_planes > 1) {
vc4_state->is_yuv = true;
- h_subsample = drm_format_horz_chroma_subsampling(format);
- v_subsample = drm_format_vert_chroma_subsampling(format);
vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;
@@ -314,52 +327,20 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_get_scaling_mode(vc4_state->src_h[1],
vc4_state->crtc_h);
- /* YUV conversion requires that horizontal scaling be enabled,
- * even on a plane that's otherwise 1:1. Looks like only PPF
- * works in that case, so let's pick that one.
+ /* YUV conversion requires that horizontal scaling be enabled
+ * on the UV plane even if vc4_get_scaling_mode() returned
+ * VC4_SCALING_NONE (which can happen when the down-scaling
+ * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
+ * case.
*/
- if (vc4_state->is_unity)
- vc4_state->x_scaling[0] = VC4_SCALING_PPF;
+ if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
+ vc4_state->x_scaling[1] = VC4_SCALING_PPF;
} else {
+ vc4_state->is_yuv = false;
vc4_state->x_scaling[1] = VC4_SCALING_NONE;
vc4_state->y_scaling[1] = VC4_SCALING_NONE;
}
- /* No configuring scaling on the cursor plane, since it gets
- non-vblank-synced updates, and scaling requires requires
- LBM changes which have to be vblank-synced.
- */
- if (plane->type == DRM_PLANE_TYPE_CURSOR && !vc4_state->is_unity)
- return -EINVAL;
-
- /* Clamp the on-screen start x/y to 0. The hardware doesn't
- * support negative y, and negative x wastes bandwidth.
- */
- if (vc4_state->crtc_x < 0) {
- for (i = 0; i < num_planes; i++) {
- u32 cpp = fb->format->cpp[i];
- u32 subs = ((i == 0) ? 1 : h_subsample);
-
- vc4_state->offsets[i] += (cpp *
- (-vc4_state->crtc_x) / subs);
- }
- vc4_state->src_w[0] += vc4_state->crtc_x;
- vc4_state->src_w[1] += vc4_state->crtc_x / h_subsample;
- vc4_state->crtc_x = 0;
- }
-
- if (vc4_state->crtc_y < 0) {
- for (i = 0; i < num_planes; i++) {
- u32 subs = ((i == 0) ? 1 : v_subsample);
-
- vc4_state->offsets[i] += (fb->pitches[i] *
- (-vc4_state->crtc_y) / subs);
- }
- vc4_state->src_h[0] += vc4_state->crtc_y;
- vc4_state->src_h[1] += vc4_state->crtc_y / v_subsample;
- vc4_state->crtc_y = 0;
- }
-
return 0;
}
@@ -400,10 +381,13 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
u32 lbm;
+ /* LBM is not needed when there's no vertical scaling. */
+ if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
+ vc4_state->y_scaling[1] == VC4_SCALING_NONE)
+ return 0;
+
if (!vc4_state->is_yuv) {
- if (vc4_state->is_unity)
- return 0;
- else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
+ if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
lbm = pix_per_line * 8;
else {
/* In special cases, this multiplier might be 12. */
@@ -454,6 +438,43 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state,
}
}
+static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned long irqflags;
+ u32 lbm_size;
+
+ lbm_size = vc4_lbm_size(state);
+ if (!lbm_size)
+ return 0;
+
+ if (WARN_ON(!vc4_state->lbm_offset))
+ return -EINVAL;
+
+ /* Allocate the LBM memory that the HVS will use for temporary
+ * storage due to our scaling/format conversion.
+ */
+ if (!vc4_state->lbm.allocated) {
+ int ret;
+
+ spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
+ ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
+ &vc4_state->lbm,
+ lbm_size, 32, 0, 0);
+ spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+
+ if (ret)
+ return ret;
+ } else {
+ WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
+ }
+
+ vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start;
+
+ return 0;
+}
+
/* Writes out a full display list for an active plane to the plane's
* private dlist state.
*/
@@ -467,34 +488,18 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
int num_planes = drm_format_num_planes(format->drm);
+ u32 h_subsample, v_subsample;
bool mix_plane_alpha;
bool covers_screen;
u32 scl0, scl1, pitch0;
- u32 lbm_size, tiling;
- unsigned long irqflags;
+ u32 tiling;
u32 hvs_format = format->hvs;
int ret, i;
- ret = vc4_plane_setup_clipping_and_scaling(state);
- if (ret)
- return ret;
-
- /* Allocate the LBM memory that the HVS will use for temporary
- * storage due to our scaling/format conversion.
- */
- lbm_size = vc4_lbm_size(state);
- if (lbm_size) {
- if (!vc4_state->lbm.allocated) {
- spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
- ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
- &vc4_state->lbm,
- lbm_size, 32, 0, 0);
- spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
- } else {
- WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
- }
- }
+ if (vc4_state->dlist_initialized)
+ return 0;
+ ret = vc4_plane_setup_clipping_and_scaling(state);
if (ret)
return ret;
@@ -512,26 +517,77 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
scl1 = vc4_get_scl_field(state, 0);
}
+ h_subsample = drm_format_horz_chroma_subsampling(format->drm);
+ v_subsample = drm_format_vert_chroma_subsampling(format->drm);
+
switch (base_format_mod) {
case DRM_FORMAT_MOD_LINEAR:
tiling = SCALER_CTL0_TILING_LINEAR;
pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
+
+ /* Adjust the base pointer to the first pixel to be scanned
+ * out.
+ */
+ for (i = 0; i < num_planes; i++) {
+ vc4_state->offsets[i] += vc4_state->src_y /
+ (i ? v_subsample : 1) *
+ fb->pitches[i];
+ vc4_state->offsets[i] += vc4_state->src_x /
+ (i ? h_subsample : 1) *
+ fb->format->cpp[i];
+ }
+
break;
case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
- /* For T-tiled, the FB pitch is "how many bytes from
- * one row to the next, such that pitch * tile_h ==
- * tile_size * tiles_per_row."
- */
u32 tile_size_shift = 12; /* T tiles are 4kb */
+ /* Whole-tile offsets, mostly for setting the pitch. */
+ u32 tile_w_shift = fb->format->cpp[0] == 2 ? 6 : 5;
u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
+ u32 tile_w_mask = (1 << tile_w_shift) - 1;
+ /* The height mask on 32-bit-per-pixel tiles is 63, i.e. twice
+ * the height (in pixels) of a 4k tile.
+ */
+ u32 tile_h_mask = (2 << tile_h_shift) - 1;
+ /* For T-tiled, the FB pitch is "how many bytes from one row to
+ * the next, such that
+ *
+ * pitch * tile_h == tile_size * tiles_per_row
+ */
u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
+ u32 tiles_l = vc4_state->src_x >> tile_w_shift;
+ u32 tiles_r = tiles_w - tiles_l;
+ u32 tiles_t = vc4_state->src_y >> tile_h_shift;
+ /* Intra-tile offsets, which modify the base address (the
+ * SCALER_PITCH0_TILE_Y_OFFSET tells HVS how to walk from that
+ * base address).
+ */
+ u32 tile_y = (vc4_state->src_y >> 4) & 1;
+ u32 subtile_y = (vc4_state->src_y >> 2) & 3;
+ u32 utile_y = vc4_state->src_y & 3;
+ u32 x_off = vc4_state->src_x & tile_w_mask;
+ u32 y_off = vc4_state->src_y & tile_h_mask;
tiling = SCALER_CTL0_TILING_256B_OR_T;
+ pitch0 = (VC4_SET_FIELD(x_off, SCALER_PITCH0_SINK_PIX) |
+ VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) |
+ VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) |
+ VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R));
+ vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift);
+ vc4_state->offsets[0] += subtile_y << 8;
+ vc4_state->offsets[0] += utile_y << 4;
+
+ /* Rows of tiles alternate left-to-right and right-to-left. */
+ if (tiles_t & 1) {
+ pitch0 |= SCALER_PITCH0_TILE_INITIAL_LINE_DIR;
+ vc4_state->offsets[0] += (tiles_w - tiles_l) <<
+ tile_size_shift;
+ vc4_state->offsets[0] -= (1 + !tile_y) << 10;
+ } else {
+ vc4_state->offsets[0] += tiles_l << tile_size_shift;
+ vc4_state->offsets[0] += tile_y << 10;
+ }
- pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET) |
- VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L) |
- VC4_SET_FIELD(tiles_w, SCALER_PITCH0_TILE_WIDTH_R));
break;
}
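
The tile arithmetic above can be sanity-checked in isolation. The standalone sketch below re-derives the bookkeeping for a made-up 32bpp, 256-pixel-wide T-tiled buffer (8 tiles per row, so pitch = 8 * 4096 / 32 = 1024) scanned out from (src_x, src_y) = (100, 50); the formulas are copied from the hunk, only the input values are invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pitch = 1024;			/* 8 tiles/row * 4096 / 32 rows */
	uint32_t src_x = 100, src_y = 50;	/* arbitrary scanout origin */
	uint32_t tile_size_shift = 12;		/* T tiles are 4kb */
	uint32_t tile_w_shift = 5;		/* 32bpp: 32-pixel-wide tiles */
	uint32_t tile_h_shift = 5;		/* 32 rows per tile */

	uint32_t tiles_w = pitch >> (tile_size_shift - tile_h_shift);
	uint32_t tiles_l = src_x >> tile_w_shift;
	uint32_t tiles_r = tiles_w - tiles_l;
	uint32_t tiles_t = src_y >> tile_h_shift;
	uint32_t tile_y = (src_y >> 4) & 1;
	uint32_t subtile_y = (src_y >> 2) & 3;
	uint32_t utile_y = src_y & 3;
	uint32_t offset = 0;

	offset += tiles_t * (tiles_w << tile_size_shift);
	offset += subtile_y << 8;
	offset += utile_y << 4;
	if (tiles_t & 1) {			/* odd rows run right-to-left */
		offset += (tiles_w - tiles_l) << tile_size_shift;
		offset -= (1 + !tile_y) << 10;
	} else {
		offset += tiles_l << tile_size_shift;
		offset += tile_y << 10;
	}
	/* prints tiles_w=8 tiles_l=3 tiles_r=5 offset=52256 */
	printf("tiles_w=%u tiles_l=%u tiles_r=%u offset=%u\n",
	       tiles_w, tiles_l, tiles_r, offset);
	return 0;
}
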
@@ -667,15 +723,18 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
}
+ vc4_state->lbm_offset = 0;
+
if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
- /* LBM Base Address. */
+ /* Reserve a slot for the LBM Base Address. The real value will
+ * be set when calling vc4_plane_allocate_lbm().
+ */
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
- vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
- vc4_dlist_write(vc4_state, vc4_state->lbm.start);
- }
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE)
+ vc4_state->lbm_offset = vc4_state->dlist_count++;
if (num_planes > 1) {
/* Emit Cb/Cr as channel 0 and Y as channel
@@ -721,6 +780,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
state->alpha != DRM_BLEND_ALPHA_OPAQUE;
+ /* Flag the dlist as initialized to avoid checking it twice in case
+ * the async update check already called vc4_plane_mode_set() and
+ * decided to fall back to a sync update because an async update was
+ * not possible.
+ */
+ vc4_state->dlist_initialized = 1;
+
return 0;
}
@@ -735,13 +801,18 @@ static int vc4_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ int ret;
vc4_state->dlist_count = 0;
- if (plane_enabled(state))
- return vc4_plane_mode_set(plane, state);
- else
+ if (!plane_enabled(state))
return 0;
+
+ ret = vc4_plane_mode_set(plane, state);
+ if (ret)
+ return ret;
+
+ return vc4_plane_allocate_lbm(state);
}
static void vc4_plane_atomic_update(struct drm_plane *plane,
@@ -807,28 +878,59 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
static void vc4_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
+ struct vc4_plane_state *vc4_state, *new_vc4_state;
- if (plane->state->fb != state->fb) {
- vc4_plane_async_set_fb(plane, state->fb);
- drm_atomic_set_fb_for_plane(plane->state, state->fb);
- }
-
- /* Set the cursor's position on the screen. This is the
- * expected change from the drm_mode_cursor_universal()
- * helper.
- */
+ drm_atomic_set_fb_for_plane(plane->state, state->fb);
plane->state->crtc_x = state->crtc_x;
plane->state->crtc_y = state->crtc_y;
-
- /* Allow changing the start position within the cursor BO, if
- * that matters.
- */
+ plane->state->crtc_w = state->crtc_w;
+ plane->state->crtc_h = state->crtc_h;
plane->state->src_x = state->src_x;
plane->state->src_y = state->src_y;
-
- /* Update the display list based on the new crtc_x/y. */
- vc4_plane_atomic_check(plane, plane->state);
+ plane->state->src_w = state->src_w;
+ plane->state->src_h = state->src_h;
+ plane->state->alpha = state->alpha;
+ plane->state->pixel_blend_mode = state->pixel_blend_mode;
+ plane->state->rotation = state->rotation;
+ plane->state->zpos = state->zpos;
+ plane->state->normalized_zpos = state->normalized_zpos;
+ plane->state->color_encoding = state->color_encoding;
+ plane->state->color_range = state->color_range;
+ plane->state->src = state->src;
+ plane->state->dst = state->dst;
+ plane->state->visible = state->visible;
+
+ new_vc4_state = to_vc4_plane_state(state);
+ vc4_state = to_vc4_plane_state(plane->state);
+
+ vc4_state->crtc_x = new_vc4_state->crtc_x;
+ vc4_state->crtc_y = new_vc4_state->crtc_y;
+ vc4_state->crtc_h = new_vc4_state->crtc_h;
+ vc4_state->crtc_w = new_vc4_state->crtc_w;
+ vc4_state->src_x = new_vc4_state->src_x;
+ vc4_state->src_y = new_vc4_state->src_y;
+ memcpy(vc4_state->src_w, new_vc4_state->src_w,
+ sizeof(vc4_state->src_w));
+ memcpy(vc4_state->src_h, new_vc4_state->src_h,
+ sizeof(vc4_state->src_h));
+ memcpy(vc4_state->x_scaling, new_vc4_state->x_scaling,
+ sizeof(vc4_state->x_scaling));
+ memcpy(vc4_state->y_scaling, new_vc4_state->y_scaling,
+ sizeof(vc4_state->y_scaling));
+ vc4_state->is_unity = new_vc4_state->is_unity;
+ vc4_state->is_yuv = new_vc4_state->is_yuv;
+ memcpy(vc4_state->offsets, new_vc4_state->offsets,
+ sizeof(vc4_state->offsets));
+ vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill;
+
+ /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
+ vc4_state->dlist[vc4_state->pos0_offset] =
+ new_vc4_state->dlist[vc4_state->pos0_offset];
+ vc4_state->dlist[vc4_state->pos2_offset] =
+ new_vc4_state->dlist[vc4_state->pos2_offset];
+ vc4_state->dlist[vc4_state->ptr0_offset] =
+ new_vc4_state->dlist[vc4_state->ptr0_offset];
/* Note that we can't just call vc4_plane_write_dlist()
* because that would smash the context data that the HVS is
@@ -845,13 +947,38 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- /* No configuring new scaling in the fast path. */
- if (plane->state->crtc_w != state->crtc_w ||
- plane->state->crtc_h != state->crtc_h ||
- plane->state->src_w != state->src_w ||
- plane->state->src_h != state->src_h)
+ struct vc4_plane_state *old_vc4_state, *new_vc4_state;
+ int ret;
+ u32 i;
+
+ ret = vc4_plane_mode_set(plane, state);
+ if (ret)
+ return ret;
+
+ old_vc4_state = to_vc4_plane_state(plane->state);
+ new_vc4_state = to_vc4_plane_state(state);
+ if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
+ old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
+ old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
+ old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset ||
+ vc4_lbm_size(plane->state) != vc4_lbm_size(state))
return -EINVAL;
+ /* Only the pos0, pos2 and ptr0 DWORDs can be updated in an async
+ * update. If anything else has changed, fall back to a sync update.
+ */
+ for (i = 0; i < new_vc4_state->dlist_count; i++) {
+ if (i == new_vc4_state->pos0_offset ||
+ i == new_vc4_state->pos2_offset ||
+ i == new_vc4_state->ptr0_offset ||
+ (new_vc4_state->lbm_offset &&
+ i == new_vc4_state->lbm_offset))
+ continue;
+
+ if (new_vc4_state->dlist[i] != old_vc4_state->dlist[i])
+ return -EINVAL;
+ }
+
return 0;
}
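
In other words, the fast path is only taken when the freshly computed dlist is word-for-word identical to the active one outside a small whitelist of slots (plane position words, framebuffer pointer, and the LBM slot when scaling is on). A standalone restatement of that comparison, with invented names:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool dlists_match_except(const uint32_t *old_dl, const uint32_t *new_dl,
				size_t count, const size_t *allowed,
				size_t n_allowed)
{
	for (size_t i = 0; i < count; i++) {
		bool whitelisted = false;

		for (size_t j = 0; j < n_allowed; j++) {
			if (i == allowed[j]) {
				whitelisted = true;
				break;
			}
		}
		if (!whitelisted && new_dl[i] != old_dl[i])
			return false;	/* needs a sync update */
	}
	return true;
}
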
@@ -903,7 +1030,6 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
static void vc4_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
@@ -969,7 +1095,6 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
struct drm_plane *plane = NULL;
struct vc4_plane *vc4_plane;
u32 formats[ARRAY_SIZE(hvs_formats)];
- u32 num_formats = 0;
int ret = 0;
unsigned i;
static const uint64_t modifiers[] = {
@@ -986,20 +1111,13 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
if (!vc4_plane)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
- /* Don't allow YUV in cursor planes, since that means
- * tuning on the scaler, which we don't allow for the
- * cursor.
- */
- if (type != DRM_PLANE_TYPE_CURSOR ||
- hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) {
- formats[num_formats++] = hvs_formats[i].drm;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
+ formats[i] = hvs_formats[i].drm;
+
plane = &vc4_plane->base;
ret = drm_universal_plane_init(dev, plane, 0,
&vc4_plane_funcs,
- formats, num_formats,
+ formats, ARRAY_SIZE(formats),
modifiers, type, NULL);
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index d6864fa4bd14..931088014272 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -1037,14 +1037,18 @@ enum hvs_pixel_format {
#define SCALER_TILE_HEIGHT_MASK VC4_MASK(15, 0)
#define SCALER_TILE_HEIGHT_SHIFT 0
+/* Common PITCH0 fields */
+#define SCALER_PITCH0_SINK_PIX_MASK VC4_MASK(31, 26)
+#define SCALER_PITCH0_SINK_PIX_SHIFT 26
+
/* PITCH0 fields for T-tiled. */
#define SCALER_PITCH0_TILE_WIDTH_L_MASK VC4_MASK(22, 16)
#define SCALER_PITCH0_TILE_WIDTH_L_SHIFT 16
#define SCALER_PITCH0_TILE_LINE_DIR BIT(15)
#define SCALER_PITCH0_TILE_INITIAL_LINE_DIR BIT(14)
/* Y offset within a tile. */
-#define SCALER_PITCH0_TILE_Y_OFFSET_MASK VC4_MASK(13, 7)
-#define SCALER_PITCH0_TILE_Y_OFFSET_SHIFT 7
+#define SCALER_PITCH0_TILE_Y_OFFSET_MASK VC4_MASK(13, 8)
+#define SCALER_PITCH0_TILE_Y_OFFSET_SHIFT 8
#define SCALER_PITCH0_TILE_WIDTH_R_MASK VC4_MASK(6, 0)
#define SCALER_PITCH0_TILE_WIDTH_R_SHIFT 0
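
The Y offset now occupies bits 13:8, a six-bit field matching the 0-63 intra-tile-pair row range computed in the plane code (the previous 13:7 placement put it one bit too low). A small standalone sketch of how these fields pack into PITCH0, with the VC4_MASK()/VC4_SET_FIELD() helpers re-derived and the field names shortened for the sketch; the input values come from the worked example earlier:

#include <stdint.h>
#include <stdio.h>

#define MASK(hi, lo)	((0xffffffffu >> (31 - (hi))) & ~((1u << (lo)) - 1u))
#define SET_FIELD(v, F)	(((uint32_t)(v) << F##_SHIFT) & F##_MASK)

#define SINK_PIX_MASK		MASK(31, 26)
#define SINK_PIX_SHIFT		26
#define TILE_Y_OFFSET_MASK	MASK(13, 8)
#define TILE_Y_OFFSET_SHIFT	8
#define TILE_WIDTH_L_MASK	MASK(22, 16)
#define TILE_WIDTH_L_SHIFT	16
#define TILE_WIDTH_R_MASK	MASK(6, 0)
#define TILE_WIDTH_R_SHIFT	0

int main(void)
{
	/* x_off=4, y_off=50, tiles_l=3, tiles_r=5 from the example above */
	uint32_t pitch0 = SET_FIELD(4, SINK_PIX) |
			  SET_FIELD(50, TILE_Y_OFFSET) |
			  SET_FIELD(3, TILE_WIDTH_L) |
			  SET_FIELD(5, TILE_WIDTH_R);

	printf("pitch0 = 0x%08x\n", pitch0);	/* 0x10033205 */
	return 0;
}
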
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index ec6af8b920da..5930facd6d2d 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -431,7 +431,8 @@ static void vgem_release(struct drm_device *dev)
}
static struct drm_driver vgem_driver = {
- .driver_features = DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_RENDER,
.release = vgem_release,
.open = vgem_open,
.postclose = vgem_postclose,
@@ -471,31 +472,31 @@ static int __init vgem_init(void)
if (!vgem_device)
return -ENOMEM;
- ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL);
- if (ret)
- goto out_free;
-
vgem_device->platform =
platform_device_register_simple("vgem", -1, NULL, 0);
if (IS_ERR(vgem_device->platform)) {
ret = PTR_ERR(vgem_device->platform);
- goto out_fini;
+ goto out_free;
}
dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
DMA_BIT_MASK(64));
+ ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
+ &vgem_device->platform->dev);
+ if (ret)
+ goto out_unregister;
/* Final step: expose the device/driver to userspace */
ret = drm_dev_register(&vgem_device->drm, 0);
if (ret)
- goto out_unregister;
+ goto out_fini;
return 0;
-out_unregister:
- platform_device_unregister(vgem_device->platform);
out_fini:
drm_dev_fini(&vgem_device->drm);
+out_unregister:
+ platform_device_unregister(vgem_device->platform);
out_free:
kfree(vgem_device);
return ret;
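
The reordering matters because drm_dev_init() now takes the platform device as its parent, so the platform device must exist first, and the error labels must unwind in exactly the reverse order of initialization. The same idiom, reduced to a standalone sketch with malloc()/free() standing in for the register/init pairs:

#include <stdlib.h>

static int init_in_order(int fail_register)
{
	void *platform, *drm;
	int ret = -1;

	platform = malloc(1);		/* platform_device_register_simple() */
	if (!platform)
		return -1;

	drm = malloc(1);		/* drm_dev_init(), which needs @platform */
	if (!drm)
		goto out_unregister;

	if (fail_register)		/* drm_dev_register() failing */
		goto out_fini;
	return 0;

	/* unwind in exactly the reverse order of setup */
out_fini:
	free(drm);			/* drm_dev_fini() */
out_unregister:
	free(platform);			/* platform_device_unregister() */
	return ret;
}
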
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index e6ee71323a66..c1c420afe2dd 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -180,7 +180,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
reservation_object_lock(resv, NULL);
if (arg->flags & VGEM_FENCE_WRITE)
reservation_object_add_excl_fence(resv, fence);
- else if ((ret = reservation_object_reserve_shared(resv)) == 0)
+ else if ((ret = reservation_object_reserve_shared(resv, 1)) == 0)
reservation_object_add_shared_fence(resv, fence);
reservation_object_unlock(resv);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 8f8fed471e34..b5580b11a063 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -169,6 +169,12 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = NULL;
int count, width, height;
+ if (output->edid) {
+ count = drm_add_edid_modes(connector, output->edid);
+ if (count)
+ return count;
+ }
+
width = le32_to_cpu(output->info.r.width);
height = le32_to_cpu(output->info.r.height);
count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
@@ -287,6 +293,8 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
+ if (vgdev->has_edid)
+ drm_connector_attach_edid_property(connector);
drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -378,6 +386,10 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
{
+ int i;
+
+ for (i = 0; i < vgdev->num_scanouts; ++i)
+ kfree(vgdev->outputs[i].edid);
virtio_gpu_fbdev_fini(vgdev);
drm_mode_config_cleanup(vgdev->ddev);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 757ca28ab93e..0887e0b64b9c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -53,6 +53,37 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
0,
"virtiodrmfb");
+ /*
+ * Normally the drm_dev_set_unique() call is done by core DRM.
+ * The following comment covers why virtio cannot rely on it.
+ *
+ * Unlike the other virtual GPU drivers, virtio abstracts the
+ * underlying bus type by using struct virtio_device.
+ *
+ * Hence the dev_is_pci() check, used in core DRM, will fail
+ * and the unique returned will be the virtio_device "virtio0",
+ * while a "pci:..." one is required.
+ *
+ * A few other ideas were considered:
+ * - Extend the dev_is_pci() check [in drm_set_busid] to
+ * consider virtio.
+ * Seems like a bigger hack than what we have already.
+ *
+ * - Point drm_device::dev to the parent of the virtio_device
+ * Semantic changes:
+ * * Using the wrong device for i2c, framebuffer_alloc and
+ * prime import.
+ * Visual changes:
+ * * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer,
+ * will print the wrong information.
+ *
+ * We could address the latter issues by introducing
+ * drm_device::bus_dev, ... which would be used solely for this.
+ *
+ * So for the moment keep things as-is, with a bulky comment
+ * for the next person who feels like removing this
+ * drm_dev_set_unique() quirk.
+ */
snprintf(unique, sizeof(unique), "pci:%s", pname);
ret = drm_dev_set_unique(dev, unique);
if (ret)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index d9287c144fe5..f7f32a885af7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -80,6 +80,7 @@ static unsigned int features[] = {
*/
VIRTIO_GPU_F_VIRGL,
#endif
+ VIRTIO_GPU_F_EDID,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d29f0c7c768c..1deb41d42ea4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -47,8 +47,8 @@
#define DRIVER_DATE "0"
#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 1
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
/* virtgpu_drm_bus.c */
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
@@ -65,6 +65,7 @@ struct virtio_gpu_object {
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
+ bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, gem_base)
@@ -114,6 +115,7 @@ struct virtio_gpu_output {
struct drm_encoder enc;
struct virtio_gpu_display_one info;
struct virtio_gpu_update_cursor cursor;
+ struct edid *edid;
int cur_x;
int cur_y;
bool enabled;
@@ -130,6 +132,7 @@ struct virtio_gpu_framebuffer {
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
uint32_t hw_res_handle;
+ struct virtio_gpu_fence *fence;
};
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
@@ -142,9 +145,6 @@ struct virtio_gpu_fbdev {
};
struct virtio_gpu_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
- bool mem_global_referenced;
struct ttm_bo_device bdev;
};
@@ -190,8 +190,7 @@ struct virtio_gpu_device {
struct kmem_cache *vbufs;
bool vqs_ready;
- struct idr resource_idr;
- spinlock_t resource_idr_lock;
+ struct ida resource_ida;
wait_queue_head_t resp_wq;
/* current display info */
@@ -200,10 +199,10 @@ struct virtio_gpu_device {
struct virtio_gpu_fence_driver fence_drv;
- struct idr ctx_id_idr;
- spinlock_t ctx_id_idr_lock;
+ struct ida ctx_id_ida;
bool has_virgl_3d;
+ bool has_edid;
struct work_struct config_changed_work;
@@ -259,11 +258,8 @@ int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
-void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
- uint32_t *resid);
-void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
+ struct virtio_gpu_object *bo,
uint32_t format,
uint32_t width,
uint32_t height);
@@ -274,7 +270,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
__le32 width, __le32 height,
__le32 x, __le32 y,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
@@ -285,8 +281,7 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t x, uint32_t y);
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- uint32_t resource_id,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
@@ -298,6 +293,7 @@ int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
int idx, int version,
struct virtio_gpu_drv_cap_cache **cache_p);
+int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
uint32_t nlen, const char *name);
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
@@ -310,22 +306,22 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence **fence);
+ uint32_t ctx_id, struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t resource_id, uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_resource_create_3d *rc_3d,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_resource_create_3d *rc_3d);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
@@ -353,9 +349,12 @@ void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
/* virtio_gpu_fence.c */
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(
+ struct virtio_gpu_device *vgdev);
+void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence);
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index cea749f4ec39..fb1cc8b2f119 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -214,7 +214,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct virtio_gpu_object *obj;
- uint32_t resid, format, size;
+ uint32_t format, size;
int ret;
mode_cmd.width = sizes->surface_width;
@@ -231,8 +231,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
if (IS_ERR(obj))
return PTR_ERR(obj);
- virtio_gpu_resource_id_get(vgdev, &resid);
- virtio_gpu_cmd_create_resource(vgdev, resid, format,
+ virtio_gpu_cmd_create_resource(vgdev, obj, format,
mode_cmd.width, mode_cmd.height);
ret = virtio_gpu_object_kmap(obj);
@@ -242,7 +241,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
}
/* attach the object to the resource */
- ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
+ ret = virtio_gpu_object_attach(vgdev, obj, NULL);
if (ret)
goto err_obj_attach;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 00c742a441bf..4d6826b27814 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -67,28 +67,43 @@ static const struct dma_fence_ops virtio_fence_ops = {
.timeline_value_str = virtio_timeline_value_str,
};
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+{
+ struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+ struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
+ GFP_ATOMIC);
+ if (!fence)
+ return fence;
+
+ fence->drv = drv;
+ dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
+
+ return fence;
+}
+
+void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence)
+{
+ if (!fence)
+ return;
+
+ dma_fence_put(&fence->f);
+}
+
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
unsigned long irq_flags;
- *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
- if ((*fence) == NULL)
- return -ENOMEM;
-
spin_lock_irqsave(&drv->lock, irq_flags);
- (*fence)->drv = drv;
- (*fence)->seq = ++drv->sync_seq;
- dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
- drv->context, (*fence)->seq);
- dma_fence_get(&(*fence)->f);
- list_add_tail(&(*fence)->node, &drv->fences);
+ fence->seq = ++drv->sync_seq;
+ dma_fence_get(&fence->f);
+ list_add_tail(&fence->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
- cmd_hdr->fence_id = cpu_to_le64((*fence)->seq);
+ cmd_hdr->fence_id = cpu_to_le64(fence->seq);
return 0;
}
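
The refactor splits fence handling into two phases: allocation, which can fail and therefore happens before any command is built, and emission, which only assigns the next sequence number under the driver lock and cannot fail once the command is queued. The shape of that pattern, sketched outside the kernel with pthreads:

#include <pthread.h>
#include <stdlib.h>

struct fence {
	unsigned long seq;	/* 0 until emitted */
};

struct fence_driver {
	pthread_mutex_t lock;
	unsigned long sync_seq;
};

/* Phase 1: may fail, so callers do it before building the command. */
static struct fence *fence_alloc(void)
{
	return calloc(1, sizeof(struct fence));
}

/* Phase 2: cannot fail; just stamps the fence with the next seqno. */
static void fence_emit(struct fence_driver *drv, struct fence *f)
{
	pthread_mutex_lock(&drv->lock);
	f->seq = ++drv->sync_seq;
	pthread_mutex_unlock(&drv->lock);
}
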
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 82c817f37cf7..f06586393974 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -87,7 +87,6 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct virtio_gpu_object *obj;
int ret;
uint32_t pitch;
- uint32_t resid;
uint32_t format;
if (args->bpp != 32)
@@ -103,13 +102,12 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
goto fail;
format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
- virtio_gpu_resource_id_get(vgdev, &resid);
- virtio_gpu_cmd_create_resource(vgdev, resid, format,
+ obj = gem_to_virtio_gpu_obj(gobj);
+ virtio_gpu_cmd_create_resource(vgdev, obj, format,
args->width, args->height);
/* attach the object to the resource */
- obj = gem_to_virtio_gpu_obj(gobj);
- ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
+ ret = virtio_gpu_object_attach(vgdev, obj, NULL);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index f16b875d6a46..161b80fee492 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h>
+#include <linux/sync_file.h>
#include "virtgpu_drv.h"
@@ -105,7 +106,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
struct drm_gem_object *gobj;
- struct virtio_gpu_fence *fence;
+ struct virtio_gpu_fence *out_fence;
struct virtio_gpu_object *qobj;
int ret;
uint32_t *bo_handles = NULL;
@@ -114,11 +115,46 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct ttm_validate_buffer *buflist = NULL;
int i;
struct ww_acquire_ctx ticket;
+ struct sync_file *sync_file;
+ int in_fence_fd = exbuf->fence_fd;
+ int out_fence_fd = -1;
void *buf;
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
+ if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
+ return -EINVAL;
+
+ exbuf->fence_fd = -1;
+
+ if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
+ struct dma_fence *in_fence;
+
+ in_fence = sync_file_get_fence(in_fence_fd);
+
+ if (!in_fence)
+ return -EINVAL;
+
+ /*
+ * Wait if the fence is from a foreign context, or if the fence
+ * array contains any fence from a foreign context.
+ */
+ ret = 0;
+ if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
+ ret = dma_fence_wait(in_fence, true);
+
+ dma_fence_put(in_fence);
+ if (ret)
+ return ret;
+ }
+
+ if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0)
+ return out_fence_fd;
+ }
+
INIT_LIST_HEAD(&validate_list);
if (exbuf->num_bo_handles) {
@@ -128,26 +164,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
sizeof(struct ttm_validate_buffer),
GFP_KERNEL | __GFP_ZERO);
if (!bo_handles || !buflist) {
- kvfree(bo_handles);
- kvfree(buflist);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_unused_fd;
}
user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
if (copy_from_user(bo_handles, user_bo_handles,
exbuf->num_bo_handles * sizeof(uint32_t))) {
ret = -EFAULT;
- kvfree(bo_handles);
- kvfree(buflist);
- return ret;
+ goto out_unused_fd;
}
for (i = 0; i < exbuf->num_bo_handles; i++) {
gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
if (!gobj) {
- kvfree(bo_handles);
- kvfree(buflist);
- return -ENOENT;
+ ret = -ENOENT;
+ goto out_unused_fd;
}
qobj = gem_to_virtio_gpu_obj(gobj);
@@ -156,6 +188,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
list_add(&buflist[i].head, &validate_list);
}
kvfree(bo_handles);
+ bo_handles = NULL;
}
ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
@@ -168,22 +201,48 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
ret = PTR_ERR(buf);
goto out_unresv;
}
+
+ out_fence = virtio_gpu_fence_alloc(vgdev);
+ if (!out_fence) {
+ ret = -ENOMEM;
+ goto out_memdup;
+ }
+
+ if (out_fence_fd >= 0) {
+ sync_file = sync_file_create(&out_fence->f);
+ if (!sync_file) {
+ dma_fence_put(&out_fence->f);
+ ret = -ENOMEM;
+ goto out_memdup;
+ }
+
+ exbuf->fence_fd = out_fence_fd;
+ fd_install(out_fence_fd, sync_file->file);
+ }
+
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
- vfpriv->ctx_id, &fence);
+ vfpriv->ctx_id, out_fence);
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
+ ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
/* fence the command bo */
virtio_gpu_unref_list(&validate_list);
kvfree(buflist);
- dma_fence_put(&fence->f);
return 0;
+out_memdup:
+ kfree(buf);
out_unresv:
ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
virtio_gpu_unref_list(&validate_list);
+out_unused_fd:
+ kvfree(bo_handles);
kvfree(buflist);
+
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
+
return ret;
}
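
From userspace the two flags compose naturally: pass an in-fence fd the kernel should wait on before submission, and/or ask for an out-fence fd that signals when the submission completes. A rough client-side sketch; the struct and flag names come from the virtgpu uapi header as extended by this series, but treat the snippet as untested illustration rather than a reference client:

#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

static int submit_with_fences(int drm_fd, void *cmds, uint32_t size, int in_fd)
{
	struct drm_virtgpu_execbuffer eb;
	struct pollfd pfd;

	memset(&eb, 0, sizeof(eb));
	eb.command = (uintptr_t)cmds;
	eb.size = size;
	eb.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT;
	if (in_fd >= 0) {
		eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
		eb.fence_fd = in_fd;	/* waited on before submission */
	}

	if (ioctl(drm_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb))
		return -1;

	/* eb.fence_fd is now a sync_file fd; poll it for completion. */
	pfd.fd = eb.fence_fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);
	return eb.fence_fd;
}
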
@@ -217,7 +276,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_resource_create *rc = data;
int ret;
- uint32_t res_id;
struct virtio_gpu_object *qobj;
struct drm_gem_object *obj;
uint32_t handle = 0;
@@ -244,8 +302,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
INIT_LIST_HEAD(&validate_list);
memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
- virtio_gpu_resource_id_get(vgdev, &res_id);
-
size = rc->size;
/* allocate a single page size object */
@@ -253,17 +309,15 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
size = PAGE_SIZE;
qobj = virtio_gpu_alloc_object(dev, size, false, false);
- if (IS_ERR(qobj)) {
- ret = PTR_ERR(qobj);
- goto fail_id;
- }
+ if (IS_ERR(qobj))
+ return PTR_ERR(qobj);
obj = &qobj->gem_base;
if (!vgdev->has_virgl_3d) {
- virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
+ virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format,
rc->width, rc->height);
- ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
+ ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
} else {
/* use a gem reference since unref list undoes them */
drm_gem_object_get(&qobj->gem_base);
@@ -276,7 +330,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
goto fail_unref;
}
- rc_3d.resource_id = cpu_to_le32(res_id);
+ rc_3d.resource_id = cpu_to_le32(qobj->hw_res_handle);
rc_3d.target = cpu_to_le32(rc->target);
rc_3d.format = cpu_to_le32(rc->format);
rc_3d.bind = cpu_to_le32(rc->bind);
@@ -288,17 +342,21 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
rc_3d.flags = cpu_to_le32(rc->flags);
- virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
- ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
+ fence = virtio_gpu_fence_alloc(vgdev);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto fail_backoff;
+ }
+
+ virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
+ ret = virtio_gpu_object_attach(vgdev, qobj, fence);
if (ret) {
- ttm_eu_backoff_reservation(&ticket, &validate_list);
- goto fail_unref;
+ virtio_gpu_fence_cleanup(fence);
+ goto fail_backoff;
}
ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
}
- qobj->hw_res_handle = res_id;
-
ret = drm_gem_handle_create(file_priv, obj, &handle);
if (ret) {
@@ -311,7 +369,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
}
drm_gem_object_put_unlocked(obj);
- rc->res_handle = res_id; /* similiar to a VM address */
+ rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
rc->bo_handle = handle;
if (vgdev->has_virgl_3d) {
@@ -319,6 +377,8 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
dma_fence_put(&fence->f);
}
return 0;
+fail_backoff:
+ ttm_eu_backoff_reservation(&ticket, &validate_list);
fail_unref:
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
@@ -326,8 +386,6 @@ fail_unref:
}
//fail_obj:
// drm_gem_object_handle_unreference_unlocked(obj);
-fail_id:
- virtio_gpu_resource_id_put(vgdev, res_id);
return ret;
}
@@ -383,10 +441,16 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
goto out_unres;
convert_to_hw_box(&box, &args->box);
+
+ fence = virtio_gpu_fence_alloc(vgdev);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto out_unres;
+ }
virtio_gpu_cmd_transfer_from_host_3d
(vgdev, qobj->hw_res_handle,
vfpriv->ctx_id, offset, args->level,
- &box, &fence);
+ &box, fence);
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
@@ -432,10 +496,15 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
(vgdev, qobj, offset,
box.w, box.h, box.x, box.y, NULL);
} else {
+ fence = virtio_gpu_fence_alloc(vgdev);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto out_unres;
+ }
virtio_gpu_cmd_transfer_to_host_3d
(vgdev, qobj,
vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &box, &fence);
+ args->level, &box, fence);
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
dma_fence_put(&fence->f);
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 65060c08522d..3af6181c05a8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -44,6 +44,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
virtio_cread(vgdev->vdev, struct virtio_gpu_config,
events_read, &events_read);
if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
drm_helper_hpd_irq_event(vgdev->ddev);
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
@@ -52,39 +54,23 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
events_clear, &events_clear);
}
-static void virtio_gpu_ctx_id_get(struct virtio_gpu_device *vgdev,
- uint32_t *resid)
+static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
+ uint32_t nlen, const char *name)
{
- int handle;
-
- idr_preload(GFP_KERNEL);
- spin_lock(&vgdev->ctx_id_idr_lock);
- handle = idr_alloc(&vgdev->ctx_id_idr, NULL, 1, 0, 0);
- spin_unlock(&vgdev->ctx_id_idr_lock);
- idr_preload_end();
- *resid = handle;
-}
+ int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
-static void virtio_gpu_ctx_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
-{
- spin_lock(&vgdev->ctx_id_idr_lock);
- idr_remove(&vgdev->ctx_id_idr, id);
- spin_unlock(&vgdev->ctx_id_idr_lock);
-}
-
-static void virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
- uint32_t nlen, const char *name,
- uint32_t *ctx_id)
-{
- virtio_gpu_ctx_id_get(vgdev, ctx_id);
- virtio_gpu_cmd_context_create(vgdev, *ctx_id, nlen, name);
+ if (handle < 0)
+ return handle;
+ handle += 1;
+ virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
+ return handle;
}
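
ida_alloc() hands out IDs starting at 0, but virtio reserves ID 0 to mean "no context"/"no resource", hence the +1 on allocation and the matching -1 on free (the resource path in virtgpu_object.c does the same dance). The offset in isolation, over a toy bitmap allocator:

#include <limits.h>

static unsigned long used;	/* toy allocator, one bit per ID */

static int id_alloc(void)	/* returns a 1-based ID; 0 stays reserved */
{
	for (unsigned int bit = 0; bit < sizeof(used) * CHAR_BIT; bit++) {
		if (!(used & (1UL << bit))) {
			used |= 1UL << bit;
			return (int)bit + 1;
		}
	}
	return -1;
}

static void id_free(int id)
{
	used &= ~(1UL << (id - 1));	/* undo the +1 from id_alloc() */
}
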
static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t ctx_id)
{
virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
- virtio_gpu_ctx_id_put(vgdev, ctx_id);
+ ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
}
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
@@ -151,10 +137,8 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
vgdev->dev = dev->dev;
spin_lock_init(&vgdev->display_info_lock);
- spin_lock_init(&vgdev->ctx_id_idr_lock);
- idr_init(&vgdev->ctx_id_idr);
- spin_lock_init(&vgdev->resource_idr_lock);
- idr_init(&vgdev->resource_idr);
+ ida_init(&vgdev->ctx_id_ida);
+ ida_init(&vgdev->resource_ida);
init_waitqueue_head(&vgdev->resp_wq);
virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
@@ -174,6 +158,10 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
#else
DRM_INFO("virgl 3d acceleration not supported by guest\n");
#endif
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
+ vgdev->has_edid = true;
+ DRM_INFO("EDID support available.\n");
+ }
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
@@ -219,6 +207,8 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
if (num_capsets)
virtio_gpu_get_capsets(vgdev, num_capsets);
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
5 * HZ);
@@ -271,7 +261,7 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv;
- uint32_t id;
+ int id;
char dbgname[TASK_COMM_LEN];
/* can't create contexts without 3d renderer */
@@ -284,7 +274,11 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
return -ENOMEM;
get_task_comm(dbgname, current);
- virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname, &id);
+ id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
+ if (id < 0) {
+ kfree(vfpriv);
+ return id;
+ }
vfpriv->ctx_id = id;
file->driver_priv = vfpriv;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index eca765537470..f39a183d59c2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -25,6 +25,23 @@
#include "virtgpu_drv.h"
+static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+ uint32_t *resid)
+{
+ int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+
+ if (handle < 0)
+ return handle;
+
+ *resid = handle + 1;
+ return 0;
+}
+
+static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
+{
+ ida_free(&vgdev->resource_ida, id - 1);
+}
+
static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct virtio_gpu_object *bo;
@@ -33,13 +50,14 @@ static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct virtio_gpu_object, tbo);
vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
- if (bo->hw_res_handle)
+ if (bo->created)
virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
if (bo->pages)
virtio_gpu_object_free_sg_table(bo);
if (bo->vmap)
virtio_gpu_object_kunmap(bo);
drm_gem_object_release(&bo->gem_base);
+ virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
kfree(bo);
}
@@ -81,9 +99,15 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
+ ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+ if (ret < 0) {
+ kfree(bo);
+ return ret;
+ }
size = roundup(size, PAGE_SIZE);
ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
if (ret != 0) {
+ virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
kfree(bo);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a9f4ae7d4483..ead5c53d4e21 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -137,6 +137,41 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_h >> 16);
}
+static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_object *bo;
+
+ if (!new_state->fb)
+ return 0;
+
+ vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+ if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
+ vgfb->fence = virtio_gpu_fence_alloc(vgdev);
+ if (!vgfb->fence)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct virtio_gpu_framebuffer *vgfb;
+
+ if (!plane->state->fb)
+ return;
+
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ if (vgfb->fence)
+ virtio_gpu_fence_cleanup(vgfb->fence);
+}
+
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -144,7 +179,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
- struct virtio_gpu_fence *fence = NULL;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
int ret = 0;
@@ -170,13 +204,13 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
(vgdev, bo, 0,
cpu_to_le32(plane->state->crtc_w),
cpu_to_le32(plane->state->crtc_h),
- 0, 0, &fence);
+ 0, 0, vgfb->fence);
ret = virtio_gpu_object_reserve(bo, false);
if (!ret) {
reservation_object_add_excl_fence(bo->tbo.resv,
- &fence->f);
- dma_fence_put(&fence->f);
- fence = NULL;
+ &vgfb->fence->f);
+ dma_fence_put(&vgfb->fence->f);
+ vgfb->fence = NULL;
virtio_gpu_object_unreserve(bo);
virtio_gpu_object_wait(bo, false);
}
@@ -218,6 +252,8 @@ static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
};
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
+ .prepare_fb = virtio_gpu_cursor_prepare_fb,
+ .cleanup_fb = virtio_gpu_cursor_cleanup_fb,
.atomic_check = virtio_gpu_plane_atomic_check,
.atomic_update = virtio_gpu_cursor_plane_update,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index e3152d45c5f1..4bfbf25fabff 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -50,62 +50,6 @@ virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
return vgdev;
}
-static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- vgdev->mman.mem_global_referenced = false;
- global_ref = &vgdev->mman.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &virtio_gpu_ttm_mem_global_init;
- global_ref->release = &virtio_gpu_ttm_mem_global_release;
-
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- vgdev->mman.bo_global_ref.mem_glob =
- vgdev->mman.mem_global_ref.object;
- global_ref = &vgdev->mman.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&vgdev->mman.mem_global_ref);
- return r;
- }
-
- vgdev->mman.mem_global_referenced = true;
- return 0;
-}
-
-static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
-{
- if (vgdev->mman.mem_global_referenced) {
- drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
- drm_global_item_unref(&vgdev->mman.mem_global_ref);
- vgdev->mman.mem_global_referenced = false;
- }
-}
-
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
@@ -347,8 +291,7 @@ static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
} else if (new_mem->placement & TTM_PL_FLAG_TT) {
if (bo->hw_res_handle) {
- virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
- NULL);
+ virtio_gpu_object_attach(vgdev, bo, NULL);
}
}
}
@@ -383,12 +326,8 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
{
int r;
- r = virtio_gpu_ttm_global_init(vgdev);
- if (r)
- return r;
/* No other users of the address space, so set it to 0 */
r = ttm_bo_device_init(&vgdev->mman.bdev,
- vgdev->mman.bo_global_ref.ref.object,
&virtio_gpu_bo_driver,
vgdev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, 0);
@@ -407,13 +346,11 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
err_mm_init:
ttm_bo_device_release(&vgdev->mman.bdev);
err_dev_init:
- virtio_gpu_ttm_global_fini(vgdev);
return r;
}
void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
{
ttm_bo_device_release(&vgdev->mman.bdev);
- virtio_gpu_ttm_global_fini(vgdev);
DRM_INFO("virtio_gpu: ttm finalized\n");
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 4e2e037aed34..e27c4aedb809 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -38,26 +38,6 @@
+ MAX_INLINE_CMD_SIZE \
+ MAX_INLINE_RESP_SIZE)
-void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
- uint32_t *resid)
-{
- int handle;
-
- idr_preload(GFP_KERNEL);
- spin_lock(&vgdev->resource_idr_lock);
- handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
- spin_unlock(&vgdev->resource_idr_lock);
- idr_preload_end();
- *resid = handle;
-}
-
-void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
-{
- spin_lock(&vgdev->resource_idr_lock);
- idr_remove(&vgdev->resource_idr, id);
- spin_unlock(&vgdev->resource_idr_lock);
-}
-
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
struct drm_device *dev = vq->vdev->priv;
@@ -98,10 +78,9 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_vbuffer *vbuf;
- vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
if (!vbuf)
return ERR_PTR(-ENOMEM);
- memset(vbuf, 0, VBUFFER_SIZE);
BUG_ON(size > MAX_INLINE_CMD_SIZE);
vbuf->buf = (void *)vbuf + sizeof(*vbuf);
@@ -319,7 +298,7 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_ctrl_hdr *hdr,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
int rc;
@@ -388,7 +367,7 @@ retry:
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
+ struct virtio_gpu_object *bo,
uint32_t format,
uint32_t width,
uint32_t height)
@@ -400,12 +379,13 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
memset(cmd_p, 0, sizeof(*cmd_p));
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->format = cpu_to_le32(format);
cmd_p->width = cpu_to_le32(width);
cmd_p->height = cpu_to_le32(height);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ bo->created = true;
}
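
Because hw_res_handle is now assigned at object-creation time, a nonzero handle no longer proves that the host has seen the resource; the new bo->created flag takes over that job. Attach becomes a no-op until a create command has been queued, and destroy only sends RESOURCE_UNREF for resources the host actually knows about. The guard, reduced to a sketch with invented names:

#include <stdbool.h>

struct gpu_object {
	unsigned int hw_res_handle;	/* allocated up front, always valid */
	bool created;			/* host-side resource exists */
};

static void object_attach_backing(struct gpu_object *bo)
{
	if (!bo->created)
		return;		/* nothing on the host to attach to yet */
	/* ... queue ATTACH_BACKING for bo->hw_res_handle ... */
}

static void object_destroy(struct gpu_object *bo)
{
	if (bo->created) {
		/* queue RESOURCE_UNREF for bo->hw_res_handle */
	}
	/* the ID itself always goes back to the allocator */
}
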
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
@@ -425,7 +405,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_detach_backing *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -487,7 +467,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
__le32 width, __le32 height,
__le32 x, __le32 y,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -517,7 +497,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
struct virtio_gpu_mem_entry *ents,
uint32_t nents,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_attach_backing *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -604,6 +584,45 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
wake_up(&vgdev->resp_wq);
}
+static int virtio_get_edid_block(void *data, u8 *buf,
+ unsigned int block, size_t len)
+{
+ struct virtio_gpu_resp_edid *resp = data;
+ size_t start = block * EDID_LENGTH;
+
+ if (start + len > le32_to_cpu(resp->size))
+ return -1;
+ memcpy(buf, resp->edid + start, len);
+ return 0;
+}
+
+static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_cmd_get_edid *cmd =
+ (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
+ struct virtio_gpu_resp_edid *resp =
+ (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
+ uint32_t scanout = le32_to_cpu(cmd->scanout);
+ struct virtio_gpu_output *output;
+ struct edid *new_edid, *old_edid;
+
+ if (scanout >= vgdev->num_scanouts)
+ return;
+ output = vgdev->outputs + scanout;
+
+ new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+
+ spin_lock(&vgdev->display_info_lock);
+ old_edid = output->edid;
+ output->edid = new_edid;
+ drm_connector_update_edid_property(&output->conn, output->edid);
+ spin_unlock(&vgdev->display_info_lock);
+
+ kfree(old_edid);
+ wake_up(&vgdev->resp_wq);
+}
+
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
struct virtio_gpu_ctrl_hdr *cmd_p;
@@ -706,6 +725,34 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
return 0;
}
+int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
+{
+ struct virtio_gpu_cmd_get_edid *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ void *resp_buf;
+ int scanout;
+
+ if (WARN_ON(!vgdev->has_edid))
+ return -EINVAL;
+
+ for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
+ resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
+ GFP_KERNEL);
+ if (!resp_buf)
+ return -ENOMEM;
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
+ sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
+ resp_buf);
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
+ cmd_p->scanout = cpu_to_le32(scanout);
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ }
+
+ return 0;
+}
+
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
uint32_t nlen, const char *name)
{
@@ -772,8 +819,8 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_resource_create_3d *rc_3d,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_resource_create_3d *rc_3d)
{
struct virtio_gpu_resource_create_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -785,7 +832,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
cmd_p->hdr.flags = 0;
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ bo->created = true;
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
@@ -793,7 +841,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -821,7 +869,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t resource_id, uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -841,7 +889,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence **fence)
+ uint32_t ctx_id, struct virtio_gpu_fence *fence)
{
struct virtio_gpu_cmd_submit *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -861,14 +909,16 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- uint32_t resource_id,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_mem_entry *ents;
struct scatterlist *sg;
int si, nents;
+ if (!obj->created)
+ return 0;
+
if (!obj->pages) {
int ret;
@@ -902,10 +952,9 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
ents[si].padding = 0;
}
- virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
+ virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
ents, nents,
fence);
- obj->hw_res_handle = resource_id;
return 0;
}
@@ -913,11 +962,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj)
{
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
- struct virtio_gpu_fence *fence;
if (use_dma_api && obj->mapped) {
+ struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
/* detach backing and wait for the host to process it ... */
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+ virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
dma_fence_wait(&fence->f, true);
dma_fence_put(&fence->f);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 07cfde1b4132..83087877565c 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -68,7 +68,6 @@ static struct drm_driver vkms_driver = {
.release = vkms_release,
.fops = &vkms_driver_fops,
.dumb_create = vkms_dumb_create,
- .dumb_map_offset = vkms_dumb_map,
.gem_vm_ops = &vkms_gem_vm_ops,
.gem_free_object_unlocked = vkms_gem_free_object,
.get_vblank_timestamp = vkms_get_vblank_timestamp,
@@ -108,17 +107,18 @@ static int __init vkms_init(void)
if (!vkms_device)
return -ENOMEM;
- ret = drm_dev_init(&vkms_device->drm, &vkms_driver, NULL);
- if (ret)
- goto out_free;
-
vkms_device->platform =
platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
if (IS_ERR(vkms_device->platform)) {
ret = PTR_ERR(vkms_device->platform);
- goto out_fini;
+ goto out_free;
}
+ ret = drm_dev_init(&vkms_device->drm, &vkms_driver,
+ &vkms_device->platform->dev);
+ if (ret)
+ goto out_unregister;
+
vkms_device->drm.irq_enabled = true;
ret = drm_vblank_init(&vkms_device->drm, 1);
@@ -129,20 +129,20 @@ static int __init vkms_init(void)
ret = vkms_modeset_init(vkms_device);
if (ret)
- goto out_unregister;
+ goto out_fini;
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
- goto out_unregister;
+ goto out_fini;
return 0;
-out_unregister:
- platform_device_unregister(vkms_device->platform);
-
out_fini:
drm_dev_fini(&vkms_device->drm);
+out_unregister:
+ platform_device_unregister(vkms_device->platform);
+
out_free:
kfree(vkms_device);
return ret;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 1c93990693e3..e4469cd3d254 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -127,9 +127,6 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
- u32 handle, u64 *offset);
-
void vkms_gem_free_object(struct drm_gem_object *obj);
int vkms_gem_vmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index d04e988b4cbe..80311daed47a 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -153,32 +153,6 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
return 0;
}
-int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
- u32 handle, u64 *offset)
-{
- struct drm_gem_object *obj;
- int ret;
-
- obj = drm_gem_object_lookup(file, handle);
- if (!obj)
- return -ENOENT;
-
- if (!obj->filp) {
- ret = -EINVAL;
- goto unref;
- }
-
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto unref;
-
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
-unref:
- drm_gem_object_put_unlocked(obj);
-
- return ret;
-}
-
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
struct drm_gem_object *gem_obj = &vkms_obj->gem;
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 7041007396ae..418817600ad1 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -23,8 +23,11 @@ vkms_plane_duplicate_state(struct drm_plane *plane)
return NULL;
crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL);
- if (WARN_ON(!crc_data))
- DRM_INFO("Couldn't allocate crc_data");
+ if (!crc_data) {
+ DRM_DEBUG_KMS("Couldn't allocate crc_data\n");
+ kfree(vkms_state);
+ return NULL;
+ }
vkms_state->crc_data = crc_data;
@@ -138,14 +141,12 @@ static int vkms_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_gem_object *gem_obj;
- struct vkms_gem_object *vkms_obj;
int ret;
if (!state->fb)
return 0;
gem_obj = drm_gem_fb_get_obj(state->fb, 0);
- vkms_obj = drm_gem_to_vkms_gem(gem_obj);
ret = vkms_gem_vmap(gem_obj);
if (ret)
DRM_ERROR("vmap failed: %d\n", ret);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 61a84b958d67..9fd8b4e75a8c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -665,7 +665,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
- mutex_init(&dev_priv->requested_layout_mutex);
mutex_init(&dev_priv->global_kms_state_mutex);
ttm_lock_init(&dev_priv->reservation_sem);
spin_lock_init(&dev_priv->resource_lock);
@@ -801,11 +800,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
dev_priv->mmio_start, dev_priv->mmio_size / 1024);
- ret = vmw_ttm_global_init(dev_priv);
- if (unlikely(ret != 0))
- goto out_err0;
-
-
vmw_master_init(&dev_priv->fbdev_master);
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -816,7 +810,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(dev_priv->mmio_virt == NULL)) {
ret = -ENOMEM;
DRM_ERROR("Failed mapping MMIO.\n");
- goto out_err3;
+ goto out_err0;
}
/* Need mmio memory to check for fifo pitchlock cap. */
@@ -828,8 +822,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_err4;
}
- dev_priv->tdev = ttm_object_device_init
- (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
+ dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
+ &vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -870,7 +864,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
ret = ttm_bo_device_init(&dev_priv->bdev,
- dev_priv->bo_global_ref.ref.object,
&vmw_bo_driver,
dev->anon_inode->i_mapping,
VMWGFX_FILE_PAGE_OFFSET,
@@ -992,8 +985,6 @@ out_no_device:
ttm_object_device_release(&dev_priv->tdev);
out_err4:
memunmap(dev_priv->mmio_virt);
-out_err3:
- vmw_ttm_global_release(dev_priv);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
@@ -1045,7 +1036,6 @@ static void vmw_driver_unload(struct drm_device *dev)
memunmap(dev_priv->mmio_virt);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
- vmw_ttm_global_release(dev_priv);
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 59f614225bcd..d7f6cb9331de 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -417,8 +417,6 @@ enum {
struct vmw_private {
struct ttm_bo_device bdev;
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
struct vmw_fifo_state fifo;
@@ -468,15 +466,6 @@ struct vmw_private {
uint32_t num_displays;
/*
- * Currently requested_layout_mutex is used to protect the gui
- * positionig state in display unit. With that use case currently this
- * mutex is only taken during layout ioctl and atomic check_modeset.
- * Other display unit state can be protected with this mutex but that
- * needs careful consideration.
- */
- struct mutex requested_layout_mutex;
-
- /*
* Framebuffer info.
*/
@@ -486,8 +475,6 @@ struct vmw_private {
struct vmw_overlay *overlay_priv;
struct drm_property *hotplug_mode_update_property;
struct drm_property *implicit_placement_property;
- unsigned num_implicit;
- struct vmw_framebuffer *implicit_fb;
struct mutex global_kms_state_mutex;
spinlock_t cursor_lock;
struct drm_atomic_state *suspend_state;
@@ -842,8 +829,6 @@ extern int vmw_fifo_flush(struct vmw_private *dev_priv,
* TTM glue - vmwgfx_ttm_glue.c
*/
-extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
-extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
/**
@@ -1363,7 +1348,7 @@ vmw_bo_reference(struct vmw_buffer_object *buf)
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
- return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
+ return &ttm_mem_glob;
}
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5a6b70ba137a..260650bb5560 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1738,7 +1738,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
void *buf)
{
struct vmw_buffer_object *vmw_bo;
- int ret;
struct {
uint32_t header;
@@ -1748,7 +1747,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.ptr,
&vmw_bo);
- return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index f87261545f2c..301260e23e52 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -906,13 +906,10 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
container_of(action, struct vmw_event_fence_action, action);
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
- struct drm_file *file_priv;
-
if (unlikely(event == NULL))
return;
- file_priv = event->file_priv;
spin_lock_irq(&dev->event_lock);
if (likely(eaction->tv_sec != NULL)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index dca04d4246ea..b351fb5214d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_rect.h>
+#include <drm/drm_damage_helper.h>
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
@@ -456,21 +457,8 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc *crtc = state->crtc;
struct vmw_connector_state *vcs;
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
vcs = vmw_connector_state_to_vcs(du->connector.state);
-
- /* Only one active implicit framebuffer at a time. */
- mutex_lock(&dev_priv->global_kms_state_mutex);
- if (vcs->is_implicit && dev_priv->implicit_fb &&
- !(dev_priv->num_implicit == 1 && du->active_implicit)
- && dev_priv->implicit_fb != vfb) {
- DRM_ERROR("Multiple implicit framebuffers "
- "not supported.\n");
- ret = -EINVAL;
- }
- mutex_unlock(&dev_priv->global_kms_state_mutex);
}
@@ -493,24 +481,24 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
int ret = 0;
+ struct drm_crtc_state *crtc_state = NULL;
struct vmw_surface *surface = NULL;
struct drm_framebuffer *fb = new_state->fb;
- struct drm_rect src = drm_plane_state_src(new_state);
- struct drm_rect dest = drm_plane_state_dest(new_state);
+ if (new_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
- /* Turning off */
- if (!fb)
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ if (ret)
return ret;
- ret = drm_plane_helper_check_update(plane, new_state->crtc, fb,
- &src, &dest,
- DRM_MODE_ROTATE_0,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true, &new_state->visible);
- if (!ret)
- return ret;
+ /* Turning off */
+ if (!fb)
+ return 0;
/* A lot of the code assumes this */
if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
@@ -846,58 +834,6 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
kfree(vfbs);
}
-static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
- struct drm_file *file_priv,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
-{
- struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- struct vmw_framebuffer_surface *vfbs =
- vmw_framebuffer_to_vfbs(framebuffer);
- struct drm_clip_rect norect;
- int ret, inc = 1;
-
- /* Legacy Display Unit does not support 3D */
- if (dev_priv->active_display_unit == vmw_du_legacy)
- return -EINVAL;
-
- drm_modeset_lock_all(dev_priv->dev);
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0)) {
- drm_modeset_unlock_all(dev_priv->dev);
- return ret;
- }
-
- if (!num_clips) {
- num_clips = 1;
- clips = &norect;
- norect.x1 = norect.y1 = 0;
- norect.x2 = framebuffer->width;
- norect.y2 = framebuffer->height;
- } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
- num_clips /= 2;
- inc = 2; /* skip source rects */
- }
-
- if (dev_priv->active_display_unit == vmw_du_screen_object)
- ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
- clips, NULL, NULL, 0, 0,
- num_clips, inc, NULL, NULL);
- else
- ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
- clips, NULL, NULL, 0, 0,
- num_clips, inc, NULL, NULL);
-
- vmw_fifo_flush(dev_priv, false);
- ttm_read_unlock(&dev_priv->reservation_sem);
-
- drm_modeset_unlock_all(dev_priv->dev);
-
- return 0;
-}
-
/**
* vmw_kms_readback - Perform a readback from the screen system to
* a buffer-object backed framebuffer.
@@ -941,7 +877,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
- .dirty = vmw_framebuffer_surface_dirty,
+ .dirty = drm_atomic_helper_dirtyfb,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
@@ -1084,16 +1020,6 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
}
switch (dev_priv->active_display_unit) {
- case vmw_du_screen_target:
- ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
- clips, NULL, num_clips, increment,
- true, true, NULL);
- break;
- case vmw_du_screen_object:
- ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
- clips, NULL, num_clips,
- increment, true, NULL, NULL);
- break;
case vmw_du_legacy:
ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
clips, num_clips, increment);
@@ -1112,9 +1038,25 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
return ret;
}
+static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+
+ if (dev_priv->active_display_unit == vmw_du_legacy)
+ return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
+ color, clips, num_clips);
+
+ return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
+ clips, num_clips);
+}
+
static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.destroy = vmw_framebuffer_bo_destroy,
- .dirty = vmw_framebuffer_bo_dirty,
+ .dirty = vmw_framebuffer_bo_dirty_ext,
};
/**
@@ -1565,6 +1507,88 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
}
/**
+ * vmw_crtc_state_and_lock - Return new or current crtc state with locked
+ * crtc mutex
+ * @state: The atomic state pointer containing the new atomic state
+ * @crtc: The crtc
+ *
+ * This function returns the new crtc state if it's part of the state update.
+ * Otherwise it returns the current crtc state. It also makes sure that the
+ * crtc mutex is locked.
+ *
+ * Returns: A valid crtc state pointer or NULL. It may also return an error
+ * pointer, in particular ERR_PTR(-EDEADLK) if locking needs to be rerun.
+ */
+static struct drm_crtc_state *
+vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (crtc_state) {
+ lockdep_assert_held(&crtc->mutex.mutex.base);
+ } else {
+ int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
+
+ if (ret != 0 && ret != -EALREADY)
+ return ERR_PTR(ret);
+
+ crtc_state = crtc->state;
+ }
+
+ return crtc_state;
+}
+
+/**
+ * vmw_kms_check_implicit - Verify that all implicit display units scan out
+ * from the same fb after the new state is committed.
+ * @dev: The drm_device.
+ * @state: The new state to be checked.
+ *
+ * Returns:
+ * Zero on success,
+ * -EINVAL on invalid state,
+ * -EDEADLK if modeset locking needs to be rerun.
+ */
+static int vmw_kms_check_implicit(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_framebuffer *implicit_fb = NULL;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ drm_for_each_crtc(crtc, dev) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ if (!du->is_implicit)
+ continue;
+
+ crtc_state = vmw_crtc_state_and_lock(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state || !crtc_state->enable)
+ continue;
+
+ /*
+ * Can't move primary planes across crtcs, so this is OK.
+ * It also means we don't need to take the plane mutex.
+ */
+ plane_state = du->primary.state;
+ if (plane_state->crtc != crtc)
+ continue;
+
+ if (!implicit_fb)
+ implicit_fb = plane_state->fb;
+ else if (implicit_fb != plane_state->fb)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* vmw_kms_check_topology - Validates topology in drm_atomic_state
* @dev: DRM device
* @state: the driver state object
@@ -1575,7 +1599,6 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_rect *rects;
struct drm_crtc *crtc;
@@ -1587,19 +1610,31 @@ static int vmw_kms_check_topology(struct drm_device *dev,
if (!rects)
return -ENOMEM;
- mutex_lock(&dev_priv->requested_layout_mutex);
-
drm_for_each_crtc(crtc, dev) {
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_crtc_state *crtc_state;
i = drm_crtc_index(crtc);
- if (crtc_state && crtc_state->enable) {
+ crtc_state = vmw_crtc_state_and_lock(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto clean;
+ }
+
+ if (!crtc_state)
+ continue;
+
+ if (crtc_state->enable) {
rects[i].x1 = du->gui_x;
rects[i].y1 = du->gui_y;
rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
+ } else {
+ rects[i].x1 = 0;
+ rects[i].y1 = 0;
+ rects[i].x2 = 0;
+ rects[i].y2 = 0;
}
}
@@ -1611,14 +1646,6 @@ static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
- if (!new_crtc_state->enable) {
- rects[i].x1 = 0;
- rects[i].y1 = 0;
- rects[i].x2 = 0;
- rects[i].y2 = 0;
- continue;
- }
-
if (!du->pref_active) {
ret = -EINVAL;
goto clean;
@@ -1639,18 +1666,12 @@ static int vmw_kms_check_topology(struct drm_device *dev,
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
vmw_conn_state->gui_x = du->gui_x;
vmw_conn_state->gui_y = du->gui_y;
-
- rects[i].x1 = du->gui_x;
- rects[i].y1 = du->gui_y;
- rects[i].x2 = du->gui_x + new_crtc_state->mode.hdisplay;
- rects[i].y2 = du->gui_y + new_crtc_state->mode.vdisplay;
}
ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
rects);
clean:
- mutex_unlock(&dev_priv->requested_layout_mutex);
kfree(rects);
return ret;
}
@@ -1681,6 +1702,10 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
if (ret)
return ret;
+ ret = vmw_kms_check_implicit(dev, state);
+ if (ret)
+ return ret;
+
if (!state->allow_modeset)
return ret;
@@ -2003,11 +2028,25 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
struct vmw_display_unit *du;
struct drm_connector *con;
struct drm_connector_list_iter conn_iter;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc *crtc;
+ int ret;
+
+	/* Currently gui_x/y are protected by the crtc mutex */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_acquire_init(&ctx, 0);
+retry:
+ drm_for_each_crtc(crtc, dev) {
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret < 0) {
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+ goto out_fini;
+ }
+ }
- /*
- * Currently only gui_x/y is protected with requested_layout_mutex.
- */
- mutex_lock(&dev_priv->requested_layout_mutex);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(con, &conn_iter) {
du = vmw_connector_to_du(con);
@@ -2026,9 +2065,7 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
}
}
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev_priv->requested_layout_mutex);
- mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(con, &dev->mode_config.connector_list, head) {
du = vmw_connector_to_du(con);
if (num_rects > du->unit) {
@@ -2048,10 +2085,13 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
}
con->status = vmw_du_connector_detect(con, true);
}
- mutex_unlock(&dev->mode_config.mutex);
drm_sysfs_hotplug_event(dev);
-
+out_fini:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ mutex_unlock(&dev->mode_config.mutex);
+
return 0;
}
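Aside (not part of the patch): vmw_du_update_layout() now takes every crtc mutex through a drm_modeset_acquire_ctx, using the standard ww-mutex backoff dance shown above. The idiom in isolation, assuming an already-initialized acquire context:

	#include <drm/drm_crtc.h>
	#include <drm/drm_modeset_lock.h>

	static int lock_all_crtcs(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
	{
		struct drm_crtc *crtc;
		int ret;

	retry:
		drm_for_each_crtc(crtc, dev) {
			ret = drm_modeset_lock(&crtc->mutex, ctx);
			if (ret == -EDEADLK) {
				/* Drop every held lock, then start over. */
				drm_modeset_backoff(ctx);
				goto retry;
			}
			if (ret)
				return ret;
		}
		return 0;
	}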
@@ -2275,84 +2315,6 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
return 1;
}
-int vmw_du_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- struct vmw_display_unit *du = vmw_connector_to_du(connector);
- struct vmw_private *dev_priv = vmw_priv(connector->dev);
-
- if (property == dev_priv->implicit_placement_property)
- du->is_implicit = val;
-
- return 0;
-}
-
-
-
-/**
- * vmw_du_connector_atomic_set_property - Atomic version of get property
- *
- * @crtc - crtc the property is associated with
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int
-vmw_du_connector_atomic_set_property(struct drm_connector *connector,
- struct drm_connector_state *state,
- struct drm_property *property,
- uint64_t val)
-{
- struct vmw_private *dev_priv = vmw_priv(connector->dev);
- struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
- struct vmw_display_unit *du = vmw_connector_to_du(connector);
-
-
- if (property == dev_priv->implicit_placement_property) {
- vcs->is_implicit = val;
-
- /*
- * We should really be doing a drm_atomic_commit() to
- * commit the new state, but since this doesn't cause
- * an immedate state change, this is probably ok
- */
- du->is_implicit = vcs->is_implicit;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-/**
- * vmw_du_connector_atomic_get_property - Atomic version of get property
- *
- * @connector - connector the property is associated with
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int
-vmw_du_connector_atomic_get_property(struct drm_connector *connector,
- const struct drm_connector_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- struct vmw_private *dev_priv = vmw_priv(connector->dev);
- struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
-
- if (property == dev_priv->implicit_placement_property)
- *val = vcs->is_implicit;
- else {
- DRM_ERROR("Invalid Property %s\n", property->name);
- return -EINVAL;
- }
-
- return 0;
-}
-
/**
* vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
* @dev: drm device for the ioctl
@@ -2742,143 +2704,25 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
}
/**
- * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
- *
- * @dev_priv: Pointer to a device private struct.
- * @du: The display unit of the crtc.
- */
-void vmw_kms_del_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du)
-{
- mutex_lock(&dev_priv->global_kms_state_mutex);
- if (du->active_implicit) {
- if (--(dev_priv->num_implicit) == 0)
- dev_priv->implicit_fb = NULL;
- du->active_implicit = false;
- }
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
- * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
- *
- * @vmw_priv: Pointer to a device private struct.
- * @du: The display unit of the crtc.
- * @vfb: The implicit framebuffer
- *
- * Registers a binding to an implicit framebuffer.
- */
-void vmw_kms_add_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du,
- struct vmw_framebuffer *vfb)
-{
- mutex_lock(&dev_priv->global_kms_state_mutex);
- WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);
-
- if (!du->active_implicit && du->is_implicit) {
- dev_priv->implicit_fb = vfb;
- du->active_implicit = true;
- dev_priv->num_implicit++;
- }
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
- * vmw_kms_screen_object_flippable - Check whether we can page-flip a crtc.
- *
- * @dev_priv: Pointer to device-private struct.
- * @crtc: The crtc we want to flip.
- *
- * Returns true or false depending whether it's OK to flip this crtc
- * based on the criterion that we must not have more than one implicit
- * frame-buffer at any one time.
- */
-bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- bool ret;
-
- mutex_lock(&dev_priv->global_kms_state_mutex);
- ret = !du->is_implicit || dev_priv->num_implicit == 1;
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-
- return ret;
-}
-
-/**
- * vmw_kms_update_implicit_fb - Update the implicit fb.
- *
- * @dev_priv: Pointer to device-private struct.
- * @crtc: The crtc the new implicit frame-buffer is bound to.
- */
-void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct drm_plane *plane = crtc->primary;
- struct vmw_framebuffer *vfb;
-
- mutex_lock(&dev_priv->global_kms_state_mutex);
-
- if (!du->is_implicit)
- goto out_unlock;
-
- vfb = vmw_framebuffer_to_vfb(plane->state->fb);
- WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
- dev_priv->implicit_fb != vfb);
-
- dev_priv->implicit_fb = vfb;
-out_unlock:
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
* property.
*
* @dev_priv: Pointer to a device private struct.
- * @immutable: Whether the property is immutable.
*
* Sets up the implicit placement property unless it's already set up.
*/
void
-vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
- bool immutable)
+vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
if (dev_priv->implicit_placement_property)
return;
dev_priv->implicit_placement_property =
drm_property_create_range(dev_priv->dev,
- immutable ?
- DRM_MODE_PROP_IMMUTABLE : 0,
+ DRM_MODE_PROP_IMMUTABLE,
"implicit_placement", 0, 1);
-
}
-
-/**
- * vmw_kms_set_config - Wrapper around drm_atomic_helper_set_config
- *
- * @set: The configuration to set.
- *
- * The vmwgfx Xorg driver doesn't assign the mode::type member, which
- * when drm_mode_set_crtcinfo is called as part of the configuration setting
- * causes it to return incorrect crtc dimensions causing severe problems in
- * the vmwgfx modesetting. So explicitly clear that member before calling
- * into drm_atomic_helper_set_config.
- */
-int vmw_kms_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx)
-{
- if (set && set->mode)
- set->mode->type = 0;
-
- return drm_atomic_helper_set_config(set, ctx);
-}
-
-
/**
* vmw_kms_suspend - Save modesetting state and turn modesetting off.
*
@@ -2935,3 +2779,124 @@ void vmw_kms_lost_device(struct drm_device *dev)
{
drm_atomic_helper_shutdown(dev);
}
+
+/**
+ * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
+ * @update: The closure structure.
+ *
+ * Call this helper after setting the callbacks in &vmw_du_update_plane to do
+ * a plane update on a display unit.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
+{
+ struct drm_plane_state *state = update->plane->state;
+ struct drm_plane_state *old_state = update->old_state;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ struct drm_rect bb;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+ uint32_t reserved_size = 0;
+ uint32_t submit_size = 0;
+ uint32_t curr_size = 0;
+ uint32_t num_hits = 0;
+ void *cmd_start;
+ char *cmd_next;
+ int ret;
+
+ /*
+	 * Iterate in advance to check whether a plane update is really needed,
+	 * and to find the number of clips that actually lie within the plane
+	 * src, for fifo allocation.
+ */
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ if (num_hits == 0)
+ return 0;
+
+ if (update->vfb->bo) {
+ struct vmw_framebuffer_bo *vfbbo =
+ container_of(update->vfb, typeof(*vfbbo), base);
+
+ ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
+ update->cpu_blit);
+ } else {
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(update->vfb, typeof(*vfbs), base);
+
+ ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
+ 0, NULL, NULL);
+ }
+
+ if (ret)
+ return ret;
+
+ ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
+ if (ret)
+ goto out_unref;
+
+ reserved_size = update->calc_fifo_size(update, num_hits);
+ cmd_start = vmw_fifo_reserve(update->dev_priv, reserved_size);
+ if (!cmd_start) {
+ ret = -ENOMEM;
+ goto out_revert;
+ }
+
+ cmd_next = cmd_start;
+
+ if (update->post_prepare) {
+ curr_size = update->post_prepare(update, cmd_next);
+ cmd_next += curr_size;
+ submit_size += curr_size;
+ }
+
+ if (update->pre_clip) {
+ curr_size = update->pre_clip(update, cmd_next, num_hits);
+ cmd_next += curr_size;
+ submit_size += curr_size;
+ }
+
+ bb.x1 = INT_MAX;
+ bb.y1 = INT_MAX;
+ bb.x2 = INT_MIN;
+ bb.y2 = INT_MIN;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ uint32_t fb_x = clip.x1;
+ uint32_t fb_y = clip.y1;
+
+ vmw_du_translate_to_crtc(state, &clip);
+ if (update->clip) {
+ curr_size = update->clip(update, cmd_next, &clip, fb_x,
+ fb_y);
+ cmd_next += curr_size;
+ submit_size += curr_size;
+ }
+ bb.x1 = min_t(int, bb.x1, clip.x1);
+ bb.y1 = min_t(int, bb.y1, clip.y1);
+ bb.x2 = max_t(int, bb.x2, clip.x2);
+ bb.y2 = max_t(int, bb.y2, clip.y2);
+ }
+
+ curr_size = update->post_clip(update, cmd_next, &bb);
+ submit_size += curr_size;
+
+ if (reserved_size < submit_size)
+ submit_size = 0;
+
+ vmw_fifo_commit(update->dev_priv, submit_size);
+
+ vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
+ update->out_fence, NULL);
+ return ret;
+
+out_revert:
+ vmw_validation_revert(&val_ctx);
+
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
+ return ret;
+}
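Aside (not part of the patch): vmw_du_helper_plane_update() walks the plane damage twice with the atomic damage iterator, once to count clips for the FIFO reservation and once to emit them. The iterator pattern in isolation, assuming valid old and new plane states:

	#include <drm/drm_damage_helper.h>

	static u32 count_damage_clips(struct drm_plane_state *old_state,
				      struct drm_plane_state *new_state)
	{
		struct drm_atomic_helper_damage_iter iter;
		struct drm_rect clip;
		u32 num = 0;

		drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
		drm_atomic_for_each_plane_damage(&iter, &clip)
			num++;	/* each clip is already clipped to the plane src */

		return num;
	}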
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 76ec570c0684..655abbcd4058 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -33,7 +33,123 @@
#include <drm/drm_encoder.h>
#include "vmwgfx_drv.h"
+/**
+ * struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
+ * @plane: Plane which is being updated.
+ * @old_state: Old state of plane.
+ * @dev_priv: Device private.
+ * @du: Display unit on which to update the plane.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: Out fence for resource finish.
+ * @mutex: The mutex used to protect resource reservation.
+ * @cpu_blit: True if a cpu blit is needed.
+ * @intr: Whether to perform waits interruptibly if possible.
+ *
+ * This structure loosely represents the set of operations needed to perform a
+ * plane update on a display unit. The implementer defines that functionality
+ * through the function callbacks of this structure. In brief it involves
+ * surface/buffer object validation, populating FIFO commands and command
+ * submission to the device.
+ */
+struct vmw_du_update_plane {
+ /**
+ * @calc_fifo_size: Calculate fifo size.
+ *
+	 * Determine the fifo size needed for the update commands. The number
+	 * of damage clips on the display unit, @num_hits, is passed so that
+	 * sufficient fifo space can be allocated.
+ *
+ * Return: Fifo size needed
+ */
+ uint32_t (*calc_fifo_size)(struct vmw_du_update_plane *update,
+ uint32_t num_hits);
+
+ /**
+ * @post_prepare: Populate fifo for resource preparation.
+ *
+	 * Some surface resources or buffer objects need extra cmd submission,
+	 * like updating the GB image for a proxy surface or defining a GMRFB
+	 * for a screen object. That should be done here, as this callback is
+	 * called after FIFO allocation with the address of the command buffer.
+ *
+ * This callback is optional.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*post_prepare)(struct vmw_du_update_plane *update, void *cmd);
+
+ /**
+ * @pre_clip: Populate fifo before clip.
+ *
+	 * This is where pre-clip related commands should be populated, like
+	 * surface copy/DMA, etc.
+ *
+ * This callback is optional.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*pre_clip)(struct vmw_du_update_plane *update, void *cmd,
+ uint32_t num_hits);
+
+ /**
+ * @clip: Populate fifo for clip.
+ *
+	 * This is where to populate clips for surface copy/dma or blit commands
+	 * if needed. It is called once per damage clip on the display unit,
+	 * which is once if doing a full update. @clip is the damage in
+	 * destination (crtc/DU) coordinates and @src_x, @src_y is the damage
+	 * clip src in framebuffer coordinates.
+ *
+ * This callback is optional.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*clip)(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *clip, uint32_t src_x, uint32_t src_y);
+
+ /**
+ * @post_clip: Populate fifo after clip.
+ *
+ * This is where to populate display unit update commands or blit
+ * commands.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*post_clip)(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *bb);
+
+ struct drm_plane *plane;
+ struct drm_plane_state *old_state;
+ struct vmw_private *dev_priv;
+ struct vmw_display_unit *du;
+ struct vmw_framebuffer *vfb;
+ struct vmw_fence_obj **out_fence;
+ struct mutex *mutex;
+ bool cpu_blit;
+ bool intr;
+};
+
+/**
+ * struct vmw_du_update_plane_surface - closure structure for surface
+ * @base: base closure structure.
+ * @cmd_start: FIFO command start address (used by SOU only).
+ */
+struct vmw_du_update_plane_surface {
+ struct vmw_du_update_plane base;
+	/* This member handles the special-case SOU surface update */
+ void *cmd_start;
+};
+/**
+ * struct vmw_du_update_plane_buffer - Closure structure for buffer object
+ * @base: Base closure structure.
+ * @fb_left: x1 for fb damage bounding box.
+ * @fb_top: y1 for fb damage bounding box.
+ */
+struct vmw_du_update_plane_buffer {
+ struct vmw_du_update_plane base;
+ int fb_left, fb_top;
+};
/**
* struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
@@ -191,8 +307,6 @@ struct vmw_plane_state {
struct vmw_connector_state {
struct drm_connector_state base;
- bool is_implicit;
-
/**
* @gui_x:
*
@@ -254,7 +368,6 @@ struct vmw_display_unit {
int gui_x;
int gui_y;
bool is_implicit;
- bool active_implicit;
int set_gui_x;
int set_gui_y;
};
@@ -334,17 +447,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
struct drm_crtc **p_crtc,
struct drm_display_mode **p_mode);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
-void vmw_kms_del_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du);
-void vmw_kms_add_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du,
- struct vmw_framebuffer *vfb);
-bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
-void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
-void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
- bool immutable);
+void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
+void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
@@ -456,6 +560,20 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
bool interruptible,
struct drm_crtc *crtc);
-int vmw_kms_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx);
+int vmw_du_helper_plane_update(struct vmw_du_update_plane *update);
+
+/**
+ * vmw_du_translate_to_crtc - Translate a rect from framebuffer to crtc
+ * @state: Plane state.
+ * @r: Rectangle to translate.
+ */
+static inline void vmw_du_translate_to_crtc(struct drm_plane_state *state,
+ struct drm_rect *r)
+{
+ int translate_crtc_x = -((state->src_x >> 16) - state->crtc_x);
+ int translate_crtc_y = -((state->src_y >> 16) - state->crtc_y);
+
+ drm_rect_translate(r, translate_crtc_x, translate_crtc_y);
+}
+
#endif
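Aside (not part of the patch): plane src coordinates are 16.16 fixed point, so src_x >> 16 yields the integer framebuffer origin and vmw_du_translate_to_crtc() shifts a rect by the difference between the crtc and src origins. A worked example with hypothetical numbers:

	/* src origin (100, 50) in the fb; plane placed at crtc (10, 0). */
	struct drm_rect r = { .x1 = 110, .y1 = 60, .x2 = 120, .y2 = 70 };
	int src_x = 100 << 16, src_y = 50 << 16;	/* 16.16 fixed point */
	int crtc_x = 10, crtc_y = 0;

	/* Translate by -(100 - 10) = -90 and -(50 - 0) = -50 ... */
	drm_rect_translate(&r, -((src_x >> 16) - crtc_x),
			   -((src_y >> 16) - crtc_y));
	/* ... giving (20, 10)-(30, 20) in crtc coordinates. */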
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 723578117191..16be515c4c0f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -233,7 +233,7 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
- .set_config = vmw_kms_set_config,
+ .set_config = drm_atomic_helper_set_config,
};
@@ -263,18 +263,14 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
.destroy = vmw_ldu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
- .atomic_set_property = vmw_du_connector_atomic_set_property,
- .atomic_get_property = vmw_du_connector_atomic_get_property,
};
static const struct
drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
- .best_encoder = drm_atomic_helper_best_encoder,
};
/*
@@ -417,7 +413,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
-
vmw_du_connector_reset(connector);
ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -428,8 +423,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_helper_add(connector, &vmw_ldu_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, true);
- vmw_connector_state_to_vcs(connector->state)->is_implicit = true;
-
ret = drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -448,7 +441,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_encoder;
}
-
vmw_du_crtc_reset(crtc);
ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
&ldu->base.cursor,
@@ -514,7 +506,7 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
if (ret != 0)
goto err_free;
- vmw_kms_create_implicit_placement_property(dev_priv, true);
+ vmw_kms_create_implicit_placement_property(dev_priv);
if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8a029bade32a..3025bfc001a1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -85,7 +85,7 @@ static void vmw_resource_release(struct kref *kref)
struct ttm_validate_buffer val_buf;
val_buf.bo = bo;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
res->func->unbind(res, false, &val_buf);
}
res->backup_dirty = false;
@@ -462,7 +462,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
- val_buf->shared = false;
+ val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
@@ -565,7 +565,7 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -614,7 +614,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
return 0;
val_buf.bo = NULL;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
if (res->backup)
val_buf.bo = &res->backup->base;
do {
@@ -685,7 +685,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
struct vmw_resource *res, *next;
struct ttm_validate_buffer val_buf = {
.bo = &vbo->base,
- .shared = false
+ .num_shared = 0
};
lockdep_assert_held(&vbo->base.resv->lock.base);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 53316b1bda3d..cd586c52af7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -29,6 +29,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
#define vmw_crtc_to_sou(x) \
@@ -76,6 +77,11 @@ struct vmw_kms_sou_dirty_cmd {
SVGA3dCmdBlitSurfaceToScreen body;
};
+struct vmw_kms_sou_define_gmrfb {
+ uint32_t header;
+ SVGAFifoCmdDefineGMRFB body;
+};
+
/**
* Display unit using screen objects.
*/
@@ -241,28 +247,20 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
sou->buffer = vps->bo;
sou->buffer_size = vps->bo_size;
- if (sou->base.is_implicit) {
- x = crtc->x;
- y = crtc->y;
- } else {
- conn_state = sou->base.connector.state;
- vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+ conn_state = sou->base.connector.state;
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
- x = vmw_conn_state->gui_x;
- y = vmw_conn_state->gui_y;
- }
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
if (ret)
DRM_ERROR("Failed to define Screen Object %dx%d\n",
crtc->x, crtc->y);
- vmw_kms_add_active(dev_priv, &sou->base, vfb);
} else {
sou->buffer = NULL;
sou->buffer_size = 0;
-
- vmw_kms_del_active(dev_priv, &sou->base);
}
}
@@ -317,38 +315,14 @@ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
}
}
-static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *new_fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- int ret;
-
- if (!vmw_kms_crtc_flippable(dev_priv, crtc))
- return -EINVAL;
-
- ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
- if (ret) {
- DRM_ERROR("Page flip error %d.\n", ret);
- return ret;
- }
-
- if (vmw_crtc_to_du(crtc)->is_implicit)
- vmw_kms_update_implicit_fb(dev_priv, crtc);
-
- return ret;
-}
-
static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_sou_crtc_destroy,
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
- .set_config = vmw_kms_set_config,
- .page_flip = vmw_sou_crtc_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
};
/*
@@ -377,19 +351,15 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
.destroy = vmw_sou_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
- .atomic_set_property = vmw_du_connector_atomic_set_property,
- .atomic_get_property = vmw_du_connector_atomic_get_property,
};
static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
- .best_encoder = drm_atomic_helper_best_encoder,
};
@@ -499,6 +469,263 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
}
+static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_kms_sou_define_gmrfb) +
+ sizeof(struct vmw_kms_sou_bo_blit) * num_hits;
+}
+
+static uint32_t vmw_sou_bo_define_gmrfb(struct vmw_du_update_plane *update,
+ void *cmd)
+{
+ struct vmw_framebuffer_bo *vfbbo =
+ container_of(update->vfb, typeof(*vfbbo), base);
+ struct vmw_kms_sou_define_gmrfb *gmr = cmd;
+ int depth = update->vfb->base.format->depth;
+
+	/* Emulate RGBA support: contrary to svga_reg.h, this is not
+	 * supported by hosts. It is only a problem if we later read
+	 * this value back and expect what we uploaded.
+ */
+ if (depth == 32)
+ depth = 24;
+
+ gmr->header = SVGA_CMD_DEFINE_GMRFB;
+
+ gmr->body.format.bitsPerPixel = update->vfb->base.format->cpp[0] * 8;
+ gmr->body.format.colorDepth = depth;
+ gmr->body.format.reserved = 0;
+ gmr->body.bytesPerLine = update->vfb->base.pitches[0];
+ vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &gmr->body.ptr);
+
+ return sizeof(*gmr);
+}
+
+static uint32_t vmw_sou_bo_populate_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t fb_x, uint32_t fb_y)
+{
+ struct vmw_kms_sou_bo_blit *blit = cmd;
+
+ blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+ blit->body.destScreenId = update->du->unit;
+ blit->body.srcOrigin.x = fb_x;
+ blit->body.srcOrigin.y = fb_y;
+ blit->body.destRect.left = clip->x1;
+ blit->body.destRect.top = clip->y1;
+ blit->body.destRect.right = clip->x2;
+ blit->body.destRect.bottom = clip->y2;
+
+ return sizeof(*blit);
+}
+
+static uint32_t vmw_sou_bo_post_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *bb)
+{
+ return 0;
+}
+
+/**
+ * vmw_sou_plane_update_bo - Update display unit for bo backed fb.
+ * @dev_priv: Device private.
+ * @plane: Plane state.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_sou_plane_update_bo(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane_buffer bo_update;
+
+ memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer));
+ bo_update.base.plane = plane;
+ bo_update.base.old_state = old_state;
+ bo_update.base.dev_priv = dev_priv;
+ bo_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+ bo_update.base.vfb = vfb;
+ bo_update.base.out_fence = out_fence;
+ bo_update.base.mutex = NULL;
+ bo_update.base.cpu_blit = false;
+ bo_update.base.intr = true;
+
+ bo_update.base.calc_fifo_size = vmw_sou_bo_fifo_size;
+ bo_update.base.post_prepare = vmw_sou_bo_define_gmrfb;
+ bo_update.base.clip = vmw_sou_bo_populate_clip;
+	bo_update.base.post_clip = vmw_sou_bo_post_clip;
+
+ return vmw_du_helper_plane_update(&bo_update.base);
+}
+
+static uint32_t vmw_sou_surface_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_kms_sou_dirty_cmd) + sizeof(SVGASignedRect) *
+ num_hits;
+}
+
+static uint32_t vmw_sou_surface_post_prepare(struct vmw_du_update_plane *update,
+ void *cmd)
+{
+ struct vmw_du_update_plane_surface *srf_update;
+
+ srf_update = container_of(update, typeof(*srf_update), base);
+
+ /*
+ * SOU SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN is special in the sense that
+ * its bounding box is filled before iterating over all the clips. So
+ * store the FIFO start address and revisit to fill the details.
+ */
+ srf_update->cmd_start = cmd;
+
+ return 0;
+}
+
+static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
+ void *cmd, uint32_t num_hits)
+{
+ struct vmw_kms_sou_dirty_cmd *blit = cmd;
+ struct vmw_framebuffer_surface *vfbs;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ blit->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+ blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) *
+ num_hits;
+
+ blit->body.srcImage.sid = vfbs->surface->res.id;
+ blit->body.destScreenId = update->du->unit;
+
+ /* Update the source and destination bounding box later in post_clip */
+ blit->body.srcRect.left = 0;
+ blit->body.srcRect.top = 0;
+ blit->body.srcRect.right = 0;
+ blit->body.srcRect.bottom = 0;
+
+ blit->body.destRect.left = 0;
+ blit->body.destRect.top = 0;
+ blit->body.destRect.right = 0;
+ blit->body.destRect.bottom = 0;
+
+ return sizeof(*blit);
+}
+
+static uint32_t vmw_sou_surface_clip_rect(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t src_x, uint32_t src_y)
+{
+ SVGASignedRect *rect = cmd;
+
+ /*
+	 * Rects are relative to the dest bounding box rect on the screen
+	 * object, so they are translated relative to it later, in post_clip.
+ */
+ rect->left = clip->x1;
+ rect->top = clip->y1;
+ rect->right = clip->x2;
+ rect->bottom = clip->y2;
+
+ return sizeof(*rect);
+}
+
+static uint32_t vmw_sou_surface_post_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *bb)
+{
+ struct vmw_du_update_plane_surface *srf_update;
+ struct drm_plane_state *state = update->plane->state;
+ struct drm_rect src_bb;
+ struct vmw_kms_sou_dirty_cmd *blit;
+ SVGASignedRect *rect;
+ uint32_t num_hits;
+ int translate_src_x;
+ int translate_src_y;
+ int i;
+
+ srf_update = container_of(update, typeof(*srf_update), base);
+
+ blit = srf_update->cmd_start;
+ rect = (SVGASignedRect *)&blit[1];
+
+	num_hits = (blit->header.size - sizeof(blit->body)) /
+		   sizeof(SVGASignedRect);
+
+ src_bb = *bb;
+
+ /* To translate bb back to fb src coord */
+ translate_src_x = (state->src_x >> 16) - state->crtc_x;
+ translate_src_y = (state->src_y >> 16) - state->crtc_y;
+
+ drm_rect_translate(&src_bb, translate_src_x, translate_src_y);
+
+ blit->body.srcRect.left = src_bb.x1;
+ blit->body.srcRect.top = src_bb.y1;
+ blit->body.srcRect.right = src_bb.x2;
+ blit->body.srcRect.bottom = src_bb.y2;
+
+ blit->body.destRect.left = bb->x1;
+ blit->body.destRect.top = bb->y1;
+ blit->body.destRect.right = bb->x2;
+ blit->body.destRect.bottom = bb->y2;
+
+ /* rects are relative to dest bb rect */
+ for (i = 0; i < num_hits; i++) {
+ rect->left -= bb->x1;
+ rect->top -= bb->y1;
+ rect->right -= bb->x1;
+ rect->bottom -= bb->y1;
+ rect++;
+ }
+
+ return 0;
+}
+
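Aside (not part of the patch): SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN takes its clip rects relative to the destination bounding box, which is why the loop above rebases each rect by subtracting the box origin. A worked example with hypothetical numbers:

	/* Bounding box (8, 4)-(40, 30); one clip at (10, 6)-(20, 16). */
	SVGASignedRect rect = { .left = 10, .top = 6, .right = 20, .bottom = 16 };

	rect.left   -= 8;	/* bb->x1 */
	rect.right  -= 8;
	rect.top    -= 4;	/* bb->y1 */
	rect.bottom -= 4;
	/* rect is now (2, 2)-(12, 12), relative to the box origin. */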
+/**
+ * vmw_sou_plane_update_surface - Update display unit for surface backed fb.
+ * @dev_priv: Device private.
+ * @plane: Plane state.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane_surface srf_update;
+
+ memset(&srf_update, 0, sizeof(struct vmw_du_update_plane_surface));
+ srf_update.base.plane = plane;
+ srf_update.base.old_state = old_state;
+ srf_update.base.dev_priv = dev_priv;
+ srf_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+ srf_update.base.vfb = vfb;
+ srf_update.base.out_fence = out_fence;
+ srf_update.base.mutex = &dev_priv->cmdbuf_mutex;
+ srf_update.base.cpu_blit = false;
+ srf_update.base.intr = true;
+
+ srf_update.base.calc_fifo_size = vmw_sou_surface_fifo_size;
+ srf_update.base.post_prepare = vmw_sou_surface_post_prepare;
+ srf_update.base.pre_clip = vmw_sou_surface_pre_clip;
+ srf_update.base.clip = vmw_sou_surface_clip_rect;
+ srf_update.base.post_clip = vmw_sou_surface_post_clip;
+
+ return vmw_du_helper_plane_update(&srf_update.base);
+}
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
@@ -509,47 +736,28 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
struct vmw_fence_obj *fence = NULL;
int ret;
+ /* In case of device error, maintain consistent atomic state */
if (crtc && plane->state->fb) {
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_framebuffer *vfb =
vmw_framebuffer_to_vfb(plane->state->fb);
- struct drm_vmw_rect vclips;
-
- vclips.x = crtc->x;
- vclips.y = crtc->y;
- vclips.w = crtc->mode.hdisplay;
- vclips.h = crtc->mode.vdisplay;
if (vfb->bo)
- ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
- &vclips, 1, 1, true,
- &fence, crtc);
+ ret = vmw_sou_plane_update_bo(dev_priv, plane,
+ old_state, vfb, &fence);
else
- ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
- &vclips, NULL, 0, 0,
- 1, 1, &fence, crtc);
-
- /*
- * We cannot really fail this function, so if we do, then output
- * an error and maintain consistent atomic state.
- */
+ ret = vmw_sou_plane_update_surface(dev_priv, plane,
+ old_state, vfb,
+ &fence);
if (ret != 0)
DRM_ERROR("Failed to update screen.\n");
} else {
- /*
- * When disabling a plane, CRTC and FB should always be NULL
- * together, otherwise it's an error.
- * Here primary plane is being disable so should really blank
- * the screen object display unit, if not already done.
- */
+		/* Do nothing when fb and crtc are NULL (blank crtc) */
return;
}
+	/* In the error case the vblank event is sent from vmw_du_crtc_atomic_flush */
event = crtc->state->event;
- /*
- * In case of failure and other cases, vblank event will be sent in
- * vmw_du_crtc_atomic_flush.
- */
if (event && fence) {
struct drm_file *file_priv = event->base.file_priv;
@@ -640,7 +848,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
primary = &sou->base.primary;
cursor = &sou->base.cursor;
- sou->base.active_implicit = false;
sou->base.pref_active = (unit == 0);
sou->base.pref_width = dev_priv->initial_width;
sou->base.pref_height = dev_priv->initial_height;
@@ -666,6 +873,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary);
/* Initialize cursor plane */
vmw_du_plane_reset(cursor);
@@ -693,8 +901,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, true);
- vmw_connector_state_to_vcs(connector->state)->is_implicit = false;
-
ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -733,12 +939,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
- if (dev_priv->implicit_placement_property)
- drm_object_attach_property
- (&connector->base,
- dev_priv->implicit_placement_property,
- sou->base.is_implicit);
-
return 0;
err_free_unregister:
@@ -764,15 +964,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
}
ret = -ENOMEM;
- dev_priv->num_implicit = 0;
- dev_priv->implicit_fb = NULL;
ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
if (unlikely(ret != 0))
return ret;
- vmw_kms_create_implicit_placement_property(dev_priv, false);
-
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_sou_init(dev_priv, i);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index e086565c1da6..096c2941a8e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -30,7 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-
+#include <drm/drm_damage_helper.h>
#define vmw_crtc_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.crtc)
@@ -92,6 +92,10 @@ struct vmw_stdu_surface_copy {
SVGA3dCmdSurfaceCopy body;
};
+struct vmw_stdu_update_gb_image {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+};
/**
* struct vmw_screen_target_display_unit
@@ -396,13 +400,8 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (!crtc->state->enable)
return;
- if (stdu->base.is_implicit) {
- x = crtc->x;
- y = crtc->y;
- } else {
- x = vmw_conn_state->gui_x;
- y = vmw_conn_state->gui_y;
- }
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
vmw_svga_enable(dev_priv);
ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y);
@@ -417,27 +416,9 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
{
}
-
static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct drm_plane_state *plane_state = crtc->primary->state;
- struct vmw_private *dev_priv;
- struct vmw_screen_target_display_unit *stdu;
- struct vmw_framebuffer *vfb;
- struct drm_framebuffer *fb;
-
-
- stdu = vmw_crtc_to_stdu(crtc);
- dev_priv = vmw_priv(crtc->dev);
- fb = plane_state->fb;
-
- vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
-
- if (vfb)
- vmw_kms_add_active(dev_priv, &stdu->base, vfb);
- else
- vmw_kms_del_active(dev_priv, &stdu->base);
}
static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -472,49 +453,6 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
}
/**
- * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
- *
- * @crtc: CRTC to attach FB to
- * @fb: FB to attach
- * @event: Event to be posted. This event should've been alloced
- * using k[mz]alloc, and should've been completely initialized.
- * @page_flip_flags: Input flags.
- *
- * If the STDU uses the same display and content buffers, i.e. a true flip,
- * this function will replace the existing display buffer with the new content
- * buffer.
- *
- * If the STDU uses different display and content buffers, i.e. a blit, then
- * only the content buffer will be updated.
- *
- * RETURNS:
- * 0 on success, error code on failure
- */
-static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *new_fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags,
- struct drm_modeset_acquire_ctx *ctx)
-
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
- int ret;
-
- if (!stdu->defined || !vmw_kms_crtc_flippable(dev_priv, crtc))
- return -EINVAL;
-
- ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
- if (ret) {
- DRM_ERROR("Page flip error %d.\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-
-/**
 * vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect
*
* @dirty: The closure structure.
@@ -986,8 +924,8 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
- .set_config = vmw_kms_set_config,
- .page_flip = vmw_stdu_crtc_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
};
@@ -1042,19 +980,15 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
.destroy = vmw_stdu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
- .atomic_set_property = vmw_du_connector_atomic_set_property,
- .atomic_get_property = vmw_du_connector_atomic_get_property,
};
static const struct
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
- .best_encoder = drm_atomic_helper_best_encoder,
};
@@ -1257,11 +1191,402 @@ out_srf_unref:
return ret;
}
+static uint32_t vmw_stdu_bo_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_stdu_dma) + sizeof(SVGA3dCopyBox) * num_hits +
+ sizeof(SVGA3dCmdSurfaceDMASuffix) +
+ sizeof(struct vmw_stdu_update);
+}
+
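Aside (not part of the patch): the reservation in vmw_stdu_bo_fifo_size() above mirrors the command sequence the bo-backed STDU callbacks later emit, so the per-callback return values sum to exactly this size:

	/*
	 * FIFO layout for a bo-backed STDU update with N damage clips:
	 *
	 *   struct vmw_stdu_dma            <- vmw_stdu_bo_populate_dma()
	 *   SVGA3dCopyBox x N              <- vmw_stdu_bo_populate_clip(), per clip
	 *   SVGA3dCmdSurfaceDMASuffix      <- vmw_stdu_bo_populate_update()
	 *   struct vmw_stdu_update         <- vmw_stdu_bo_populate_update()
	 */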
+static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_stdu_update_gb_image) +
+ sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_populate_dma(struct vmw_du_update_plane *update,
+ void *cmd, uint32_t num_hits)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_bo *vfbbo;
+ struct vmw_stdu_dma *cmd_dma = cmd;
+
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+ cmd_dma->header.id = SVGA_3D_CMD_SURFACE_DMA;
+ cmd_dma->header.size = sizeof(cmd_dma->body) +
+ sizeof(struct SVGA3dCopyBox) * num_hits +
+ sizeof(SVGA3dCmdSurfaceDMASuffix);
+ vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &cmd_dma->body.guest.ptr);
+ cmd_dma->body.guest.pitch = update->vfb->base.pitches[0];
+ cmd_dma->body.host.sid = stdu->display_srf->res.id;
+ cmd_dma->body.host.face = 0;
+ cmd_dma->body.host.mipmap = 0;
+ cmd_dma->body.transfer = SVGA3D_WRITE_HOST_VRAM;
+
+ return sizeof(*cmd_dma);
+}
+
+static uint32_t vmw_stdu_bo_populate_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t fb_x, uint32_t fb_y)
+{
+ struct SVGA3dCopyBox *box = cmd;
+
+ box->srcx = fb_x;
+ box->srcy = fb_y;
+ box->srcz = 0;
+ box->x = clip->x1;
+ box->y = clip->y1;
+ box->z = 0;
+ box->w = drm_rect_width(clip);
+ box->h = drm_rect_height(clip);
+ box->d = 1;
+
+ return sizeof(*box);
+}
+
+static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *bb)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_bo *vfbbo;
+ SVGA3dCmdSurfaceDMASuffix *suffix = cmd;
+
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+
+ vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
+ bb->y1, bb->y2);
+
+ return sizeof(*suffix) + sizeof(struct vmw_stdu_update);
+}
+
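Taken together, the three callbacks above emit one contiguous FIFO packet whose layout matches vmw_stdu_bo_fifo_size(); sketched for orientation (an editorial sketch, assuming the helper reserves the whole packet in one go):

	[struct vmw_stdu_dma]           header + DMA body, vmw_stdu_bo_populate_dma()
	[SVGA3dCopyBox x num_hits]      one copy box per damage clip, populate_clip()
	[SVGA3dCmdSurfaceDMASuffix]     written by populate_update(), followed by
	[struct vmw_stdu_update]        the final screen-target update
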
+static uint32_t vmw_stdu_bo_pre_clip_cpu(struct vmw_du_update_plane *update,
+ void *cmd, uint32_t num_hits)
+{
+ struct vmw_du_update_plane_buffer *bo_update =
+ container_of(update, typeof(*bo_update), base);
+
+ bo_update->fb_left = INT_MAX;
+ bo_update->fb_top = INT_MAX;
+
+ return 0;
+}
+
+static uint32_t vmw_stdu_bo_clip_cpu(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t fb_x, uint32_t fb_y)
+{
+ struct vmw_du_update_plane_buffer *bo_update =
+ container_of(update, typeof(*bo_update), base);
+
+ bo_update->fb_left = min_t(int, bo_update->fb_left, fb_x);
+ bo_update->fb_top = min_t(int, bo_update->fb_top, fb_y);
+
+ return 0;
+}
+
+static uint32_t
+vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *bb)
+{
+ struct vmw_du_update_plane_buffer *bo_update;
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_bo *vfbbo;
+ struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
+ struct vmw_stdu_update_gb_image *cmd_img = cmd;
+ struct vmw_stdu_update *cmd_update;
+ struct ttm_buffer_object *src_bo, *dst_bo;
+ u32 src_offset, dst_offset;
+ s32 src_pitch, dst_pitch;
+ s32 width, height;
+
+ bo_update = container_of(update, typeof(*bo_update), base);
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+ width = bb->x2 - bb->x1;
+ height = bb->y2 - bb->y1;
+
+ diff.cpp = stdu->cpp;
+
+ dst_bo = &stdu->display_srf->res.backup->base;
+ dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+ dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
+
+ src_bo = &vfbbo->buffer->base;
+ src_pitch = update->vfb->base.pitches[0];
+ src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
+ stdu->cpp;
+
+ (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch, src_bo,
+ src_offset, src_pitch, width * stdu->cpp, height,
+ &diff);
+
+ if (drm_rect_visible(&diff.rect)) {
+ SVGA3dBox *box = &cmd_img->body.box;
+
+ cmd_img->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd_img->header.size = sizeof(cmd_img->body);
+ cmd_img->body.image.sid = stdu->display_srf->res.id;
+ cmd_img->body.image.face = 0;
+ cmd_img->body.image.mipmap = 0;
+
+ box->x = diff.rect.x1;
+ box->y = diff.rect.y1;
+ box->z = 0;
+ box->w = drm_rect_width(&diff.rect);
+ box->h = drm_rect_height(&diff.rect);
+ box->d = 1;
+
+ cmd_update = (struct vmw_stdu_update *)&cmd_img[1];
+ vmw_stdu_populate_update(cmd_update, stdu->base.unit,
+ diff.rect.x1, diff.rect.x2,
+ diff.rect.y1, diff.rect.y2);
+
+ return sizeof(*cmd_img) + sizeof(*cmd_update);
+ }
+
+ return 0;
+}
+
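vmw_bo_cpu_blit() is assumed to shrink diff.rect to the smallest rectangle whose bytes actually changed during the copy, which is what VMW_CPU_BLIT_DIFF_INITIALIZER appears to set up:

	/* Assumed contract (sketch):
	 *   vmw_bo_cpu_blit(dst, ..., src, ..., &diff);
	 *   diff.rect == smallest rect whose bytes differed; an empty rect
	 *   means no UPDATE_GB_IMAGE is needed and 0 bytes are written, so
	 *   the space reserved via vmw_stdu_bo_fifo_size_cpu() goes unused.
	 */
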
+/**
+ * vmw_stdu_plane_update_bo - Update display unit for bo backed fb.
+ * @dev_priv: device private.
+ * @plane: plane state.
+ * @old_state: old plane state.
+ * @vfb: framebuffer which is blitted to display unit.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_stdu_plane_update_bo(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane_buffer bo_update;
+
+ memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer));
+ bo_update.base.plane = plane;
+ bo_update.base.old_state = old_state;
+ bo_update.base.dev_priv = dev_priv;
+ bo_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+ bo_update.base.vfb = vfb;
+ bo_update.base.out_fence = out_fence;
+ bo_update.base.mutex = NULL;
+ bo_update.base.cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+ bo_update.base.intr = false;
+
+ /*
+	 * VMs without 3D support don't have the surface DMA command, and the
+	 * framebuffer should be moved out of VRAM.
+ */
+ if (bo_update.base.cpu_blit) {
+ bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu;
+ bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu;
+ bo_update.base.clip = vmw_stdu_bo_clip_cpu;
+ bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu;
+ } else {
+ bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size;
+ bo_update.base.pre_clip = vmw_stdu_bo_populate_dma;
+ bo_update.base.clip = vmw_stdu_bo_populate_clip;
+ bo_update.base.post_clip = vmw_stdu_bo_populate_update;
+ }
+
+ return vmw_du_helper_plane_update(&bo_update.base);
+}
+
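The bo path only fills in a set of callbacks and hands them to vmw_du_helper_plane_update(), which is outside this hunk. A sketch of the sequence that helper is assumed to drive (the reserve/submit steps are hypothetical; only the four hooks are from the patch):

	fifo_size = update->calc_fifo_size(update, num_hits);
	cmd = reserve_fifo_space(fifo_size);            /* hypothetical */
	cmd += update->pre_clip(update, cmd, num_hits);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		cmd += update->clip(update, cmd, &clip, fb_x, fb_y);
	cmd += update->post_clip(update, cmd, &bounding_box);
	/* commit the command buffer, optionally returning a fence */

Each hook returns the number of bytes it wrote, which is why the CPU-blit pre_clip/clip hooks above return 0: they only track the dirty framebuffer origin and emit no FIFO data.
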
+static uint32_t
+vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ struct vmw_framebuffer_surface *vfbs;
+ uint32_t size = 0;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ if (vfbs->is_bo_proxy)
+ size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+
+ size += sizeof(struct vmw_stdu_update);
+
+ return size;
+}
+
+static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ struct vmw_framebuffer_surface *vfbs;
+ uint32_t size = 0;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ if (vfbs->is_bo_proxy)
+ size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+
+ size += sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) *
+ num_hits + sizeof(struct vmw_stdu_update);
+
+ return size;
+}
+
+static uint32_t
+vmw_stdu_surface_update_proxy(struct vmw_du_update_plane *update, void *cmd)
+{
+ struct vmw_framebuffer_surface *vfbs;
+ struct drm_plane_state *state = update->plane->state;
+ struct drm_plane_state *old_state = update->old_state;
+ struct vmw_stdu_update_gb_image *cmd_update = cmd;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ uint32_t copy_size = 0;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ /*
+	 * A proxy surface is special: a buffer object type fb is wrapped in a
+	 * surface and needs an update GB image command to sync with the device.
+ */
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ SVGA3dBox *box = &cmd_update->body.box;
+
+ cmd_update->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd_update->header.size = sizeof(cmd_update->body);
+ cmd_update->body.image.sid = vfbs->surface->res.id;
+ cmd_update->body.image.face = 0;
+ cmd_update->body.image.mipmap = 0;
+
+ box->x = clip.x1;
+ box->y = clip.y1;
+ box->z = 0;
+ box->w = drm_rect_width(&clip);
+ box->h = drm_rect_height(&clip);
+ box->d = 1;
+
+ copy_size += sizeof(*cmd_update);
+ cmd_update++;
+ }
+
+ return copy_size;
+}
+
+static uint32_t
+vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd,
+ uint32_t num_hits)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_surface *vfbs;
+ struct vmw_stdu_surface_copy *cmd_copy = cmd;
+
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ cmd_copy->header.id = SVGA_3D_CMD_SURFACE_COPY;
+ cmd_copy->header.size = sizeof(cmd_copy->body) + sizeof(SVGA3dCopyBox) *
+ num_hits;
+ cmd_copy->body.src.sid = vfbs->surface->res.id;
+ cmd_copy->body.dest.sid = stdu->display_srf->res.id;
+
+ return sizeof(*cmd_copy);
+}
+
+static uint32_t
+vmw_stdu_surface_populate_clip(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *clip, uint32_t fb_x,
+ uint32_t fb_y)
+{
+ struct SVGA3dCopyBox *box = cmd;
+
+ box->srcx = fb_x;
+ box->srcy = fb_y;
+ box->srcz = 0;
+ box->x = clip->x1;
+ box->y = clip->y1;
+ box->z = 0;
+ box->w = drm_rect_width(clip);
+ box->h = drm_rect_height(clip);
+ box->d = 1;
+
+ return sizeof(*box);
+}
+
+static uint32_t
+vmw_stdu_surface_populate_update(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *bb)
+{
+ vmw_stdu_populate_update(cmd, update->du->unit, bb->x1, bb->x2, bb->y1,
+ bb->y2);
+ return sizeof(struct vmw_stdu_update);
+}
/**
- * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
+ * vmw_stdu_plane_update_surface - Update display unit for surface backed fb
+ * @dev_priv: Device private
+ * @plane: Plane state
+ * @old_state: Old plane state
+ * @vfb: Framebuffer which is blitted to display unit
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
*
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane srf_update;
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_surface *vfbs;
+
+ stdu = vmw_crtc_to_stdu(plane->state->crtc);
+ vfbs = container_of(vfb, typeof(*vfbs), base);
+
+ memset(&srf_update, 0, sizeof(struct vmw_du_update_plane));
+ srf_update.plane = plane;
+ srf_update.old_state = old_state;
+ srf_update.dev_priv = dev_priv;
+ srf_update.du = vmw_crtc_to_du(plane->state->crtc);
+ srf_update.vfb = vfb;
+ srf_update.out_fence = out_fence;
+ srf_update.mutex = &dev_priv->cmdbuf_mutex;
+ srf_update.cpu_blit = false;
+ srf_update.intr = true;
+
+ if (vfbs->is_bo_proxy)
+ srf_update.post_prepare = vmw_stdu_surface_update_proxy;
+
+ if (vfbs->surface->res.id != stdu->display_srf->res.id) {
+ srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size;
+ srf_update.pre_clip = vmw_stdu_surface_populate_copy;
+ srf_update.clip = vmw_stdu_surface_populate_clip;
+ } else {
+ srf_update.calc_fifo_size =
+ vmw_stdu_surface_fifo_size_same_display;
+ }
+
+ srf_update.post_clip = vmw_stdu_surface_populate_update;
+
+ return vmw_du_helper_plane_update(&srf_update);
+}
+
+/**
+ * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
* @plane: display plane
* @old_state: Only used to get crtc info
*
@@ -1278,17 +1603,14 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_crtc *crtc = plane->state->crtc;
struct vmw_screen_target_display_unit *stdu;
struct drm_pending_vblank_event *event;
+ struct vmw_fence_obj *fence = NULL;
struct vmw_private *dev_priv;
int ret;
- /*
- * We cannot really fail this function, so if we do, then output an
- * error and maintain consistent atomic state.
- */
+	/* In case of device error, maintain consistent atomic state */
if (crtc && plane->state->fb) {
struct vmw_framebuffer *vfb =
vmw_framebuffer_to_vfb(plane->state->fb);
- struct drm_vmw_rect vclips;
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
@@ -1296,23 +1618,17 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu->content_fb_type = vps->content_fb_type;
stdu->cpp = vps->cpp;
- vclips.x = crtc->x;
- vclips.y = crtc->y;
- vclips.w = crtc->mode.hdisplay;
- vclips.h = crtc->mode.vdisplay;
-
ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
if (ret)
DRM_ERROR("Failed to bind surface to STDU.\n");
if (vfb->bo)
- ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
- &vclips, 1, 1, true, false,
- crtc);
+ ret = vmw_stdu_plane_update_bo(dev_priv, plane,
+ old_state, vfb, &fence);
else
- ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL,
- &vclips, NULL, 0, 0,
- 1, 1, NULL, crtc);
+ ret = vmw_stdu_plane_update_surface(dev_priv, plane,
+ old_state, vfb,
+ &fence);
if (ret)
DRM_ERROR("Failed to update STDU.\n");
} else {
@@ -1320,12 +1636,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
- /*
- * When disabling a plane, CRTC and FB should always be NULL
- * together, otherwise it's an error.
- * Here primary plane is being disable so blank the screen
- * target display unit, if not already done.
- */
+ /* Blank STDU when fb and crtc are NULL */
if (!stdu->defined)
return;
@@ -1340,36 +1651,25 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
return;
}
+	/* In case of error, vblank event is sent in vmw_du_crtc_atomic_flush */
event = crtc->state->event;
- /*
- * In case of failure and other cases, vblank event will be sent in
- * vmw_du_crtc_atomic_flush.
- */
- if (event && (ret == 0)) {
- struct vmw_fence_obj *fence = NULL;
+ if (event && fence) {
struct drm_file *file_priv = event->base.file_priv;
- vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-
- /*
- * If fence is NULL, then already sync.
- */
- if (fence) {
- ret = vmw_event_fence_action_queue(
- file_priv, fence, &event->base,
- &event->event.vbl.tv_sec,
- &event->event.vbl.tv_usec,
- true);
- if (ret)
- DRM_ERROR("Failed to queue event on fence.\n");
- else
- crtc->state->event = NULL;
-
- vmw_fence_obj_unreference(&fence);
- }
- } else {
- (void) vmw_fifo_flush(dev_priv, false);
+ ret = vmw_event_fence_action_queue(file_priv,
+ fence,
+ &event->base,
+ &event->event.vbl.tv_sec,
+ &event->event.vbl.tv_usec,
+ true);
+ if (ret)
+ DRM_ERROR("Failed to queue event on fence.\n");
+ else
+ crtc->state->event = NULL;
}
+
+ if (fence)
+ vmw_fence_obj_unreference(&fence);
}
@@ -1457,11 +1757,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
stdu->base.pref_active = (unit == 0);
stdu->base.pref_width = dev_priv->initial_width;
stdu->base.pref_height = dev_priv->initial_height;
-
- /*
- * Remove this after enabling atomic because property values can
- * only exist in a state object
- */
stdu->base.is_implicit = false;
/* Initialize primary plane */
@@ -1478,6 +1773,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(primary, &vmw_stdu_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary);
/* Initialize cursor plane */
vmw_du_plane_reset(cursor);
@@ -1506,7 +1802,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_helper_add(connector, &vmw_stdu_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, false);
- vmw_connector_state_to_vcs(connector->state)->is_implicit = false;
ret = drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -1544,11 +1839,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
- if (dev_priv->implicit_placement_property)
- drm_object_attach_property
- (&connector->base,
- dev_priv->implicit_placement_property,
- stdu->base.is_implicit);
return 0;
err_free_unregister:
@@ -1617,8 +1907,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
dev_priv->active_display_unit = vmw_du_screen_target;
- vmw_kms_create_implicit_placement_property(dev_priv, false);
-
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
ret = vmw_stdu_init(dev_priv, i);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 7b1e5a5cbd2c..154eb09aa91e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -42,57 +42,3 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
dev_priv = vmw_priv(file_priv->minor->dev);
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
-
-static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- DRM_INFO("global init.\n");
- return ttm_mem_global_init(ref->object);
-}
-
-static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-int vmw_ttm_global_init(struct vmw_private *dev_priv)
-{
- struct drm_global_reference *global_ref;
- int ret;
-
- global_ref = &dev_priv->mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &vmw_ttm_mem_global_init;
- global_ref->release = &vmw_ttm_mem_global_release;
-
- ret = drm_global_item_ref(global_ref);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed setting up TTM memory accounting.\n");
- return ret;
- }
-
- dev_priv->bo_global_ref.mem_glob =
- dev_priv->mem_global_ref.object;
- global_ref = &dev_priv->bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- ret = drm_global_item_ref(global_ref);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed setting up TTM buffer objects.\n");
- goto out_no_bo;
- }
-
- return 0;
-out_no_bo:
- drm_global_item_unref(&dev_priv->mem_global_ref);
- return ret;
-}
-
-void vmw_ttm_global_release(struct vmw_private *dev_priv)
-{
- drm_global_item_unref(&dev_priv->bo_global_ref.ref);
- drm_global_item_unref(&dev_priv->mem_global_ref);
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index 184025fa938e..fef22753f4de 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -266,7 +266,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
if (!val_buf->bo)
return -ESRCH;
- val_buf->shared = false;
+ val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
bo_node->as_mob = as_mob;
bo_node->cpu_blit = cpu_blit;
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index 11ef17c2d1c1..f5ea32ae8600 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -114,7 +114,7 @@ out_unbind:
component_unbind_all(dev, drm);
out_unregister:
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -124,10 +124,11 @@ static void zx_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
component_unbind_all(dev, drm);
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops zx_drm_master_ops = {
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index ae8c53b4b261..83d236fd893c 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -446,7 +446,6 @@ static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
static void zx_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index a9d2501500a1..163fadb8a33a 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -259,6 +259,8 @@ EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);
void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf)
{
+ WARN_ON_ONCE(buf & 0x7);
+
if (bufnum)
ipu_ch_param_write_field(ch, IPU_FIELD_EBA1, buf >> 3);
else
@@ -268,6 +270,8 @@ EXPORT_SYMBOL_GPL(ipu_cpmem_set_buffer);
void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off)
{
+ WARN_ON_ONCE((u_off & 0x7) || (v_off & 0x7));
+
ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_off / 8);
ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_off / 8);
}
@@ -435,6 +439,8 @@ void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
unsigned int uv_stride,
unsigned int u_offset, unsigned int v_offset)
{
+ WARN_ON_ONCE((u_offset & 0x7) || (v_offset & 0x7));
+
ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
@@ -739,48 +745,56 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUV420:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
- u_offset = U_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
- v_offset = V_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
+ u_offset = image->u_offset ?
+ image->u_offset : U_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = image->v_offset ?
+ image->v_offset : V_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_YVU420:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
- u_offset = U_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
- v_offset = V_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
+ u_offset = image->u_offset ?
+ image->u_offset : V_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = image->v_offset ?
+ image->v_offset : U_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
- v_offset, u_offset);
+ u_offset, v_offset);
break;
case V4L2_PIX_FMT_YUV422P:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
- u_offset = U2_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
- v_offset = V2_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
+ u_offset = image->u_offset ?
+ image->u_offset : U2_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = image->v_offset ?
+ image->v_offset : V2_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_NV12:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
- u_offset = UV_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
- v_offset = 0;
+ u_offset = image->u_offset ?
+ image->u_offset : UV_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = image->v_offset ? image->v_offset : 0;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_NV16:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
- u_offset = UV2_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
- v_offset = 0;
+ u_offset = image->u_offset ?
+ image->u_offset : UV2_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = image->v_offset ? image->v_offset : 0;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
u_offset, v_offset);
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index 67cc820253a9..594c3cbc8291 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -442,36 +442,40 @@ unlock:
}
EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init);
-int ipu_ic_task_init(struct ipu_ic *ic,
- int in_width, int in_height,
- int out_width, int out_height,
- enum ipu_color_space in_cs,
- enum ipu_color_space out_cs)
+int ipu_ic_task_init_rsc(struct ipu_ic *ic,
+ int in_width, int in_height,
+ int out_width, int out_height,
+ enum ipu_color_space in_cs,
+ enum ipu_color_space out_cs,
+ u32 rsc)
{
struct ipu_ic_priv *priv = ic->priv;
- u32 reg, downsize_coeff, resize_coeff;
+ u32 downsize_coeff, resize_coeff;
unsigned long flags;
int ret = 0;
- /* Setup vertical resizing */
- ret = calc_resize_coeffs(ic, in_height, out_height,
- &resize_coeff, &downsize_coeff);
- if (ret)
- return ret;
+ if (!rsc) {
+ /* Setup vertical resizing */
- reg = (downsize_coeff << 30) | (resize_coeff << 16);
+ ret = calc_resize_coeffs(ic, in_height, out_height,
+ &resize_coeff, &downsize_coeff);
+ if (ret)
+ return ret;
+
+ rsc = (downsize_coeff << 30) | (resize_coeff << 16);
- /* Setup horizontal resizing */
- ret = calc_resize_coeffs(ic, in_width, out_width,
- &resize_coeff, &downsize_coeff);
- if (ret)
- return ret;
+ /* Setup horizontal resizing */
+ ret = calc_resize_coeffs(ic, in_width, out_width,
+ &resize_coeff, &downsize_coeff);
+ if (ret)
+ return ret;
- reg |= (downsize_coeff << 14) | resize_coeff;
+ rsc |= (downsize_coeff << 14) | resize_coeff;
+ }
spin_lock_irqsave(&priv->lock, flags);
- ipu_ic_write(ic, reg, ic->reg->rsc);
+ ipu_ic_write(ic, rsc, ic->reg->rsc);
/* Setup color space conversion */
ic->in_cs = in_cs;
@@ -487,6 +491,16 @@ unlock:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
+
+int ipu_ic_task_init(struct ipu_ic *ic,
+ int in_width, int in_height,
+ int out_width, int out_height,
+ enum ipu_color_space in_cs,
+ enum ipu_color_space out_cs)
+{
+ return ipu_ic_task_init_rsc(ic, in_width, in_height, out_width,
+ out_height, in_cs, out_cs, 0);
+}
EXPORT_SYMBOL_GPL(ipu_ic_task_init);
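When non-zero, the rsc argument is written verbatim to the IC resize register; its assumed field layout, consistent with the shifts used above and by the caller in ipu-image-convert.c:

	bits 31:30  vertical downsizing coefficient (0..2, power-of-two shift)
	bits 29:16  vertical bilinear coefficient (step = value / 8192)
	bits 15:14  horizontal downsizing coefficient
	bits 13:0   horizontal bilinear coefficient
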
int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index f4081962784c..13103ab86050 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -37,17 +37,36 @@
* when double_buffering boolean is set).
*
* Note that the input frame must be split up into the same number
- * of tiles as the output frame.
+ * of tiles as the output frame:
*
- * FIXME: at this point there is no attempt to deal with visible seams
- * at the tile boundaries when upscaling. The seams are caused by a reset
- * of the bilinear upscale interpolation when starting a new tile. The
- * seams are barely visible for small upscale factors, but become
- * increasingly visible as the upscale factor gets larger, since more
- * interpolated pixels get thrown out at the tile boundaries. A possilble
- * fix might be to overlap tiles of different sizes, but this must be done
- * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
- * alignment restrictions of each tile.
+ * +---------+-----+
+ * +-----+---+ | A | B |
+ * | A | B | | | |
+ * +-----+---+ --> +---------+-----+
+ * | C | D | | C | D |
+ * +-----+---+ | | |
+ * +---------+-----+
+ *
+ * Clockwise 90° rotations are handled by first rescaling into a
+ * reusable temporary tile buffer and then rotating with the 8x8
+ * block rotator, writing to the correct destination:
+ *
+ * +-----+-----+
+ * | | |
+ * +-----+---+ +---------+ | C | A |
+ * | A | B | | A,B, | | | | |
+ * +-----+---+ --> | C,D | | --> | | |
+ * | C | D | +---------+ +-----+-----+
+ * +-----+---+ | D | B |
+ * | | |
+ * +-----+-----+
+ *
+ * If the 8x8 block rotator is used, horizontal or vertical flipping
+ * is done during the rotation step, otherwise flipping is done
+ * during the scaling step.
+ * With rotation or flipping, tile order changes between input and
+ * output image. Tiles are numbered row major from top left to bottom
+ * right for both input and output image.
*/
#define MAX_STRIPES_W 4
@@ -84,6 +103,8 @@ struct ipu_image_convert_dma_chan {
struct ipu_image_tile {
u32 width;
u32 height;
+ u32 left;
+ u32 top;
/* size and strides are in bytes */
u32 size;
u32 stride;
@@ -135,6 +156,12 @@ struct ipu_image_convert_ctx {
struct ipu_image_convert_image in;
struct ipu_image_convert_image out;
enum ipu_rotate_mode rot_mode;
+ u32 downsize_coeff_h;
+ u32 downsize_coeff_v;
+ u32 image_resize_coeff_h;
+ u32 image_resize_coeff_v;
+ u32 resize_coeffs_h[MAX_STRIPES_W];
+ u32 resize_coeffs_v[MAX_STRIPES_H];
/* intermediate buffer for rotation */
struct ipu_image_convert_dma_buf rot_intermediate[2];
@@ -300,12 +327,11 @@ static void dump_format(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_priv *priv = chan->priv;
dev_dbg(priv->ipu->dev,
- "task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
+ "task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
chan->ic_task, ctx,
ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
ic_image->base.pix.width, ic_image->base.pix.height,
ic_image->num_cols, ic_image->num_rows,
- ic_image->tile[0].width, ic_image->tile[0].height,
ic_image->fmt->fourcc & 0xff,
(ic_image->fmt->fourcc >> 8) & 0xff,
(ic_image->fmt->fourcc >> 16) & 0xff,
@@ -353,24 +379,459 @@ static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
static inline int num_stripes(int dim)
{
- if (dim <= 1024)
- return 1;
- else if (dim <= 2048)
+ return (dim - 1) / 1024 + 1;
+}
+
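The old 1/2/4 ladder becomes a closed form equivalent to DIV_ROUND_UP(dim, 1024) for any positive dim; a standalone check (illustrative harness, not part of the tree):

	#include <assert.h>

	static int num_stripes(int dim)
	{
		return (dim - 1) / 1024 + 1;
	}

	int main(void)
	{
		assert(num_stripes(1024) == 1);	/* old code: 1 */
		assert(num_stripes(2048) == 2);	/* old code: 2 */
		assert(num_stripes(2049) == 3);	/* old code returned 4 */
		assert(num_stripes(4096) == 4);
		return 0;
	}
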
+/*
+ * Calculate downsizing coefficients, which are the same for all tiles,
+ * and bilinear resizing coefficients, which are used to find the best
+ * seam positions.
+ */
+static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image *in,
+ struct ipu_image *out)
+{
+ u32 downsized_width = in->rect.width;
+ u32 downsized_height = in->rect.height;
+ u32 downsize_coeff_v = 0;
+ u32 downsize_coeff_h = 0;
+ u32 resized_width = out->rect.width;
+ u32 resized_height = out->rect.height;
+ u32 resize_coeff_h;
+ u32 resize_coeff_v;
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ resized_width = out->rect.height;
+ resized_height = out->rect.width;
+ }
+
+ /* Do not let invalid input lead to an endless loop below */
+ if (WARN_ON(resized_width == 0 || resized_height == 0))
+ return -EINVAL;
+
+ while (downsized_width >= resized_width * 2) {
+ downsized_width >>= 1;
+ downsize_coeff_h++;
+ }
+
+ while (downsized_height >= resized_height * 2) {
+ downsized_height >>= 1;
+ downsize_coeff_v++;
+ }
+
+ /*
+ * Calculate the bilinear resizing coefficients that could be used if
+ * we were converting with a single tile. The bottom right output pixel
+ * should sample as close as possible to the bottom right input pixel
+ * out of the decimator, but not overshoot it:
+ */
+ resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
+ resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);
+
+ dev_dbg(ctx->chan->priv->ipu->dev,
+ "%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
+ __func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
+ resize_coeff_v, ctx->in.num_cols, ctx->in.num_rows);
+
+ if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
+ resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
+ return -EINVAL;
+
+ ctx->downsize_coeff_h = downsize_coeff_h;
+ ctx->downsize_coeff_v = downsize_coeff_v;
+ ctx->image_resize_coeff_h = resize_coeff_h;
+ ctx->image_resize_coeff_v = resize_coeff_v;
+
+ return 0;
+}
+
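A worked example of the coefficients (illustrative numbers): scaling 1920 input pixels to 640 output pixels horizontally. The decimator halves once (1920 >= 2 * 640, then 960 < 2 * 640), so downsize_coeff_h = 1 and the downsized width is 960; the bilinear coefficient is then 8192 * (960 - 1) / (640 - 1) = 12294, i.e. each output pixel advances 12294 / 8192 ≈ 1.5 pixels through the downsized line, and 12294 <= 0x3fff so the setup is accepted.
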
+#define round_closest(x, y) round_down((x) + (y)/2, (y))
+
+/*
+ * Find the best aligned seam position in the interval [out_start, out_end].
+ * Rotation and image offsets are out of scope.
+ *
+ * @out_start:	start of interval, must be within 1024 pixels / lines
+ * of out_end
+ * @out_end: end of interval, smaller than or equal to out_edge
+ * @in_edge: input right / bottom edge
+ * @out_edge: output right / bottom edge
+ * @in_align: input alignment, either horizontal 8-byte line start address
+ * alignment, or pixel alignment due to image format
+ * @out_align: output alignment, either horizontal 8-byte line start address
+ * alignment, or pixel alignment due to image format or rotator
+ * block size
+ * @in_burst: horizontal input burst size in case of horizontal flip
+ * @out_burst: horizontal output burst size or rotator block size
+ * @downsize_coeff: downsizing section coefficient
+ * @resize_coeff: main processing section resizing coefficient
+ * @_in_seam: aligned input seam position return value
+ * @_out_seam: aligned output seam position return value
+ */
+static void find_best_seam(struct ipu_image_convert_ctx *ctx,
+ unsigned int out_start,
+ unsigned int out_end,
+ unsigned int in_edge,
+ unsigned int out_edge,
+ unsigned int in_align,
+ unsigned int out_align,
+ unsigned int in_burst,
+ unsigned int out_burst,
+ unsigned int downsize_coeff,
+ unsigned int resize_coeff,
+ u32 *_in_seam,
+ u32 *_out_seam)
+{
+ struct device *dev = ctx->chan->priv->ipu->dev;
+ unsigned int out_pos;
+ /* Input / output seam position candidates */
+ unsigned int out_seam = 0;
+ unsigned int in_seam = 0;
+ unsigned int min_diff = UINT_MAX;
+
+ /*
+ * Output tiles must start at a multiple of 8 bytes horizontally and
+ * possibly at an even line horizontally depending on the pixel format.
+ * Only consider output aligned positions for the seam.
+ */
+ out_start = round_up(out_start, out_align);
+ for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
+ unsigned int in_pos;
+ unsigned int in_pos_aligned;
+ unsigned int abs_diff;
+
+ /*
+ * Tiles in the right row / bottom column may not be allowed to
+ * overshoot horizontally / vertically. out_burst may be the
+ * actual DMA burst size, or the rotator block size.
+ */
+ if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
+ continue;
+
+ /*
+ * Input sample position, corresponding to out_pos, 19.13 fixed
+ * point.
+ */
+ in_pos = (out_pos * resize_coeff) << downsize_coeff;
+ /*
+ * The closest input sample position that we could actually
+ * start the input tile at, 19.13 fixed point.
+ */
+ in_pos_aligned = round_closest(in_pos, 8192U * in_align);
+
+ if ((in_burst > 1) &&
+ (in_edge - in_pos_aligned / 8192U) % in_burst)
+ continue;
+
+ if (in_pos < in_pos_aligned)
+ abs_diff = in_pos_aligned - in_pos;
+ else
+ abs_diff = in_pos - in_pos_aligned;
+
+ if (abs_diff < min_diff) {
+ in_seam = in_pos_aligned;
+ out_seam = out_pos;
+ min_diff = abs_diff;
+ }
+ }
+
+ *_out_seam = out_seam;
+ /* Convert 19.13 fixed point to integer seam position */
+ *_in_seam = DIV_ROUND_CLOSEST(in_seam, 8192U);
+
+ dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) diff %u.%03u\n",
+ __func__, out_seam, out_align, out_start, out_end,
+ *_in_seam, in_align, min_diff / 8192,
+ DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
+}
+
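To make the 19.13 fixed-point bookkeeping concrete (numbers are illustrative): with resize_coeff = 12294, downsize_coeff = 1 and an 8-pixel input alignment, an output seam candidate at out_pos = 512 gives

	in_pos         = (512 * 12294) << 1 = 12589056     /* 1536.75 px */
	in_pos_aligned = round_closest(12589056, 8192 * 8)
	               = 12582912                          /* 1536.00 px */
	abs_diff       = 6144                              /* 0.75 px    */

so this candidate costs three quarters of an input pixel, and the loop keeps whichever out_pos minimizes that cost.
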
+/*
+ * Tile left edges are required to be aligned to multiples of 8 bytes
+ * by the IDMAC.
+ */
+static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
+{
+ if (fmt->planar)
+ return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
+ else
+ return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
+}
+
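All of these alignments are in pixels and reduce to the same 8-byte IDMAC step: 2 px at 32 bpp, 4 px at 16 bpp and 8 px at 8 bpp each span 8 bytes, while the planar cases additionally keep the subsampled chroma line starts 8-byte aligned.
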
+/*
+ * Tile top edge alignment is only limited by chroma subsampling.
+ */
+static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
+{
+ return fmt->uv_height_dec > 1 ? 2 : 1;
+}
+
+static inline u32 tile_width_align(enum ipu_image_convert_type type,
+ const struct ipu_image_pixfmt *fmt,
+ enum ipu_rotate_mode rot_mode)
+{
+ if (type == IMAGE_CONVERT_IN) {
+ /*
+ * The IC burst reads 8 pixels at a time. Reading beyond the
+ * end of the line is usually acceptable. Those pixels are
+ * ignored, unless the IC has to write the scaled line in
+ * reverse.
+ */
+ return (!ipu_rot_mode_is_irt(rot_mode) &&
+ (rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
+ }
+
+ /*
+ * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
+ * formats to guarantee 8-byte aligned line start addresses in the
+ * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
+ * for all other formats.
+ */
+ return (ipu_rot_mode_is_irt(rot_mode) &&
+ fmt->planar && !fmt->uv_packed) ?
+ 8 * fmt->uv_width_dec : 8;
+}
+
+static inline u32 tile_height_align(enum ipu_image_convert_type type,
+ const struct ipu_image_pixfmt *fmt,
+ enum ipu_rotate_mode rot_mode)
+{
+ if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
return 2;
+
+ /*
+ * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
+ * formats to guarantee 8-byte aligned line start addresses in the
+ * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
+ * for all other formats.
+ */
+ return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
+}
+
+/*
+ * Fill in left position and width for all tiles in an input column, and
+ * for all corresponding output tiles. If the 90° rotator is used, the output
+ * tiles are in a row, and output tile top position and height are set.
+ */
+static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
+ unsigned int col,
+ struct ipu_image_convert_image *in,
+ unsigned int in_left, unsigned int in_width,
+ struct ipu_image_convert_image *out,
+ unsigned int out_left, unsigned int out_width)
+{
+ unsigned int row, tile_idx;
+ struct ipu_image_tile *in_tile, *out_tile;
+
+ for (row = 0; row < in->num_rows; row++) {
+ tile_idx = in->num_cols * row + col;
+ in_tile = &in->tile[tile_idx];
+ out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
+
+ in_tile->left = in_left;
+ in_tile->width = in_width;
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ out_tile->top = out_left;
+ out_tile->height = out_width;
+ } else {
+ out_tile->left = out_left;
+ out_tile->width = out_width;
+ }
+ }
+}
+
+/*
+ * Fill in top position and height for all tiles in an input row, and
+ * for all corresponding output tiles. If the 90° rotator is used, the output
+ * tiles are in a column, and output tile left position and width are set.
+ */
+static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
+ struct ipu_image_convert_image *in,
+ unsigned int in_top, unsigned int in_height,
+ struct ipu_image_convert_image *out,
+ unsigned int out_top, unsigned int out_height)
+{
+ unsigned int col, tile_idx;
+ struct ipu_image_tile *in_tile, *out_tile;
+
+ for (col = 0; col < in->num_cols; col++) {
+ tile_idx = in->num_cols * row + col;
+ in_tile = &in->tile[tile_idx];
+ out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
+
+ in_tile->top = in_top;
+ in_tile->height = in_height;
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ out_tile->left = out_top;
+ out_tile->width = out_height;
+ } else {
+ out_tile->top = out_top;
+ out_tile->height = out_height;
+ }
+ }
+}
+
+/*
+ * Find the best horizontal and vertical seam positions to split into tiles.
+ * Minimize the fractional part of the input sampling position for the
+ * top / left pixels of each tile.
+ */
+static void find_seams(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *in,
+ struct ipu_image_convert_image *out)
+{
+ struct device *dev = ctx->chan->priv->ipu->dev;
+ unsigned int resized_width = out->base.rect.width;
+ unsigned int resized_height = out->base.rect.height;
+ unsigned int col;
+ unsigned int row;
+ unsigned int in_left_align = tile_left_align(in->fmt);
+ unsigned int in_top_align = tile_top_align(in->fmt);
+ unsigned int out_left_align = tile_left_align(out->fmt);
+ unsigned int out_top_align = tile_top_align(out->fmt);
+ unsigned int out_width_align = tile_width_align(out->type, out->fmt,
+ ctx->rot_mode);
+ unsigned int out_height_align = tile_height_align(out->type, out->fmt,
+ ctx->rot_mode);
+ unsigned int in_right = in->base.rect.width;
+ unsigned int in_bottom = in->base.rect.height;
+ unsigned int out_right = out->base.rect.width;
+ unsigned int out_bottom = out->base.rect.height;
+ unsigned int flipped_out_left;
+ unsigned int flipped_out_top;
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ /* Switch width/height and align top left to IRT block size */
+ resized_width = out->base.rect.height;
+ resized_height = out->base.rect.width;
+ out_left_align = out_height_align;
+ out_top_align = out_width_align;
+ out_width_align = out_left_align;
+ out_height_align = out_top_align;
+ out_right = out->base.rect.height;
+ out_bottom = out->base.rect.width;
+ }
+
+ for (col = in->num_cols - 1; col > 0; col--) {
+ bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
+ !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
+ bool allow_out_overshoot = (col < in->num_cols - 1) &&
+ !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
+ unsigned int out_start;
+ unsigned int out_end;
+ unsigned int in_left;
+ unsigned int out_left;
+
+ /*
+ * Align input width to burst length if the scaling step flips
+ * horizontally.
+ */
+
+ /* Start within 1024 pixels of the right edge */
+ out_start = max_t(int, 0, out_right - 1024);
+ /* End before having to add more columns to the left */
+ out_end = min_t(unsigned int, out_right, col * 1024);
+
+ find_best_seam(ctx, out_start, out_end,
+ in_right, out_right,
+ in_left_align, out_left_align,
+ allow_in_overshoot ? 1 : 8 /* burst length */,
+ allow_out_overshoot ? 1 : out_width_align,
+ ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
+ &in_left, &out_left);
+
+ if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
+ flipped_out_left = resized_width - out_right;
+ else
+ flipped_out_left = out_left;
+
+ fill_tile_column(ctx, col, in, in_left, in_right - in_left,
+ out, flipped_out_left, out_right - out_left);
+
+ dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
+ in_left, in_right - in_left,
+ flipped_out_left, out_right - out_left);
+
+ in_right = in_left;
+ out_right = out_left;
+ }
+
+ flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
+ resized_width - out_right : 0;
+
+ fill_tile_column(ctx, 0, in, 0, in_right,
+ out, flipped_out_left, out_right);
+
+ dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
+ in_right, flipped_out_left, out_right);
+
+ for (row = in->num_rows - 1; row > 0; row--) {
+ bool allow_overshoot = row < in->num_rows - 1;
+ unsigned int out_start;
+ unsigned int out_end;
+ unsigned int in_top;
+ unsigned int out_top;
+
+ /* Start within 1024 lines of the bottom edge */
+ out_start = max_t(int, 0, out_bottom - 1024);
+ /* End before having to add more rows above */
+ out_end = min_t(unsigned int, out_bottom, row * 1024);
+
+ find_best_seam(ctx, out_start, out_end,
+ in_bottom, out_bottom,
+ in_top_align, out_top_align,
+ 1, allow_overshoot ? 1 : out_height_align,
+ ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
+ &in_top, &out_top);
+
+ if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
+ ipu_rot_mode_is_irt(ctx->rot_mode))
+ flipped_out_top = resized_height - out_bottom;
+ else
+ flipped_out_top = out_top;
+
+ fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
+ out, flipped_out_top, out_bottom - out_top);
+
+ dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
+ in_top, in_bottom - in_top,
+ flipped_out_top, out_bottom - out_top);
+
+ in_bottom = in_top;
+ out_bottom = out_top;
+ }
+
+ if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
+ ipu_rot_mode_is_irt(ctx->rot_mode))
+ flipped_out_top = resized_height - out_bottom;
else
- return 4;
+ flipped_out_top = 0;
+
+ fill_tile_row(ctx, 0, in, 0, in_bottom,
+ out, flipped_out_top, out_bottom);
+
+ dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
+ in_bottom, flipped_out_top, out_bottom);
}
static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_image *image)
{
- int i;
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ unsigned int i;
for (i = 0; i < ctx->num_tiles; i++) {
- struct ipu_image_tile *tile = &image->tile[i];
+ struct ipu_image_tile *tile;
+ const unsigned int row = i / image->num_cols;
+ const unsigned int col = i % image->num_cols;
+
+ if (image->type == IMAGE_CONVERT_OUT)
+ tile = &image->tile[ctx->out_tile_map[i]];
+ else
+ tile = &image->tile[i];
- tile->height = image->base.pix.height / image->num_rows;
- tile->width = image->base.pix.width / image->num_cols;
tile->size = ((tile->height * image->fmt->bpp) >> 3) *
tile->width;
@@ -383,6 +844,13 @@ static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
tile->rot_stride =
(image->fmt->bpp * tile->height) >> 3;
}
+
+ dev_dbg(priv->ipu->dev,
+ "task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
+ chan->ic_task, ctx,
+ image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
+ row, col,
+ tile->width, tile->height, tile->left, tile->top);
}
}
@@ -459,14 +927,14 @@ static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
}
}
-static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
- struct ipu_image_convert_image *image)
+static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *image)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
const struct ipu_image_pixfmt *fmt = image->fmt;
unsigned int row, col, tile = 0;
- u32 H, w, h, y_stride, uv_stride;
+ u32 H, top, y_stride, uv_stride;
u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
u32 y_row_off, y_col_off, y_off;
u32 y_size, uv_size;
@@ -483,13 +951,12 @@ static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
for (row = 0; row < image->num_rows; row++) {
- w = image->tile[tile].width;
- h = image->tile[tile].height;
- y_row_off = row * h * y_stride;
- uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;
+ top = image->tile[tile].top;
+ y_row_off = top * y_stride;
+ uv_row_off = (top * uv_stride) / fmt->uv_height_dec;
for (col = 0; col < image->num_cols; col++) {
- y_col_off = col * w;
+ y_col_off = image->tile[tile].left;
uv_col_off = y_col_off / fmt->uv_width_dec;
if (fmt->uv_packed)
uv_col_off *= 2;
@@ -509,24 +976,30 @@ static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
image->tile[tile].u_off = u_off;
image->tile[tile++].v_off = v_off;
- dev_dbg(priv->ipu->dev,
- "task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
- chan->ic_task, ctx,
- image->type == IMAGE_CONVERT_IN ?
- "Input" : "Output", row, col,
- y_off, u_off, v_off);
+ if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
+ dev_err(priv->ipu->dev,
+ "task %u: ctx %p: %s@[%d,%d]: "
+ "y_off %08x, u_off %08x, v_off %08x\n",
+ chan->ic_task, ctx,
+ image->type == IMAGE_CONVERT_IN ?
+ "Input" : "Output", row, col,
+ y_off, u_off, v_off);
+ return -EINVAL;
+ }
}
}
+
+ return 0;
}
-static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
- struct ipu_image_convert_image *image)
+static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *image)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
const struct ipu_image_pixfmt *fmt = image->fmt;
unsigned int row, col, tile = 0;
- u32 w, h, bpp, stride;
+ u32 bpp, stride, offset;
u32 row_off, col_off;
/* setup some convenience vars */
@@ -534,34 +1007,183 @@ static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
bpp = fmt->bpp;
for (row = 0; row < image->num_rows; row++) {
- w = image->tile[tile].width;
- h = image->tile[tile].height;
- row_off = row * h * stride;
+ row_off = image->tile[tile].top * stride;
for (col = 0; col < image->num_cols; col++) {
- col_off = (col * w * bpp) >> 3;
+ col_off = (image->tile[tile].left * bpp) >> 3;
+
+ offset = row_off + col_off;
- image->tile[tile].offset = row_off + col_off;
+ image->tile[tile].offset = offset;
image->tile[tile].u_off = 0;
image->tile[tile++].v_off = 0;
- dev_dbg(priv->ipu->dev,
- "task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
- chan->ic_task, ctx,
- image->type == IMAGE_CONVERT_IN ?
- "Input" : "Output", row, col,
- row_off + col_off);
+ if (offset & 0x7) {
+ dev_err(priv->ipu->dev,
+ "task %u: ctx %p: %s@[%d,%d]: "
+ "phys %08x\n",
+ chan->ic_task, ctx,
+ image->type == IMAGE_CONVERT_IN ?
+ "Input" : "Output", row, col,
+ row_off + col_off);
+ return -EINVAL;
+ }
}
}
+
+ return 0;
}
-static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
+static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_image *image)
{
if (image->fmt->planar)
- calc_tile_offsets_planar(ctx, image);
+ return calc_tile_offsets_planar(ctx, image);
+
+ return calc_tile_offsets_packed(ctx, image);
+}
+
+/*
+ * Calculate the resizing ratio for the IC main processing section given input
+ * size, fixed downsizing coefficient, and output size.
+ * Either round to closest for the next tile's first pixel to minimize seams
+ * and distortion (for all but right column / bottom row), or round down to
+ * avoid sampling beyond the edges of the input image for this tile's last
+ * pixel.
+ * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
+ */
+static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
+ u32 output_size, bool allow_overshoot)
+{
+ u32 downsized = input_size >> downsize_coeff;
+
+ if (allow_overshoot)
+ return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
else
- calc_tile_offsets_packed(ctx, image);
+ return 8192 * (downsized - 1) / (output_size - 1);
+}
+
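Concretely (illustrative): with a downsized input extent of 960 and an output tile extent of 640, the overshoot variant yields DIV_ROUND_CLOSEST(8192 * 960, 640) = 12288, so 640 output pixels span exactly 960 input pixels and the next tile's first sample continues seamlessly; the edge variant yields 8192 * (960 - 1) / (640 - 1) = 12294, which keeps the last output pixel (639 * 12294 / 8192 ≈ 958.96) at or before the tile's last input pixel (959).
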
+/*
+ * Slightly modify resize coefficients per tile to hide the bilinear
+ * interpolator reset at tile borders, shifting the right / bottom edge
+ * by up to a half input pixel. This removes noticeable seams between
+ * tiles at higher upscaling factors.
+ */
+static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
+{
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ struct ipu_image_tile *in_tile, *out_tile;
+ unsigned int col, row, tile_idx;
+ unsigned int last_output;
+
+ for (col = 0; col < ctx->in.num_cols; col++) {
+ bool closest = (col < ctx->in.num_cols - 1) &&
+ !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
+ u32 resized_width;
+ u32 resize_coeff_h;
+
+ tile_idx = col;
+ in_tile = &ctx->in.tile[tile_idx];
+ out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode))
+ resized_width = out_tile->height;
+ else
+ resized_width = out_tile->width;
+
+ resize_coeff_h = calc_resize_coeff(in_tile->width,
+ ctx->downsize_coeff_h,
+ resized_width, closest);
+
+ dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
+ __func__, col, resize_coeff_h);
+
+ for (row = 0; row < ctx->in.num_rows; row++) {
+ tile_idx = row * ctx->in.num_cols + col;
+ in_tile = &ctx->in.tile[tile_idx];
+ out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+
+ /*
+ * With the horizontal scaling factor known, round up
+ * resized width (output width or height) to burst size.
+ */
+ if (ipu_rot_mode_is_irt(ctx->rot_mode))
+ out_tile->height = round_up(resized_width, 8);
+ else
+ out_tile->width = round_up(resized_width, 8);
+
+ /*
+ * Calculate input width from the last accessed input
+ * pixel given resized width and scaling coefficients.
+ * Round up to burst size.
+ */
+ last_output = round_up(resized_width, 8) - 1;
+ if (closest)
+ last_output++;
+ in_tile->width = round_up(
+ (DIV_ROUND_UP(last_output * resize_coeff_h,
+ 8192) + 1)
+ << ctx->downsize_coeff_h, 8);
+ }
+
+ ctx->resize_coeffs_h[col] = resize_coeff_h;
+ }
+
+ for (row = 0; row < ctx->in.num_rows; row++) {
+ bool closest = (row < ctx->in.num_rows - 1) &&
+ !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
+ u32 resized_height;
+ u32 resize_coeff_v;
+
+ tile_idx = row * ctx->in.num_cols;
+ in_tile = &ctx->in.tile[tile_idx];
+ out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode))
+ resized_height = out_tile->width;
+ else
+ resized_height = out_tile->height;
+
+ resize_coeff_v = calc_resize_coeff(in_tile->height,
+ ctx->downsize_coeff_v,
+ resized_height, closest);
+
+ dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
+ __func__, row, resize_coeff_v);
+
+ for (col = 0; col < ctx->in.num_cols; col++) {
+ tile_idx = row * ctx->in.num_cols + col;
+ in_tile = &ctx->in.tile[tile_idx];
+ out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+
+ /*
+ * With the vertical scaling factor known, round up
+ * resized height (output width or height) to IDMAC
+ * limitations.
+ */
+ if (ipu_rot_mode_is_irt(ctx->rot_mode))
+ out_tile->width = round_up(resized_height, 2);
+ else
+ out_tile->height = round_up(resized_height, 2);
+
+ /*
+			 * Calculate input height from the last accessed input
+ * pixel given resized height and scaling coefficients.
+ * Align to IDMAC restrictions.
+ */
+ last_output = round_up(resized_height, 2) - 1;
+ if (closest)
+ last_output++;
+ in_tile->height = round_up(
+ (DIV_ROUND_UP(last_output * resize_coeff_v,
+ 8192) + 1)
+ << ctx->downsize_coeff_v, 2);
+ }
+
+ ctx->resize_coeffs_v[row] = resize_coeff_v;
+ }
}
/*
@@ -611,7 +1233,8 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
struct ipuv3_channel *channel,
struct ipu_image_convert_image *image,
enum ipu_rotate_mode rot_mode,
- bool rot_swap_width_height)
+ bool rot_swap_width_height,
+ unsigned int tile)
{
struct ipu_image_convert_chan *chan = ctx->chan;
unsigned int burst_size;
@@ -621,23 +1244,23 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
unsigned int tile_idx[2];
if (image->type == IMAGE_CONVERT_OUT) {
- tile_idx[0] = ctx->out_tile_map[0];
+ tile_idx[0] = ctx->out_tile_map[tile];
tile_idx[1] = ctx->out_tile_map[1];
} else {
- tile_idx[0] = 0;
+ tile_idx[0] = tile;
tile_idx[1] = 1;
}
if (rot_swap_width_height) {
- width = image->tile[0].height;
- height = image->tile[0].width;
- stride = image->tile[0].rot_stride;
+ width = image->tile[tile_idx[0]].height;
+ height = image->tile[tile_idx[0]].width;
+ stride = image->tile[tile_idx[0]].rot_stride;
addr0 = ctx->rot_intermediate[0].phys;
if (ctx->double_buffering)
addr1 = ctx->rot_intermediate[1].phys;
} else {
- width = image->tile[0].width;
- height = image->tile[0].height;
+ width = image->tile[tile_idx[0]].width;
+ height = image->tile[tile_idx[0]].height;
stride = image->stride;
addr0 = image->base.phys0 +
image->tile[tile_idx[0]].offset;
@@ -655,12 +1278,12 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
tile_image.pix.pixelformat = image->fmt->fourcc;
tile_image.phys0 = addr0;
tile_image.phys1 = addr1;
- ipu_cpmem_set_image(channel, &tile_image);
+ if (image->fmt->planar && !rot_swap_width_height) {
+ tile_image.u_offset = image->tile[tile_idx[0]].u_off;
+ tile_image.v_offset = image->tile[tile_idx[0]].v_off;
+ }
- if (image->fmt->planar && !rot_swap_width_height)
- ipu_cpmem_set_uv_offset(channel,
- image->tile[tile_idx[0]].u_off,
- image->tile[tile_idx[0]].v_off);
+ ipu_cpmem_set_image(channel, &tile_image);
if (rot_mode)
ipu_cpmem_set_rotation(channel, rot_mode);
@@ -687,7 +1310,7 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
}
-static int convert_start(struct ipu_image_convert_run *run)
+static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
{
struct ipu_image_convert_ctx *ctx = run->ctx;
struct ipu_image_convert_chan *chan = ctx->chan;
@@ -695,31 +1318,47 @@ static int convert_start(struct ipu_image_convert_run *run)
struct ipu_image_convert_image *s_image = &ctx->in;
struct ipu_image_convert_image *d_image = &ctx->out;
enum ipu_color_space src_cs, dest_cs;
+ unsigned int dst_tile = ctx->out_tile_map[tile];
unsigned int dest_width, dest_height;
+ unsigned int col, row;
+ u32 rsc;
int ret;
- dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
- __func__, chan->ic_task, ctx, run);
+ dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
+ __func__, chan->ic_task, ctx, run, tile, dst_tile);
src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
/* swap width/height for resizer */
- dest_width = d_image->tile[0].height;
- dest_height = d_image->tile[0].width;
+ dest_width = d_image->tile[dst_tile].height;
+ dest_height = d_image->tile[dst_tile].width;
} else {
- dest_width = d_image->tile[0].width;
- dest_height = d_image->tile[0].height;
+ dest_width = d_image->tile[dst_tile].width;
+ dest_height = d_image->tile[dst_tile].height;
}
+ row = tile / s_image->num_cols;
+ col = tile % s_image->num_cols;
+
+ rsc = (ctx->downsize_coeff_v << 30) |
+ (ctx->resize_coeffs_v[row] << 16) |
+ (ctx->downsize_coeff_h << 14) |
+ (ctx->resize_coeffs_h[col]);
+
+ dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
+ __func__, s_image->tile[tile].width,
+ s_image->tile[tile].height, dest_width, dest_height, rsc);
+
/* setup the IC resizer and CSC */
- ret = ipu_ic_task_init(chan->ic,
- s_image->tile[0].width,
- s_image->tile[0].height,
+ ret = ipu_ic_task_init_rsc(chan->ic,
+ s_image->tile[tile].width,
+ s_image->tile[tile].height,
dest_width,
dest_height,
- src_cs, dest_cs);
+ src_cs, dest_cs,
+ rsc);
if (ret) {
dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
return ret;
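
The rsc word assembled above packs four resize fields into one register value. A minimal standalone sketch of that packing, with the field positions taken from the hunk itself and the coefficient values invented for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Vertical downsize at bit 30, vertical resize at bit 16,
 * horizontal downsize at bit 14, horizontal resize at bit 0. */
static uint32_t pack_rsc(uint32_t down_v, uint32_t resize_v,
			 uint32_t down_h, uint32_t resize_h)
{
	return (down_v << 30) | (resize_v << 16) |
	       (down_h << 14) | resize_h;
}

int main(void)
{
	/* e.g. no downsizing, 14-bit resize coefficients per axis */
	printf("rsc = 0x%08" PRIx32 "\n", pack_rsc(0, 0x2000, 0, 0x2000));
	return 0;
}
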
@@ -727,27 +1366,27 @@ static int convert_start(struct ipu_image_convert_run *run)
/* init the source MEM-->IC PP IDMAC channel */
init_idmac_channel(ctx, chan->in_chan, s_image,
- IPU_ROTATE_NONE, false);
+ IPU_ROTATE_NONE, false, tile);
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
/* init the IC PP-->MEM IDMAC channel */
init_idmac_channel(ctx, chan->out_chan, d_image,
- IPU_ROTATE_NONE, true);
+ IPU_ROTATE_NONE, true, tile);
/* init the MEM-->IC PP ROT IDMAC channel */
init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
- ctx->rot_mode, true);
+ ctx->rot_mode, true, tile);
/* init the destination IC PP ROT-->MEM IDMAC channel */
init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
- IPU_ROTATE_NONE, false);
+ IPU_ROTATE_NONE, false, tile);
/* now link IC PP-->MEM to MEM-->IC PP ROT */
ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
} else {
/* init the destination IC PP-->MEM IDMAC channel */
init_idmac_channel(ctx, chan->out_chan, d_image,
- ctx->rot_mode, false);
+ ctx->rot_mode, false, tile);
}
/* enable the IC */
@@ -805,7 +1444,7 @@ static int do_run(struct ipu_image_convert_run *run)
list_del(&run->list);
chan->current_run = run;
- return convert_start(run);
+ return convert_start(run, 0);
}
/* hold irqlock when calling */
@@ -896,7 +1535,7 @@ static irqreturn_t do_bh(int irq, void *dev_id)
dev_dbg(priv->ipu->dev,
"%s: task %u: signaling abort for ctx %p\n",
__func__, chan->ic_task, ctx);
- complete(&ctx->aborted);
+ complete_all(&ctx->aborted);
}
}
@@ -908,6 +1547,24 @@ static irqreturn_t do_bh(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
+{
+ unsigned int cur_tile = ctx->next_tile - 1;
+ unsigned int next_tile = ctx->next_tile;
+
+ if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
+ ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
+ ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
+ ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
+ ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
+ ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
+ ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
+ ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
+ return true;
+
+ return false;
+}
+
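
ic_settings_changed() boils down to comparing the per-column and per-row resize coefficients of two consecutive tiles. A compact userspace sketch of the same test, with made-up coefficient tables (the tile width/height comparisons are omitted for brevity):

#include <stdbool.h>
#include <stdio.h>

static bool coeffs_changed(const unsigned int *h, const unsigned int *v,
			   unsigned int num_cols, unsigned int next_tile)
{
	unsigned int cur = next_tile - 1;

	/* column index selects the horizontal coefficient,
	 * row index the vertical one */
	return h[cur % num_cols] != h[next_tile % num_cols] ||
	       v[cur / num_cols] != v[next_tile / num_cols];
}

int main(void)
{
	unsigned int h[2] = { 0x2000, 0x1f80 };	/* per column */
	unsigned int v[2] = { 0x2000, 0x2000 };	/* per row */

	/* 2x2 tiling: moving from tile 0 to tile 1 crosses a column */
	printf("reconfigure: %d\n", coeffs_changed(h, v, 2, 1));
	return 0;
}
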
/* hold irqlock when calling */
static irqreturn_t do_irq(struct ipu_image_convert_run *run)
{
@@ -951,27 +1608,32 @@ static irqreturn_t do_irq(struct ipu_image_convert_run *run)
* not done, place the next tile buffers.
*/
if (!ctx->double_buffering) {
-
- src_tile = &s_image->tile[ctx->next_tile];
- dst_idx = ctx->out_tile_map[ctx->next_tile];
- dst_tile = &d_image->tile[dst_idx];
-
- ipu_cpmem_set_buffer(chan->in_chan, 0,
- s_image->base.phys0 + src_tile->offset);
- ipu_cpmem_set_buffer(outch, 0,
- d_image->base.phys0 + dst_tile->offset);
- if (s_image->fmt->planar)
- ipu_cpmem_set_uv_offset(chan->in_chan,
- src_tile->u_off,
- src_tile->v_off);
- if (d_image->fmt->planar)
- ipu_cpmem_set_uv_offset(outch,
- dst_tile->u_off,
- dst_tile->v_off);
-
- ipu_idmac_select_buffer(chan->in_chan, 0);
- ipu_idmac_select_buffer(outch, 0);
-
+ if (ic_settings_changed(ctx)) {
+ convert_stop(run);
+ convert_start(run, ctx->next_tile);
+ } else {
+ src_tile = &s_image->tile[ctx->next_tile];
+ dst_idx = ctx->out_tile_map[ctx->next_tile];
+ dst_tile = &d_image->tile[dst_idx];
+
+ ipu_cpmem_set_buffer(chan->in_chan, 0,
+ s_image->base.phys0 +
+ src_tile->offset);
+ ipu_cpmem_set_buffer(outch, 0,
+ d_image->base.phys0 +
+ dst_tile->offset);
+ if (s_image->fmt->planar)
+ ipu_cpmem_set_uv_offset(chan->in_chan,
+ src_tile->u_off,
+ src_tile->v_off);
+ if (d_image->fmt->planar)
+ ipu_cpmem_set_uv_offset(outch,
+ dst_tile->u_off,
+ dst_tile->v_off);
+
+ ipu_idmac_select_buffer(chan->in_chan, 0);
+ ipu_idmac_select_buffer(outch, 0);
+ }
} else if (ctx->next_tile < ctx->num_tiles - 1) {
src_tile = &s_image->tile[ctx->next_tile + 1];
@@ -1198,9 +1860,6 @@ static int fill_image(struct ipu_image_convert_ctx *ctx,
else
ic_image->stride = ic_image->base.pix.bytesperline;
- calc_tile_dimensions(ctx, ic_image);
- calc_tile_offsets(ctx, ic_image);
-
return 0;
}
@@ -1221,40 +1880,11 @@ static unsigned int clamp_align(unsigned int x, unsigned int min,
return x;
}
-/*
- * We have to adjust the tile width such that the tile physaddrs and
- * U and V plane offsets are multiples of 8 bytes as required by
- * the IPU DMA Controller. For the planar formats, this corresponds
- * to a pixel alignment of 16 (but use a more formal equation since
- * the variables are available). For all the packed formats, 8 is
- * good enough.
- */
-static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
-{
- return fmt->planar ? 8 * fmt->uv_width_dec : 8;
-}
-
-/*
- * For tile height alignment, we have to ensure that the output tile
- * heights are multiples of 8 lines if the IRT is required by the
- * given rotation mode (the IRT performs rotations on 8x8 blocks
- * at a time). If the IRT is not used, or for input image tiles,
- * 2 lines are good enough.
- */
-static inline u32 tile_height_align(enum ipu_image_convert_type type,
- enum ipu_rotate_mode rot_mode)
-{
- return (type == IMAGE_CONVERT_OUT &&
- ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
-}
-
/* Adjusts input/output images to IPU restrictions */
void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
enum ipu_rotate_mode rot_mode)
{
const struct ipu_image_pixfmt *infmt, *outfmt;
- unsigned int num_in_rows, num_in_cols;
- unsigned int num_out_rows, num_out_cols;
u32 w_align, h_align;
infmt = get_format(in->pix.pixelformat);
@@ -1286,36 +1916,31 @@ void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
in->pix.height / 4);
}
- /* get tiling rows/cols from output format */
- num_out_rows = num_stripes(out->pix.height);
- num_out_cols = num_stripes(out->pix.width);
- if (ipu_rot_mode_is_irt(rot_mode)) {
- num_in_rows = num_out_cols;
- num_in_cols = num_out_rows;
- } else {
- num_in_rows = num_out_rows;
- num_in_cols = num_out_cols;
- }
-
/* align input width/height */
- w_align = ilog2(tile_width_align(infmt) * num_in_cols);
- h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
- num_in_rows);
+ w_align = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt, rot_mode));
+ h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt, rot_mode));
in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);
/* align output width/height */
- w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
- h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
- num_out_rows);
+ w_align = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt, rot_mode));
+ h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt, rot_mode));
out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);
/* set input/output strides and image sizes */
- in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
- in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
- out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
- out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
+ in->pix.bytesperline = infmt->planar ?
+ clamp_align(in->pix.width, 2 << w_align, MAX_W, w_align) :
+ clamp_align((in->pix.width * infmt->bpp) >> 3,
+ 2 << w_align, MAX_W, w_align);
+ in->pix.sizeimage = infmt->planar ?
+ (in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
+ in->pix.height * in->pix.bytesperline;
+ out->pix.bytesperline = outfmt->planar ? out->pix.width :
+ (out->pix.width * outfmt->bpp) >> 3;
+ out->pix.sizeimage = outfmt->planar ?
+ (out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
+ out->pix.height * out->pix.bytesperline;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
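
The new stride and size rules differ for planar and packed formats: a planar stride follows the luma width, and sizeimage must cover the chroma planes as well. A worked standalone example, ignoring the clamp_align() rounding:

#include <stdio.h>

int main(void)
{
	unsigned int w = 640, h = 480;

	/* planar YUV420 (12 bpp): stride is the luma width,
	 * size spans all three planes */
	unsigned int yuv_stride = w;
	unsigned int yuv_size = (h * yuv_stride * 12) >> 3;

	/* packed RGB565 (16 bpp) */
	unsigned int rgb_stride = (w * 16) >> 3;
	unsigned int rgb_size = h * rgb_stride;

	printf("YUV420: stride %u size %u\n", yuv_stride, yuv_size);
	printf("RGB565: stride %u size %u\n", rgb_stride, rgb_size);
	return 0;
}
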
@@ -1360,6 +1985,7 @@ ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
struct ipu_image_convert_chan *chan;
struct ipu_image_convert_ctx *ctx;
unsigned long flags;
+ unsigned int i;
bool get_res;
int ret;
@@ -1412,8 +2038,26 @@ ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
if (ret)
goto out_free;
+ ret = calc_image_resize_coefficients(ctx, in, out);
+ if (ret)
+ goto out_free;
+
calc_out_tile_map(ctx);
+ find_seams(ctx, s_image, d_image);
+
+ calc_tile_dimensions(ctx, s_image);
+ ret = calc_tile_offsets(ctx, s_image);
+ if (ret)
+ goto out_free;
+
+ calc_tile_dimensions(ctx, d_image);
+ ret = calc_tile_offsets(ctx, d_image);
+ if (ret)
+ goto out_free;
+
+ calc_tile_resize_coefficients(ctx);
+
dump_format(ctx, s_image);
dump_format(ctx, d_image);
@@ -1429,21 +2073,51 @@ ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
* for every tile, and therefore would have to be updated for
* each buffer which is not possible. So double-buffering is
* impossible when either the source or destination images are
- * a planar format (YUV420, YUV422P, etc.).
+ * a planar format (YUV420, YUV422P, etc.). Further, differently
+ * sized tiles or different resizing coefficients per tile
+ * prevent double-buffering as well.
*/
ctx->double_buffering = (ctx->num_tiles > 1 &&
!s_image->fmt->planar &&
!d_image->fmt->planar);
+ for (i = 1; i < ctx->num_tiles; i++) {
+ if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
+ ctx->in.tile[i].height != ctx->in.tile[0].height ||
+ ctx->out.tile[i].width != ctx->out.tile[0].width ||
+ ctx->out.tile[i].height != ctx->out.tile[0].height) {
+ ctx->double_buffering = false;
+ break;
+ }
+ }
+ for (i = 1; i < ctx->in.num_cols; i++) {
+ if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
+ ctx->double_buffering = false;
+ break;
+ }
+ }
+ for (i = 1; i < ctx->in.num_rows; i++) {
+ if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
+ ctx->double_buffering = false;
+ break;
+ }
+ }
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ unsigned long intermediate_size = d_image->tile[0].size;
+
+ for (i = 1; i < ctx->num_tiles; i++) {
+ if (d_image->tile[i].size > intermediate_size)
+ intermediate_size = d_image->tile[i].size;
+ }
+
ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
- d_image->tile[0].size);
+ intermediate_size);
if (ret)
goto out_free;
if (ctx->double_buffering) {
ret = alloc_dma_buf(priv,
&ctx->rot_intermediate[1],
- d_image->tile[0].size);
+ intermediate_size);
if (ret)
goto out_free_dmabuf0;
}
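
With seam-based tiling the destination tiles are no longer guaranteed to be uniform, so the rotation scratch buffer is sized for the largest tile rather than tile 0. The scan in miniature, with invented tile sizes:

#include <stdio.h>

int main(void)
{
	unsigned long tile_size[4] = { 115200, 118400, 112000, 118400 };
	unsigned long max = tile_size[0];
	unsigned int i;

	for (i = 1; i < 4; i++)
		if (tile_size[i] > max)
			max = tile_size[i];

	printf("allocate %lu bytes per rotation buffer\n", max);
	return 0;
}
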
@@ -1524,16 +2198,13 @@ unlock:
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
/* Abort any active or pending conversions for this context */
-void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
struct ipu_image_convert_run *run, *active_run, *tmp;
unsigned long flags;
int run_count, ret;
- bool need_abort;
-
- reinit_completion(&ctx->aborted);
spin_lock_irqsave(&chan->irqlock, flags);
@@ -1549,22 +2220,28 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
chan->current_run : NULL;
- need_abort = (run_count || active_run);
+ if (active_run)
+ reinit_completion(&ctx->aborted);
- ctx->aborting = need_abort;
+ ctx->aborting = true;
spin_unlock_irqrestore(&chan->irqlock, flags);
- if (!need_abort) {
+ if (!run_count && !active_run) {
dev_dbg(priv->ipu->dev,
"%s: task %u: no abort needed for ctx %p\n",
__func__, chan->ic_task, ctx);
return;
}
+ if (!active_run) {
+ empty_done_q(chan);
+ return;
+ }
+
dev_dbg(priv->ipu->dev,
- "%s: task %u: wait for completion: %d runs, active run %p\n",
- __func__, chan->ic_task, run_count, active_run);
+ "%s: task %u: wait for completion: %d runs\n",
+ __func__, chan->ic_task, run_count);
ret = wait_for_completion_timeout(&ctx->aborted,
msecs_to_jiffies(10000));
@@ -1572,7 +2249,11 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
force_abort(ctx);
}
+}
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+{
+ __ipu_image_convert_abort(ctx);
ctx->aborting = false;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
@@ -1586,7 +2267,7 @@ void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
bool put_res;
/* make sure no runs are hanging around */
- ipu_image_convert_abort(ctx);
+ __ipu_image_convert_abort(ctx);
dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
chan->ic_task, ctx);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cf2a18571d48..a132c37d7334 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -380,6 +380,9 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
mutex_unlock(&vgasr_mutex);
return -EINVAL;
}
+ /* notify if GPU has already been bound */
+ if (ops->gpu_bound)
+ ops->gpu_bound(pdev, id);
}
mutex_unlock(&vgasr_mutex);
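
The point of the vga_switcheroo change is that an audio client registering after its GPU has already been bound would otherwise never see the gpu_bound notification. A generic analogue of that fire-on-registration pattern (not the real vga_switcheroo API):

#include <stdio.h>

struct audio_client_ops {
	void (*gpu_bound)(int gpu_id);
};

static void register_audio_client(const struct audio_client_ops *ops,
				  int gpu_id, int gpu_already_bound)
{
	/* deliver the missed notification at registration time */
	if (gpu_already_bound && ops->gpu_bound)
		ops->gpu_bound(gpu_id);
}

static void on_gpu_bound(int gpu_id)
{
	printf("audio client notified for GPU %d\n", gpu_id);
}

int main(void)
{
	struct audio_client_ops ops = { .gpu_bound = on_gpu_bound };

	register_audio_client(&ops, 0, 1);
	return 0;
}
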
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c61b04555779..dc8e039bfab5 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -676,7 +676,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
vga_arbiter_check_bridge_sharing(vgadev);
/* Add to the list */
- list_add(&vgadev->list, &vga_list);
+ list_add_tail(&vgadev->list, &vga_list);
vga_count++;
vgaarb_info(&pdev->dev, "VGA device added: decodes=%s,owns=%s,locks=%s\n",
vga_iostate_to_str(vgadev->decodes),
@@ -1408,6 +1408,18 @@ static void __init vga_arb_select_default_device(void)
struct vga_device *vgadev;
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+ u64 base = screen_info.lfb_base;
+ u64 size = screen_info.lfb_size;
+ u64 limit;
+ resource_size_t start, end;
+ unsigned long flags;
+ int i;
+
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ base |= (u64)screen_info.ext_lfb_base << 32;
+
+ limit = base + size;
+
list_for_each_entry(vgadev, &vga_list, list) {
struct device *dev = &vgadev->pdev->dev;
/*
@@ -1418,11 +1430,6 @@ static void __init vga_arb_select_default_device(void)
* Select the device owning the boot framebuffer if there is
* one.
*/
- resource_size_t start, end, limit;
- unsigned long flags;
- int i;
-
- limit = screen_info.lfb_base + screen_info.lfb_size;
/* Does firmware framebuffer belong to us? */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1437,7 +1444,7 @@ static void __init vga_arb_select_default_device(void)
if (!start || !end)
continue;
- if (screen_info.lfb_base < start || limit >= end)
+ if (base < start || limit >= end)
continue;
if (!vga_default_device())
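
The vgaarb fix widens the boot-framebuffer test to 64-bit addresses: when the firmware advertises VIDEO_CAPABILITY_64BIT_BASE, ext_lfb_base supplies the upper 32 bits of the base. A standalone sketch of the containment test, with invented addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lfb_base = 0x90000000, ext_lfb_base = 0x4;
	uint64_t size = 0x01000000;
	uint64_t base = lfb_base | (ext_lfb_base << 32);
	uint64_t limit = base + size;
	/* one of the device's apertures */
	uint64_t res_start = 0x480000000ULL, res_end = 0x4ffffffffULL;

	/* mirrors: if (base < start || limit >= end) continue; */
	if (base >= res_start && limit < res_end)
		printf("boot framebuffer belongs to this device\n");
	return 0;
}
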
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index aec253b44156..3cd7229b6e54 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -660,6 +660,20 @@ exit:
return ret;
}
+static int alps_sp_open(struct input_dev *dev)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+
+ return hid_hw_open(hid);
+}
+
+static void alps_sp_close(struct input_dev *dev)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+
+ hid_hw_close(hid);
+}
+
static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
struct alps_dev *data = hid_get_drvdata(hdev);
@@ -733,6 +747,10 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
input2->id.version = input->id.version;
input2->dev.parent = input->dev.parent;
+ input_set_drvdata(input2, hdev);
+ input2->open = alps_sp_open;
+ input2->close = alps_sp_close;
+
__set_bit(EV_KEY, input2->evbit);
data->sp_btn_cnt = (data->sp_btn_info & 0x0F);
for (i = 0; i < data->sp_btn_cnt; i++)
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index dc6d6477e961..a1fa2fc8c9b5 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -359,6 +359,9 @@ static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev)
u32 value;
int ret;
+ if (!IS_ENABLED(CONFIG_ASUS_WMI))
+ return false;
+
ret = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2,
ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, &value);
hid_dbg(hdev, "WMI backlight check: rc %d value %x", ret, value);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f63489c882bb..ed35c9a9a110 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -275,6 +275,9 @@
#define USB_VENDOR_ID_CIDC 0x1677
+#define I2C_VENDOR_ID_CIRQUE 0x0488
+#define I2C_PRODUCT_ID_CIRQUE_121F 0x121F
+
#define USB_VENDOR_ID_CJTOUCH 0x24b8
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
@@ -707,6 +710,7 @@
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
#define USB_DEVICE_ID_LG_MELFAS_MT 0x6007
+#define I2C_DEVICE_ID_LG_8001 0x8001
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
@@ -805,6 +809,7 @@
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
+#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -927,6 +932,9 @@
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
+#define I2C_VENDOR_ID_RAYDIUM 0x2386
+#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
+
#define USB_VENDOR_ID_RAZER 0x1532
#define USB_DEVICE_ID_RAZER_BLADE_14 0x011D
@@ -1040,6 +1048,7 @@
#define USB_VENDOR_ID_SYMBOL 0x05e0
#define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800
#define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
+#define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200
#define USB_VENDOR_ID_SYNAPTICS 0x06cb
#define USB_DEVICE_ID_SYNAPTICS_TP 0x0001
@@ -1201,6 +1210,8 @@
#define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
#define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72
+#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F 0x4d0f
+#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22 0x4e22
#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index a2f74e6adc70..d6fab5798487 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -325,6 +325,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM,
USB_DEVICE_ID_ELECOM_BM084),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL,
+ USB_DEVICE_ID_SYMBOL_SCANNER_3),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
@@ -1838,47 +1841,3 @@ void hidinput_disconnect(struct hid_device *hid)
}
EXPORT_SYMBOL_GPL(hidinput_disconnect);
-/**
- * hid_scroll_counter_handle_scroll() - Send high- and low-resolution scroll
- * events given a high-resolution wheel
- * movement.
- * @counter: a hid_scroll_counter struct describing the wheel.
- * @hi_res_value: the movement of the wheel, in the mouse's high-resolution
- * units.
- *
- * Given a high-resolution movement, this function converts the movement into
- * microns and emits high-resolution scroll events for the input device. It also
- * uses the multiplier from &struct hid_scroll_counter to emit low-resolution
- * scroll events when appropriate for backwards-compatibility with userspace
- * input libraries.
- */
-void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
- int hi_res_value)
-{
- int low_res_value, remainder, multiplier;
-
- input_report_rel(counter->dev, REL_WHEEL_HI_RES,
- hi_res_value * counter->microns_per_hi_res_unit);
-
- /*
- * Update the low-res remainder with the high-res value,
- * but reset if the direction has changed.
- */
- remainder = counter->remainder;
- if ((remainder ^ hi_res_value) < 0)
- remainder = 0;
- remainder += hi_res_value;
-
- /*
- * Then just use the resolution multiplier to see if
- * we should send a low-res (aka regular wheel) event.
- */
- multiplier = counter->resolution_multiplier;
- low_res_value = remainder / multiplier;
- remainder -= low_res_value * multiplier;
- counter->remainder = remainder;
-
- if (low_res_value)
- input_report_rel(counter->dev, REL_WHEEL, low_res_value);
-}
-EXPORT_SYMBOL_GPL(hid_scroll_counter_handle_scroll);
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index f01280898b24..19cc980eebce 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -64,14 +64,6 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_QUIRK_NO_HIDINPUT BIT(23)
#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24)
#define HIDPP_QUIRK_UNIFYING BIT(25)
-#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
-#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
-#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
-
-/* Convenience constant to check for any high-res support. */
-#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
- HIDPP_QUIRK_HI_RES_SCROLL_X2120 | \
- HIDPP_QUIRK_HI_RES_SCROLL_X2121)
#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT
@@ -157,7 +149,6 @@ struct hidpp_device {
unsigned long capabilities;
struct hidpp_battery battery;
- struct hid_scroll_counter vertical_wheel_counter;
};
/* HID++ 1.0 error codes */
@@ -409,53 +400,32 @@ static void hidpp_prefix_name(char **name, int name_length)
#define HIDPP_SET_LONG_REGISTER 0x82
#define HIDPP_GET_LONG_REGISTER 0x83
-/**
- * hidpp10_set_register_bit() - Sets a single bit in a HID++ 1.0 register.
- * @hidpp_dev: the device to set the register on.
- * @register_address: the address of the register to modify.
- * @byte: the byte of the register to modify. Should be less than 3.
- * Return: 0 if successful, otherwise a negative error code.
- */
-static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
- u8 register_address, u8 byte, u8 bit)
+#define HIDPP_REG_GENERAL 0x00
+
+static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
{
struct hidpp_report response;
int ret;
u8 params[3] = { 0 };
ret = hidpp_send_rap_command_sync(hidpp_dev,
- REPORT_ID_HIDPP_SHORT,
- HIDPP_GET_REGISTER,
- register_address,
- NULL, 0, &response);
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_GET_REGISTER,
+ HIDPP_REG_GENERAL,
+ NULL, 0, &response);
if (ret)
return ret;
memcpy(params, response.rap.params, 3);
- params[byte] |= BIT(bit);
+ /* Set the battery bit */
+ params[0] |= BIT(4);
return hidpp_send_rap_command_sync(hidpp_dev,
- REPORT_ID_HIDPP_SHORT,
- HIDPP_SET_REGISTER,
- register_address,
- params, 3, &response);
-}
-
-
-#define HIDPP_REG_GENERAL 0x00
-
-static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
-{
- return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_GENERAL, 0, 4);
-}
-
-#define HIDPP_REG_FEATURES 0x01
-
-/* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */
-static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev)
-{
- return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_FEATURES, 0, 6);
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_SET_REGISTER,
+ HIDPP_REG_GENERAL,
+ params, 3, &response);
}
#define HIDPP_REG_BATTERY_STATUS 0x07
@@ -1167,100 +1137,6 @@ static int hidpp_battery_get_property(struct power_supply *psy,
}
/* -------------------------------------------------------------------------- */
-/* 0x2120: Hi-resolution scrolling */
-/* -------------------------------------------------------------------------- */
-
-#define HIDPP_PAGE_HI_RESOLUTION_SCROLLING 0x2120
-
-#define CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE 0x10
-
-static int hidpp_hrs_set_highres_scrolling_mode(struct hidpp_device *hidpp,
- bool enabled, u8 *multiplier)
-{
- u8 feature_index;
- u8 feature_type;
- int ret;
- u8 params[1];
- struct hidpp_report response;
-
- ret = hidpp_root_get_feature(hidpp,
- HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
- &feature_index,
- &feature_type);
- if (ret)
- return ret;
-
- params[0] = enabled ? BIT(0) : 0;
- ret = hidpp_send_fap_command_sync(hidpp, feature_index,
- CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE,
- params, sizeof(params), &response);
- if (ret)
- return ret;
- *multiplier = response.fap.params[1];
- return 0;
-}
-
-/* -------------------------------------------------------------------------- */
-/* 0x2121: HiRes Wheel */
-/* -------------------------------------------------------------------------- */
-
-#define HIDPP_PAGE_HIRES_WHEEL 0x2121
-
-#define CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY 0x00
-#define CMD_HIRES_WHEEL_SET_WHEEL_MODE 0x20
-
-static int hidpp_hrw_get_wheel_capability(struct hidpp_device *hidpp,
- u8 *multiplier)
-{
- u8 feature_index;
- u8 feature_type;
- int ret;
- struct hidpp_report response;
-
- ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
- &feature_index, &feature_type);
- if (ret)
- goto return_default;
-
- ret = hidpp_send_fap_command_sync(hidpp, feature_index,
- CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY,
- NULL, 0, &response);
- if (ret)
- goto return_default;
-
- *multiplier = response.fap.params[0];
- return 0;
-return_default:
- hid_warn(hidpp->hid_dev,
- "Couldn't get wheel multiplier (error %d), assuming %d.\n",
- ret, *multiplier);
- return ret;
-}
-
-static int hidpp_hrw_set_wheel_mode(struct hidpp_device *hidpp, bool invert,
- bool high_resolution, bool use_hidpp)
-{
- u8 feature_index;
- u8 feature_type;
- int ret;
- u8 params[1];
- struct hidpp_report response;
-
- ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
- &feature_index, &feature_type);
- if (ret)
- return ret;
-
- params[0] = (invert ? BIT(2) : 0) |
- (high_resolution ? BIT(1) : 0) |
- (use_hidpp ? BIT(0) : 0);
-
- return hidpp_send_fap_command_sync(hidpp, feature_index,
- CMD_HIRES_WHEEL_SET_WHEEL_MODE,
- params, sizeof(params), &response);
-}
-
-/* -------------------------------------------------------------------------- */
/* 0x4301: Solar Keyboard */
/* -------------------------------------------------------------------------- */
@@ -2523,8 +2399,7 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
input_report_rel(mydata->input, REL_Y, v);
v = hid_snto32(data[6], 8);
- hid_scroll_counter_handle_scroll(
- &hidpp->vertical_wheel_counter, v);
+ input_report_rel(mydata->input, REL_WHEEL, v);
input_sync(mydata->input);
}
@@ -2653,72 +2528,6 @@ static int g920_get_config(struct hidpp_device *hidpp)
}
/* -------------------------------------------------------------------------- */
-/* High-resolution scroll wheels */
-/* -------------------------------------------------------------------------- */
-
-/**
- * struct hi_res_scroll_info - Stores info on a device's high-res scroll wheel.
- * @product_id: the HID product ID of the device being described.
- * @microns_per_hi_res_unit: the distance moved by the user's finger for each
- * high-resolution unit reported by the device, in
- * 256ths of a millimetre.
- */
-struct hi_res_scroll_info {
- __u32 product_id;
- int microns_per_hi_res_unit;
-};
-
-static struct hi_res_scroll_info hi_res_scroll_devices[] = {
- { /* Anywhere MX */
- .product_id = 0x1017, .microns_per_hi_res_unit = 445 },
- { /* Performance MX */
- .product_id = 0x101a, .microns_per_hi_res_unit = 406 },
- { /* M560 */
- .product_id = 0x402d, .microns_per_hi_res_unit = 435 },
- { /* MX Master 2S */
- .product_id = 0x4069, .microns_per_hi_res_unit = 406 },
-};
-
-static int hi_res_scroll_look_up_microns(__u32 product_id)
-{
- int i;
- int num_devices = sizeof(hi_res_scroll_devices)
- / sizeof(hi_res_scroll_devices[0]);
- for (i = 0; i < num_devices; i++) {
- if (hi_res_scroll_devices[i].product_id == product_id)
- return hi_res_scroll_devices[i].microns_per_hi_res_unit;
- }
- /* We don't have a value for this device, so use a sensible default. */
- return 406;
-}
-
-static int hi_res_scroll_enable(struct hidpp_device *hidpp)
-{
- int ret;
- u8 multiplier = 8;
-
- if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2121) {
- ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false);
- hidpp_hrw_get_wheel_capability(hidpp, &multiplier);
- } else if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2120) {
- ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true,
- &multiplier);
- } else /* if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) */
- ret = hidpp10_enable_scrolling_acceleration(hidpp);
-
- if (ret)
- return ret;
-
- hidpp->vertical_wheel_counter.resolution_multiplier = multiplier;
- hidpp->vertical_wheel_counter.microns_per_hi_res_unit =
- hi_res_scroll_look_up_microns(hidpp->hid_dev->product);
- hid_info(hidpp->hid_dev, "multiplier = %d, microns = %d\n",
- multiplier,
- hidpp->vertical_wheel_counter.microns_per_hi_res_unit);
- return 0;
-}
-
-/* -------------------------------------------------------------------------- */
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
@@ -2763,11 +2572,6 @@ static void hidpp_populate_input(struct hidpp_device *hidpp,
wtp_populate_input(hidpp, input, origin_is_hid_core);
else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
m560_populate_input(hidpp, input, origin_is_hid_core);
-
- if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) {
- input_set_capability(input, EV_REL, REL_WHEEL_HI_RES);
- hidpp->vertical_wheel_counter.dev = input;
- }
}
static int hidpp_input_configured(struct hid_device *hdev,
@@ -2886,27 +2690,6 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
return 0;
}
-static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
- struct hid_usage *usage, __s32 value)
-{
- /* This function will only be called for scroll events, due to the
- * restriction imposed in hidpp_usages.
- */
- struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- struct hid_scroll_counter *counter = &hidpp->vertical_wheel_counter;
- /* A scroll event may occur before the multiplier has been retrieved or
- * the input device set, or high-res scroll enabling may fail. In such
- * cases we must return early (falling back to default behaviour) to
- * avoid a crash in hid_scroll_counter_handle_scroll.
- */
- if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
- || counter->dev == NULL || counter->resolution_multiplier == 0)
- return 0;
-
- hid_scroll_counter_handle_scroll(counter, value);
- return 1;
-}
-
static int hidpp_initialize_battery(struct hidpp_device *hidpp)
{
static atomic_t battery_no = ATOMIC_INIT(0);
@@ -3118,9 +2901,6 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
- if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
- hi_res_scroll_enable(hidpp);
-
if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
/* if the input nodes are already created, we can stop now */
return;
@@ -3306,63 +3086,35 @@ static void hidpp_remove(struct hid_device *hdev)
mutex_destroy(&hidpp->send_mutex);
}
-#define LDJ_DEVICE(product) \
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
- USB_VENDOR_ID_LOGITECH, (product))
-
static const struct hid_device_id hidpp_devices[] = {
{ /* wireless touchpad */
- LDJ_DEVICE(0x4011),
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
+ USB_VENDOR_ID_LOGITECH, 0x4011),
.driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT |
HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS },
{ /* wireless touchpad T650 */
- LDJ_DEVICE(0x4101),
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
+ USB_VENDOR_ID_LOGITECH, 0x4101),
.driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT },
{ /* wireless touchpad T651 */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_T651),
.driver_data = HIDPP_QUIRK_CLASS_WTP },
- { /* Mouse Logitech Anywhere MX */
- LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
- { /* Mouse Logitech Cube */
- LDJ_DEVICE(0x4010), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
- { /* Mouse Logitech M335 */
- LDJ_DEVICE(0x4050), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech M515 */
- LDJ_DEVICE(0x4007), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
{ /* Mouse logitech M560 */
- LDJ_DEVICE(0x402d),
- .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560
- | HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
- { /* Mouse Logitech M705 (firmware RQM17) */
- LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
- { /* Mouse Logitech M705 (firmware RQM67) */
- LDJ_DEVICE(0x406d), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech M720 */
- LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Anywhere 2 */
- LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Anywhere 2S */
- LDJ_DEVICE(0x406a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Master */
- LDJ_DEVICE(0x4041), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0x4060), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Master 2S */
- LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech Performance MX */
- LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
+ USB_VENDOR_ID_LOGITECH, 0x402d),
+ .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
{ /* Keyboard logitech K400 */
- LDJ_DEVICE(0x4024),
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
+ USB_VENDOR_ID_LOGITECH, 0x4024),
.driver_data = HIDPP_QUIRK_CLASS_K400 },
{ /* Solar Keyboard Logitech K750 */
- LDJ_DEVICE(0x4002),
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
+ USB_VENDOR_ID_LOGITECH, 0x4002),
.driver_data = HIDPP_QUIRK_CLASS_K750 },
- { LDJ_DEVICE(HID_ANY_ID) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
+ USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
@@ -3371,19 +3123,12 @@ static const struct hid_device_id hidpp_devices[] = {
MODULE_DEVICE_TABLE(hid, hidpp_devices);
-static const struct hid_usage_id hidpp_usages[] = {
- { HID_GD_WHEEL, EV_REL, REL_WHEEL },
- { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
-};
-
static struct hid_driver hidpp_driver = {
.name = "logitech-hidpp-device",
.id_table = hidpp_devices,
.probe = hidpp_probe,
.remove = hidpp_remove,
.raw_event = hidpp_raw_event,
- .usage_table = hidpp_usages,
- .event = hidpp_event,
.input_configured = hidpp_input_configured,
.input_mapping = hidpp_input_mapping,
.input_mapped = hidpp_input_mapped,
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f7c6de2b6730..dca0a3a90fb8 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1814,6 +1814,12 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
+ /* Cirque devices */
+ { .driver_data = MT_CLS_WIN_8_DUAL,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ I2C_VENDOR_ID_CIRQUE,
+ I2C_PRODUCT_ID_CIRQUE_121F) },
+
/* CJTouch panels */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 52c3b01917e7..c85a79986b6a 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -107,7 +107,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
@@ -130,6 +130,8 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 0422ec2b13d2..dc4128bfe2ca 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -23,8 +23,9 @@
* In order to avoid breaking them this driver creates a layered hidraw device,
* so it can detect when the client is running and then:
* - it will not send any command to the controller.
- * - this input device will be disabled, to avoid double input of the same
+ * - this input device will be removed, to avoid double input of the same
* user action.
+ * When the client is closed, this input device will be created again.
*
* For additional functions, such as changing the right-pad margin or switching
* the led, you can use the user-space tool at:
@@ -113,7 +114,7 @@ struct steam_device {
spinlock_t lock;
struct hid_device *hdev, *client_hdev;
struct mutex mutex;
- bool client_opened, input_opened;
+ bool client_opened;
struct input_dev __rcu *input;
unsigned long quirks;
struct work_struct work_connect;
@@ -279,18 +280,6 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
}
}
-static void steam_update_lizard_mode(struct steam_device *steam)
-{
- mutex_lock(&steam->mutex);
- if (!steam->client_opened) {
- if (steam->input_opened)
- steam_set_lizard_mode(steam, false);
- else
- steam_set_lizard_mode(steam, lizard_mode);
- }
- mutex_unlock(&steam->mutex);
-}
-
static int steam_input_open(struct input_dev *dev)
{
struct steam_device *steam = input_get_drvdata(dev);
@@ -301,7 +290,6 @@ static int steam_input_open(struct input_dev *dev)
return ret;
mutex_lock(&steam->mutex);
- steam->input_opened = true;
if (!steam->client_opened && lizard_mode)
steam_set_lizard_mode(steam, false);
mutex_unlock(&steam->mutex);
@@ -313,7 +301,6 @@ static void steam_input_close(struct input_dev *dev)
struct steam_device *steam = input_get_drvdata(dev);
mutex_lock(&steam->mutex);
- steam->input_opened = false;
if (!steam->client_opened && lizard_mode)
steam_set_lizard_mode(steam, true);
mutex_unlock(&steam->mutex);
@@ -400,7 +387,7 @@ static int steam_battery_register(struct steam_device *steam)
return 0;
}
-static int steam_register(struct steam_device *steam)
+static int steam_input_register(struct steam_device *steam)
{
struct hid_device *hdev = steam->hdev;
struct input_dev *input;
@@ -414,17 +401,6 @@ static int steam_register(struct steam_device *steam)
return 0;
}
- /*
- * Unlikely, but getting the serial could fail, and it is not so
- * important, so make up a serial number and go on.
- */
- if (steam_get_serial(steam) < 0)
- strlcpy(steam->serial_no, "XXXXXXXXXX",
- sizeof(steam->serial_no));
-
- hid_info(hdev, "Steam Controller '%s' connected",
- steam->serial_no);
-
input = input_allocate_device();
if (!input)
return -ENOMEM;
@@ -492,11 +468,6 @@ static int steam_register(struct steam_device *steam)
goto input_register_fail;
rcu_assign_pointer(steam->input, input);
-
- /* ignore battery errors, we can live without it */
- if (steam->quirks & STEAM_QUIRK_WIRELESS)
- steam_battery_register(steam);
-
return 0;
input_register_fail:
@@ -504,27 +475,88 @@ input_register_fail:
return ret;
}
-static void steam_unregister(struct steam_device *steam)
+static void steam_input_unregister(struct steam_device *steam)
{
struct input_dev *input;
+ rcu_read_lock();
+ input = rcu_dereference(steam->input);
+ rcu_read_unlock();
+ if (!input)
+ return;
+ RCU_INIT_POINTER(steam->input, NULL);
+ synchronize_rcu();
+ input_unregister_device(input);
+}
+
+static void steam_battery_unregister(struct steam_device *steam)
+{
struct power_supply *battery;
rcu_read_lock();
- input = rcu_dereference(steam->input);
battery = rcu_dereference(steam->battery);
rcu_read_unlock();
- if (battery) {
- RCU_INIT_POINTER(steam->battery, NULL);
- synchronize_rcu();
- power_supply_unregister(battery);
+ if (!battery)
+ return;
+ RCU_INIT_POINTER(steam->battery, NULL);
+ synchronize_rcu();
+ power_supply_unregister(battery);
+}
+
+static int steam_register(struct steam_device *steam)
+{
+ int ret;
+
+ /*
+ * This function can be called several times in a row with the
+ * wireless adaptor, without steam_unregister() between them, because
+ * another client sends a get_connection_status command, for example.
+ * The battery and serial number are set just once per device.
+ */
+ if (!steam->serial_no[0]) {
+ /*
+ * Unlikely, but getting the serial could fail, and it is not so
+ * important, so make up a serial number and go on.
+ */
+ if (steam_get_serial(steam) < 0)
+ strlcpy(steam->serial_no, "XXXXXXXXXX",
+ sizeof(steam->serial_no));
+
+ hid_info(steam->hdev, "Steam Controller '%s' connected",
+ steam->serial_no);
+
+ /* ignore battery errors, we can live without it */
+ if (steam->quirks & STEAM_QUIRK_WIRELESS)
+ steam_battery_register(steam);
+
+ mutex_lock(&steam_devices_lock);
+ list_add(&steam->list, &steam_devices);
+ mutex_unlock(&steam_devices_lock);
}
- if (input) {
- RCU_INIT_POINTER(steam->input, NULL);
- synchronize_rcu();
+
+ mutex_lock(&steam->mutex);
+ if (!steam->client_opened) {
+ steam_set_lizard_mode(steam, lizard_mode);
+ ret = steam_input_register(steam);
+ } else {
+ ret = 0;
+ }
+ mutex_unlock(&steam->mutex);
+
+ return ret;
+}
+
+static void steam_unregister(struct steam_device *steam)
+{
+ steam_battery_unregister(steam);
+ steam_input_unregister(steam);
+ if (steam->serial_no[0]) {
hid_info(steam->hdev, "Steam Controller '%s' disconnected",
steam->serial_no);
- input_unregister_device(input);
+ mutex_lock(&steam_devices_lock);
+ list_del(&steam->list);
+ mutex_unlock(&steam_devices_lock);
+ steam->serial_no[0] = 0;
}
}
@@ -600,6 +632,9 @@ static int steam_client_ll_open(struct hid_device *hdev)
mutex_lock(&steam->mutex);
steam->client_opened = true;
mutex_unlock(&steam->mutex);
+
+ steam_input_unregister(steam);
+
return ret;
}
@@ -609,13 +644,13 @@ static void steam_client_ll_close(struct hid_device *hdev)
mutex_lock(&steam->mutex);
steam->client_opened = false;
- if (steam->input_opened)
- steam_set_lizard_mode(steam, false);
- else
- steam_set_lizard_mode(steam, lizard_mode);
mutex_unlock(&steam->mutex);
hid_hw_close(steam->hdev);
+ if (steam->connected) {
+ steam_set_lizard_mode(steam, lizard_mode);
+ steam_input_register(steam);
+ }
}
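
The hid-steam rework replaces the old enable/disable dance with full removal and re-creation of the input device around the client's open and close. A minimal model of that lifecycle:

#include <stdbool.h>
#include <stdio.h>

struct steam { bool client_opened; bool connected; bool input; };

static void client_open(struct steam *s)
{
	s->client_opened = true;
	s->input = false;		/* steam_input_unregister() */
	printf("client open: input removed\n");
}

static void client_close(struct steam *s)
{
	s->client_opened = false;
	if (s->connected) {
		s->input = true;	/* steam_input_register() */
		printf("client close: input re-created\n");
	}
}

int main(void)
{
	struct steam s = { .connected = true, .input = true };

	client_open(&s);
	client_close(&s);
	return 0;
}
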
static int steam_client_ll_raw_request(struct hid_device *hdev,
@@ -744,11 +779,6 @@ static int steam_probe(struct hid_device *hdev,
}
}
- mutex_lock(&steam_devices_lock);
- steam_update_lizard_mode(steam);
- list_add(&steam->list, &steam_devices);
- mutex_unlock(&steam_devices_lock);
-
return 0;
hid_hw_open_fail:
@@ -774,10 +804,6 @@ static void steam_remove(struct hid_device *hdev)
return;
}
- mutex_lock(&steam_devices_lock);
- list_del(&steam->list);
- mutex_unlock(&steam_devices_lock);
-
hid_destroy_device(steam->client_hdev);
steam->client_opened = false;
cancel_work_sync(&steam->work_connect);
@@ -792,12 +818,14 @@ static void steam_remove(struct hid_device *hdev)
static void steam_do_connect_event(struct steam_device *steam, bool connected)
{
unsigned long flags;
+ bool changed;
spin_lock_irqsave(&steam->lock, flags);
+ changed = steam->connected != connected;
steam->connected = connected;
spin_unlock_irqrestore(&steam->lock, flags);
- if (schedule_work(&steam->work_connect) == 0)
+ if (changed && schedule_work(&steam->work_connect) == 0)
dbg_hid("%s: connected=%d event already queued\n",
__func__, connected);
}
@@ -1019,13 +1047,8 @@ static int steam_raw_event(struct hid_device *hdev,
return 0;
rcu_read_lock();
input = rcu_dereference(steam->input);
- if (likely(input)) {
+ if (likely(input))
steam_do_input_event(steam, input, data);
- } else {
- dbg_hid("%s: input data without connect event\n",
- __func__);
- steam_do_connect_event(steam, true);
- }
rcu_read_unlock();
break;
case STEAM_EV_CONNECT:
@@ -1074,7 +1097,10 @@ static int steam_param_set_lizard_mode(const char *val,
mutex_lock(&steam_devices_lock);
list_for_each_entry(steam, &steam_devices, list) {
- steam_update_lizard_mode(steam);
+ mutex_lock(&steam->mutex);
+ if (!steam->client_opened)
+ steam_set_lizard_mode(steam, lizard_mode);
+ mutex_unlock(&steam->mutex);
}
mutex_unlock(&steam_devices_lock);
return 0;
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 4aab96cf0818..8555ce7e737b 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -49,6 +49,7 @@
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
+#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3)
/* flags */
#define I2C_HID_STARTED 0
@@ -158,6 +159,8 @@ struct i2c_hid {
bool irq_wake_enabled;
struct mutex reset_lock;
+
+ unsigned long sleep_delay;
};
static const struct i2c_hid_quirks {
@@ -172,6 +175,10 @@ static const struct i2c_hid_quirks {
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
I2C_HID_QUIRK_NO_RUNTIME_PM },
+ { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
+ I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
+ { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
+ I2C_HID_QUIRK_NO_RUNTIME_PM },
{ 0, 0 }
};
@@ -387,6 +394,7 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
int ret;
+ unsigned long now, delay;
i2c_hid_dbg(ihid, "%s\n", __func__);
@@ -404,9 +412,22 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
goto set_pwr_exit;
}
+ if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
+ power_state == I2C_HID_PWR_ON) {
+ now = jiffies;
+ if (time_after(ihid->sleep_delay, now)) {
+ delay = jiffies_to_usecs(ihid->sleep_delay - now);
+ usleep_range(delay, delay + 1);
+ }
+ }
+
ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
0, NULL, 0, NULL, 0);
+ if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
+ power_state == I2C_HID_PWR_SLEEP)
+ ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
+
if (ret)
dev_err(&client->dev, "failed to change power setting.\n");
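
The DELAY_AFTER_SLEEP quirk records a deadline 20 ms past the SLEEP command and only sleeps off whatever remains of that window before the next power-on. A userspace analogue of the same pattern using an absolute deadline (clock_nanosleep() returns immediately once the deadline has passed):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>

static struct timespec deadline;

static void after_sleep_cmd(void)
{
	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_nsec += 20 * 1000000L;	/* +20 ms */
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}
}

static void before_on_cmd(void)
{
	/* sleeps only for the part of the 20 ms window still left */
	clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
}

int main(void)
{
	after_sleep_cmd();	/* device was just put to sleep */
	before_on_cmd();	/* guarantees >= 20 ms before power-on */
	printf("safe to send PWR_ON\n");
	return 0;
}
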
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index cac262a912c1..89f2976f9c53 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -331,6 +331,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
.driver_data = (void *)&sipodev_desc
},
{
+ .ident = "Direkt-Tek DTLAPY133-1",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY133-1"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
.ident = "Mediacom Flexbook Edge 11",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 3c5507313606..840634e0f1e3 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -12,6 +12,7 @@
#include <linux/atomic.h>
#include <linux/compat.h>
+#include <linux/cred.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
@@ -496,12 +497,13 @@ static int uhid_dev_create2(struct uhid_device *uhid,
goto err_free;
}
- len = min(sizeof(hid->name), sizeof(ev->u.create2.name));
- strlcpy(hid->name, ev->u.create2.name, len);
- len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys));
- strlcpy(hid->phys, ev->u.create2.phys, len);
- len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq));
- strlcpy(hid->uniq, ev->u.create2.uniq, len);
+ /* @hid is zero-initialized, strncpy() is correct, strlcpy() not */
+ len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
+ strncpy(hid->name, ev->u.create2.name, len);
+ len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
+ strncpy(hid->phys, ev->u.create2.phys, len);
+ len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
+ strncpy(hid->uniq, ev->u.create2.uniq, len);
hid->ll_driver = &uhid_hid_driver;
hid->bus = ev->u.create2.bus;
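
The copy pattern above relies on the destination being zero-initialized: strncpy() bounded to sizeof(dst) - 1 then always leaves a trailing NUL, even when the (possibly unterminated) source fills the whole window. A small demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char src[8];
	char dst[8] = { 0 };	/* zeroed, like a fresh hid_device */

	memset(src, 'A', sizeof(src));		/* no terminator */
	strncpy(dst, src, sizeof(dst) - 1);	/* byte 7 stays 0 */
	printf("dst = \"%s\" (len %zu)\n", dst, strlen(dst));
	return 0;
}
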
@@ -722,6 +724,17 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
switch (uhid->input_buf.type) {
case UHID_CREATE:
+ /*
+ * 'struct uhid_create_req' contains a __user pointer which is
+ * copied from, so it's unsafe to allow this with elevated
+ * privileges (e.g. from a setuid binary) or via kernel_write().
+ */
+ if (file->f_cred != current_cred() || uaccess_kernel()) {
+ pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
+ task_tgid_vnr(current), current->comm);
+ ret = -EACCES;
+ goto unlock;
+ }
ret = uhid_dev_create(uhid, &uhid->input_buf);
break;
case UHID_CREATE2:
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 23872d08308c..a746017fac17 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -512,14 +512,24 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
if (cmd == HIDIOCGCOLLECTIONINDEX) {
if (uref->usage_index >= field->maxusage)
goto inval;
+ uref->usage_index =
+ array_index_nospec(uref->usage_index,
+ field->maxusage);
} else if (uref->usage_index >= field->report_count)
goto inval;
}
- if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
- uref->usage_index + uref_multi->num_values > field->report_count))
- goto inval;
+ if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
+ if (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+ uref->usage_index + uref_multi->num_values >
+ field->report_count)
+ goto inval;
+
+ uref->usage_index =
+ array_index_nospec(uref->usage_index,
+ field->report_count -
+ uref_multi->num_values);
+ }
switch (cmd) {
case HIDIOCGUSAGE:
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index a7513a8a8e37..d6106e1a0d4a 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -353,6 +353,9 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
+ /* fallthrough */
+
+ case KVP_OP_GET_IP_INFO:
utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
MAX_ADAPTER_ID_SIZE,
UTF16_LITTLE_ENDIAN,
@@ -405,7 +408,11 @@ kvp_send_key(struct work_struct *dummy)
process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
break;
case KVP_OP_GET_IP_INFO:
- /* We only need to pass on message->kvp_hdr.operation. */
+ /*
+ * We only need to pass on the operation, adapter_id and
+ * addr_family info to the userland kvp daemon.
+ */
+ process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
break;
case KVP_OP_SET:
switch (in_msg->body.kvp_set.data.value_type) {
@@ -446,9 +453,9 @@ kvp_send_key(struct work_struct *dummy)
}
- break;
-
- case KVP_OP_GET:
+ /*
+ * The key is always a string - utf16 encoding.
+ */
message->body.kvp_set.data.key_size =
utf16s_to_utf8s(
(wchar_t *)in_msg->body.kvp_set.data.key,
@@ -456,6 +463,17 @@ kvp_send_key(struct work_struct *dummy)
UTF16_LITTLE_ENDIAN,
message->body.kvp_set.data.key,
HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
+
+ break;
+
+ case KVP_OP_GET:
+ message->body.kvp_get.data.key_size =
+ utf16s_to_utf8s(
+ (wchar_t *)in_msg->body.kvp_get.data.key,
+ in_msg->body.kvp_get.data.key_size,
+ UTF16_LITTLE_ENDIAN,
+ message->body.kvp_get.data.key,
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
break;
case KVP_OP_DELETE:
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 975c95169884..84f61cec6319 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -649,8 +649,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (info[i]->config[j] & HWMON_T_INPUT) {
err = hwmon_thermal_add_sensor(dev,
hwdev, j);
- if (err)
- goto free_device;
+ if (err) {
+ device_unregister(hdev);
+ goto ida_remove;
+ }
}
}
}
@@ -658,8 +660,6 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
return hdev;
-free_device:
- device_unregister(hdev);
free_hwmon:
kfree(hwdev);
ida_remove:
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index 0ccca87f5271..293dd1c6c7b3 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -181,7 +181,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%s\n", sdata->label);
}
-static int __init get_logical_cpu(int hwcpu)
+static int get_logical_cpu(int hwcpu)
{
int cpu;
@@ -192,9 +192,8 @@ static int __init get_logical_cpu(int hwcpu)
return -ENOENT;
}
-static void __init make_sensor_label(struct device_node *np,
- struct sensor_data *sdata,
- const char *label)
+static void make_sensor_label(struct device_node *np,
+ struct sensor_data *sdata, const char *label)
{
u32 id;
size_t n;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 56ccb1ea7da5..f2c681971201 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -224,6 +224,15 @@ config I2C_NFORCE2_S4985
This driver can also be built as a module. If so, the module
will be called i2c-nforce2-s4985.
+config I2C_NVIDIA_GPU
+ tristate "NVIDIA GPU I2C controller"
+ depends on PCI
+ help
+ If you say yes to this option, support will be included for the
+ NVIDIA GPU I2C controller which is used to communicate with the GPU's
+ Type-C controller. This driver can also be built as a module called
+ i2c-nvidia-gpu.
+
config I2C_SIS5595
tristate "SiS 5595"
depends on PCI
@@ -752,7 +761,7 @@ config I2C_OCORES
config I2C_OMAP
tristate "OMAP I2C adapter"
- depends on ARCH_OMAP
+ depends on ARCH_OMAP || ARCH_K3
default y if MACH_OMAP_H3 || MACH_OMAP_OSK
help
If you say yes to this option, support will be included for the
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 18b26af82b1c..5f0cb6915969 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_I2C_ISCH) += i2c-isch.o
obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o
obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
obj-$(CONFIG_I2C_NFORCE2_S4985) += i2c-nforce2-s4985.o
+obj-$(CONFIG_I2C_NVIDIA_GPU) += i2c-nvidia-gpu.o
obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o
obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o
obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
new file mode 100644
index 000000000000..8822357bca0c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nvidia GPU I2C controller Driver
+ *
+ * Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
+ * Author: Ajay Gupta <ajayg@nvidia.com>
+ */
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/unaligned.h>
+
+/* I2C definitions */
+#define I2C_MST_CNTL 0x00
+#define I2C_MST_CNTL_GEN_START BIT(0)
+#define I2C_MST_CNTL_GEN_STOP BIT(1)
+#define I2C_MST_CNTL_CMD_READ (1 << 2)
+#define I2C_MST_CNTL_CMD_WRITE (2 << 2)
+#define I2C_MST_CNTL_BURST_SIZE_SHIFT 6
+#define I2C_MST_CNTL_GEN_NACK BIT(28)
+#define I2C_MST_CNTL_STATUS GENMASK(30, 29)
+#define I2C_MST_CNTL_STATUS_OKAY (0 << 29)
+#define I2C_MST_CNTL_STATUS_NO_ACK (1 << 29)
+#define I2C_MST_CNTL_STATUS_TIMEOUT (2 << 29)
+#define I2C_MST_CNTL_STATUS_BUS_BUSY (3 << 29)
+#define I2C_MST_CNTL_CYCLE_TRIGGER BIT(31)
+
+#define I2C_MST_ADDR 0x04
+
+#define I2C_MST_I2C0_TIMING 0x08
+#define I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ 0x10e
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT 16
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX 255
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CHECK BIT(24)
+
+#define I2C_MST_DATA 0x0c
+
+#define I2C_MST_HYBRID_PADCTL 0x20
+#define I2C_MST_HYBRID_PADCTL_MODE_I2C BIT(0)
+#define I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV BIT(14)
+#define I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV BIT(15)
+
+struct gpu_i2c_dev {
+ struct device *dev;
+ void __iomem *regs;
+ struct i2c_adapter adapter;
+ struct i2c_board_info *gpu_ccgx_ucsi;
+};
+
+static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)
+{
+ u32 val;
+
+ /* enable I2C */
+ val = readl(i2cd->regs + I2C_MST_HYBRID_PADCTL);
+ val |= I2C_MST_HYBRID_PADCTL_MODE_I2C |
+ I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+ I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV;
+ writel(val, i2cd->regs + I2C_MST_HYBRID_PADCTL);
+
+ /* enable 100KHZ mode */
+ val = I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ;
+ val |= (I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX
+ << I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT);
+ val |= I2C_MST_I2C0_TIMING_TIMEOUT_CHECK;
+ writel(val, i2cd->regs + I2C_MST_I2C0_TIMING);
+}
+
+static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
+{
+ unsigned long target = jiffies + msecs_to_jiffies(1000);
+ u32 val;
+
+ do {
+ val = readl(i2cd->regs + I2C_MST_CNTL);
+ if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
+ break;
+ if ((val & I2C_MST_CNTL_STATUS) !=
+ I2C_MST_CNTL_STATUS_BUS_BUSY)
+ break;
+ usleep_range(500, 600);
+ } while (time_is_after_jiffies(target));
+
+ if (time_is_before_jiffies(target)) {
+ dev_err(i2cd->dev, "i2c timeout error %x\n", val);
+ return -ETIME;
+ }
+
+ val = readl(i2cd->regs + I2C_MST_CNTL);
+ switch (val & I2C_MST_CNTL_STATUS) {
+ case I2C_MST_CNTL_STATUS_OKAY:
+ return 0;
+ case I2C_MST_CNTL_STATUS_NO_ACK:
+ return -EIO;
+ case I2C_MST_CNTL_STATUS_TIMEOUT:
+ return -ETIME;
+ default:
+ return 0;
+ }
+}
+
+static int gpu_i2c_read(struct gpu_i2c_dev *i2cd, u8 *data, u16 len)
+{
+ int status;
+ u32 val;
+
+ val = I2C_MST_CNTL_GEN_START | I2C_MST_CNTL_CMD_READ |
+ (len << I2C_MST_CNTL_BURST_SIZE_SHIFT) |
+ I2C_MST_CNTL_CYCLE_TRIGGER | I2C_MST_CNTL_GEN_NACK;
+ writel(val, i2cd->regs + I2C_MST_CNTL);
+
+ status = gpu_i2c_check_status(i2cd);
+ if (status < 0)
+ return status;
+
+ val = readl(i2cd->regs + I2C_MST_DATA);
+ switch (len) {
+ case 1:
+ data[0] = val;
+ break;
+ case 2:
+ put_unaligned_be16(val, data);
+ break;
+ case 3:
+ put_unaligned_be16(val >> 8, data);
+ data[2] = val;
+ break;
+ case 4:
+ put_unaligned_be32(val, data);
+ break;
+ default:
+ break;
+ }
+ return status;
+}
+
+static int gpu_i2c_start(struct gpu_i2c_dev *i2cd)
+{
+ writel(I2C_MST_CNTL_GEN_START, i2cd->regs + I2C_MST_CNTL);
+ return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_stop(struct gpu_i2c_dev *i2cd)
+{
+ writel(I2C_MST_CNTL_GEN_STOP, i2cd->regs + I2C_MST_CNTL);
+ return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_write(struct gpu_i2c_dev *i2cd, u8 data)
+{
+ u32 val;
+
+ writel(data, i2cd->regs + I2C_MST_DATA);
+
+ val = I2C_MST_CNTL_CMD_WRITE | (1 << I2C_MST_CNTL_BURST_SIZE_SHIFT);
+ writel(val, i2cd->regs + I2C_MST_CNTL);
+
+ return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct gpu_i2c_dev *i2cd = i2c_get_adapdata(adap);
+ int status, status2;
+ int i, j;
+
+ /*
+ * The controller supports a maximum 4-byte read due to a known
+ * limitation of sending STOP after every read.
+ */
+ for (i = 0; i < num; i++) {
+ if (msgs[i].flags & I2C_M_RD) {
+ /* program client address before starting read */
+ writel(msgs[i].addr, i2cd->regs + I2C_MST_ADDR);
+ /* gpu_i2c_read has implicit start */
+ status = gpu_i2c_read(i2cd, msgs[i].buf, msgs[i].len);
+ if (status < 0)
+ goto stop;
+ } else {
+ u8 addr = i2c_8bit_addr_from_msg(msgs + i);
+
+ status = gpu_i2c_start(i2cd);
+ if (status < 0) {
+ if (i == 0)
+ return status;
+ goto stop;
+ }
+
+ status = gpu_i2c_write(i2cd, addr);
+ if (status < 0)
+ goto stop;
+
+ for (j = 0; j < msgs[i].len; j++) {
+ status = gpu_i2c_write(i2cd, msgs[i].buf[j]);
+ if (status < 0)
+ goto stop;
+ }
+ }
+ }
+ status = gpu_i2c_stop(i2cd);
+ if (status < 0)
+ return status;
+
+ return i;
+stop:
+ status2 = gpu_i2c_stop(i2cd);
+ if (status2 < 0)
+ dev_err(i2cd->dev, "i2c stop failed %d\n", status2);
+ return status;
+}
+
+static const struct i2c_adapter_quirks gpu_i2c_quirks = {
+ .max_read_len = 4,
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+};
+
+static u32 gpu_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm gpu_i2c_algorithm = {
+ .master_xfer = gpu_i2c_master_xfer,
+ .functionality = gpu_i2c_functionality,
+};
+
+/*
+ * This driver is for Nvidia GPU cards with USB Type-C interface.
+ * We want to identify the cards using vendor ID and class code only,
+ * to avoid having to add a product ID for every new card that
+ * requires this driver.
+ * Currently there is no class code defined for a UCSI device over PCI,
+ * so we use the UNKNOWN class for now; it will be updated once UCSI
+ * over PCI gets a class code.
+ * There are no other NVIDIA cards with the UNKNOWN class code. Even if
+ * the driver gets loaded for an undesired card, an eventual i2c_read()
+ * (initiated from the UCSI i2c_client) or the UCSI commands will time
+ * out.
+ */
+#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80
+static const struct pci_device_id gpu_i2c_ids[] = {
+ { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_UNKNOWN << 8, 0xffffff00},
+ { }
+};
+MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
+
+static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
+{
+ struct i2c_client *ccgx_client;
+
+ i2cd->gpu_ccgx_ucsi = devm_kzalloc(i2cd->dev,
+ sizeof(*i2cd->gpu_ccgx_ucsi),
+ GFP_KERNEL);
+ if (!i2cd->gpu_ccgx_ucsi)
+ return -ENOMEM;
+
+ strlcpy(i2cd->gpu_ccgx_ucsi->type, "ccgx-ucsi",
+ sizeof(i2cd->gpu_ccgx_ucsi->type));
+ i2cd->gpu_ccgx_ucsi->addr = 0x8;
+ i2cd->gpu_ccgx_ucsi->irq = irq;
+ ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
+ if (!ccgx_client)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct gpu_i2c_dev *i2cd;
+ int status;
+
+ i2cd = devm_kzalloc(&pdev->dev, sizeof(*i2cd), GFP_KERNEL);
+ if (!i2cd)
+ return -ENOMEM;
+
+ i2cd->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, i2cd);
+
+ status = pcim_enable_device(pdev);
+ if (status < 0) {
+ dev_err(&pdev->dev, "pcim_enable_device failed %d\n", status);
+ return status;
+ }
+
+ pci_set_master(pdev);
+
+ i2cd->regs = pcim_iomap(pdev, 0, 0);
+ if (!i2cd->regs) {
+ dev_err(&pdev->dev, "pcim_iomap failed\n");
+ return -ENOMEM;
+ }
+
+ status = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (status < 0) {
+ dev_err(&pdev->dev, "pci_alloc_irq_vectors err %d\n", status);
+ return status;
+ }
+
+ gpu_enable_i2c_bus(i2cd);
+
+ i2c_set_adapdata(&i2cd->adapter, i2cd);
+ i2cd->adapter.owner = THIS_MODULE;
+ strlcpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
+ sizeof(i2cd->adapter.name));
+ i2cd->adapter.algo = &gpu_i2c_algorithm;
+ i2cd->adapter.quirks = &gpu_i2c_quirks;
+ i2cd->adapter.dev.parent = &pdev->dev;
+ status = i2c_add_adapter(&i2cd->adapter);
+ if (status < 0)
+ goto free_irq_vectors;
+
+ status = gpu_populate_client(i2cd, pdev->irq);
+ if (status < 0) {
+ dev_err(&pdev->dev, "gpu_populate_client failed %d\n", status);
+ goto del_adapter;
+ }
+
+ return 0;
+
+del_adapter:
+ i2c_del_adapter(&i2cd->adapter);
+free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+ return status;
+}
+
+static void gpu_i2c_remove(struct pci_dev *pdev)
+{
+ struct gpu_i2c_dev *i2cd = dev_get_drvdata(&pdev->dev);
+
+ i2c_del_adapter(&i2cd->adapter);
+ pci_free_irq_vectors(pdev);
+}
+
+static int gpu_i2c_resume(struct device *dev)
+{
+ struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev);
+
+ gpu_enable_i2c_bus(i2cd);
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(gpu_i2c_driver_pm, NULL, gpu_i2c_resume, NULL);
+
+static struct pci_driver gpu_i2c_driver = {
+ .name = "nvidia-gpu",
+ .id_table = gpu_i2c_ids,
+ .probe = gpu_i2c_probe,
+ .remove = gpu_i2c_remove,
+ .driver = {
+ .pm = &gpu_i2c_driver_pm,
+ },
+};
+
+module_pci_driver(gpu_i2c_driver);
+
+MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
+MODULE_DESCRIPTION("Nvidia GPU I2C controller Driver");
+MODULE_LICENSE("GPL v2");
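A side note on gpu_i2c_read() above: the controller returns up to four bytes of read data in the single 32-bit I2C_MST_DATA register, and the switch statement unpacks them big-endian into the caller's buffer. A minimal standalone sketch of that unpacking, in plain userspace C, with put_be16()/put_be32() standing in for the kernel's put_unaligned_be16/32():

    /* Userspace sketch (not kernel code) of the gpu_i2c_read() unpack. */
    #include <stdint.h>
    #include <stdio.h>

    static void put_be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = v; }
    static void put_be32(uint32_t v, uint8_t *p) { put_be16(v >> 16, p); put_be16(v, p + 2); }

    static void unpack_mst_data(uint32_t val, uint8_t *data, int len)
    {
        switch (len) {
        case 1:
            data[0] = val;
            break;
        case 2:
            put_be16(val, data);
            break;
        case 3:
            put_be16(val >> 8, data);
            data[2] = val;
            break;
        case 4:
            put_be32(val, data);
            break;
        }
    }

    int main(void)
    {
        uint8_t buf[4];

        unpack_mst_data(0xAABBCCDD, buf, 3);
        printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]); /* bb cc dd */
        return 0;
    }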
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 527f55c8c4c7..db075bc0d952 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -571,18 +571,19 @@ static int geni_i2c_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
- ret = i2c_add_adapter(&gi2c->adap);
- if (ret) {
- dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
- return ret;
- }
-
gi2c->suspended = 1;
pm_runtime_set_suspended(gi2c->se.dev);
pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
pm_runtime_use_autosuspend(gi2c->se.dev);
pm_runtime_enable(gi2c->se.dev);
+ ret = i2c_add_adapter(&gi2c->adap);
+ if (ret) {
+ dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
+ pm_runtime_disable(gi2c->se.dev);
+ return ret;
+ }
+
return 0;
}
@@ -590,8 +591,8 @@ static int geni_i2c_remove(struct platform_device *pdev)
{
struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
- pm_runtime_disable(gi2c->se.dev);
i2c_del_adapter(&gi2c->adap);
+ pm_runtime_disable(gi2c->se.dev);
return 0;
}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index bb2cd29e1658..d8f7000a466a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -797,7 +797,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
&entry, sizeof(entry));
- entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
+ entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
+ (BIT_ULL(52)-1)) & ~7ULL;
memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
&entry, sizeof(entry));
writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
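For reference, the corrected mask keeps bits [51:3] of the GA log address: a 52-bit physical address limit with 8-byte alignment. A tiny userspace check of the arithmetic (BIT_ULL re-declared locally, address value made up):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n) (1ULL << (n))

    int main(void)
    {
        uint64_t phys = 0x0123456789abcdefULL;  /* illustrative address */
        /* truncate to a 52-bit physical address, then 8-byte align */
        uint64_t entry = (phys & (BIT_ULL(52) - 1)) & ~7ULL;

        printf("entry = 0x%013llx\n", (unsigned long long)entry);
        return 0;
    }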
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f3ccf025108b..41a4b8808802 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3075,7 +3075,7 @@ static int copy_context_table(struct intel_iommu *iommu,
}
if (old_ce)
- iounmap(old_ce);
+ memunmap(old_ce);
ret = 0;
if (devfn < 0x80)
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index db301efe126d..887150907526 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -595,7 +595,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
pr_err("%s: Page request without PASID: %08llx %08llx\n",
iommu->name, ((unsigned long long *)req)[0],
((unsigned long long *)req)[1]);
- goto bad_req;
+ goto no_pasid;
}
if (!svm || svm->pasid != req->pasid) {
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index b98a03189580..ddf3a492e1d5 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -498,6 +498,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
+ if (!domain->mmu)
+ return;
+
/*
* Disable the context. Flush the TLB as required when modifying the
* context registers.
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index ce7acd115dd8..1870cf87afe1 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -75,8 +75,6 @@ static void pattern_trig_timer_function(struct timer_list *t)
{
struct pattern_trig_data *data = from_timer(data, t, timer);
- mutex_lock(&data->lock);
-
for (;;) {
if (!data->is_indefinite && !data->repeat)
break;
@@ -87,9 +85,10 @@ static void pattern_trig_timer_function(struct timer_list *t)
data->curr->brightness);
mod_timer(&data->timer,
jiffies + msecs_to_jiffies(data->curr->delta_t));
-
- /* Skip the tuple with zero duration */
- pattern_trig_update_patterns(data);
+ if (!data->next->delta_t) {
+ /* Skip the tuple with zero duration */
+ pattern_trig_update_patterns(data);
+ }
/* Select next tuple */
pattern_trig_update_patterns(data);
} else {
@@ -116,8 +115,6 @@ static void pattern_trig_timer_function(struct timer_list *t)
break;
}
-
- mutex_unlock(&data->lock);
}
static int pattern_trig_start_pattern(struct led_classdev *led_cdev)
@@ -176,14 +173,10 @@ static ssize_t repeat_store(struct device *dev, struct device_attribute *attr,
if (res < -1 || res == 0)
return -EINVAL;
- /*
- * Clear previous patterns' performence firstly, and remove the timer
- * without mutex lock to avoid dead lock.
- */
- del_timer_sync(&data->timer);
-
mutex_lock(&data->lock);
+ del_timer_sync(&data->timer);
+
if (data->is_hw_pattern)
led_cdev->pattern_clear(led_cdev);
@@ -234,14 +227,10 @@ static ssize_t pattern_trig_store_patterns(struct led_classdev *led_cdev,
struct pattern_trig_data *data = led_cdev->trigger_data;
int ccount, cr, offset = 0, err = 0;
- /*
- * Clear previous patterns' performence firstly, and remove the timer
- * without mutex lock to avoid dead lock.
- */
- del_timer_sync(&data->timer);
-
mutex_lock(&data->lock);
+ del_timer_sync(&data->timer);
+
if (data->is_hw_pattern)
led_cdev->pattern_clear(led_cdev);
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 31d1f4ab915e..65a933a21e68 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -807,7 +807,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
}
if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) {
- dprintk(1, "%s: transmit queue full\n", __func__);
+ dprintk(2, "%s: transmit queue full\n", __func__);
return -EBUSY;
}
@@ -1180,6 +1180,8 @@ static int cec_config_log_addr(struct cec_adapter *adap,
{
struct cec_log_addrs *las = &adap->log_addrs;
struct cec_msg msg = { };
+ const unsigned int max_retries = 2;
+ unsigned int i;
int err;
if (cec_has_log_addr(adap, log_addr))
@@ -1188,19 +1190,44 @@ static int cec_config_log_addr(struct cec_adapter *adap,
/* Send poll message */
msg.len = 1;
msg.msg[0] = (log_addr << 4) | log_addr;
- err = cec_transmit_msg_fh(adap, &msg, NULL, true);
- /*
- * While trying to poll the physical address was reset
- * and the adapter was unconfigured, so bail out.
- */
- if (!adap->is_configuring)
- return -EINTR;
+ for (i = 0; i < max_retries; i++) {
+ err = cec_transmit_msg_fh(adap, &msg, NULL, true);
- if (err)
- return err;
+ /*
+ * While trying to poll the physical address was reset
+ * and the adapter was unconfigured, so bail out.
+ */
+ if (!adap->is_configuring)
+ return -EINTR;
+
+ if (err)
+ return err;
- if (msg.tx_status & CEC_TX_STATUS_OK)
+ /*
+ * The message was aborted due to a disconnect or
+ * unconfigure, just bail out.
+ */
+ if (msg.tx_status & CEC_TX_STATUS_ABORTED)
+ return -EINTR;
+ if (msg.tx_status & CEC_TX_STATUS_OK)
+ return 0;
+ if (msg.tx_status & CEC_TX_STATUS_NACK)
+ break;
+ /*
+ * Retry up to max_retries times if the message was neither
+ * OKed or NACKed. This can happen due to e.g. a Lost
+ * Arbitration condition.
+ */
+ }
+
+ /*
+ * If we are unable to get an OK or a NACK after max_retries attempts
+ * (and note that each attempt already consists of four polls), then
+ * we assume that something is really weird and that it is not a
+ * good idea to try and claim this logical address.
+ */
+ if (i == max_retries)
return 0;
/*
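The poll-retry flow added above is compact enough to model outside the kernel. A hedged userspace sketch of the loop's decision points; fake_transmit() and the TX_* flags are stand-ins, not the CEC framework API:

    #include <stdio.h>

    #define TX_OK   0x1
    #define TX_NACK 0x2

    /* pretend attempt 0 loses arbitration (no status), attempt 1 is NACKed */
    static int fake_transmit(int attempt)
    {
        return attempt == 0 ? 0 : TX_NACK;
    }

    int main(void)
    {
        const int max_retries = 2;
        int i, status = 0;

        for (i = 0; i < max_retries; i++) {
            status = fake_transmit(i);
            if (status & TX_OK) {        /* someone ACKed: address in use */
                printf("address in use\n");
                return 0;
            }
            if (status & TX_NACK)        /* nobody home: address is free */
                break;
            /* neither OK nor NACK (e.g. lost arbitration): retry */
        }
        if (i == max_retries)
            printf("no OK/NACK after %d tries, not claiming\n", max_retries);
        else
            printf("NACKed, free to claim\n");
        return 0;
    }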
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index f3899cc84e27..4551bca3c127 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -550,7 +550,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_
buffer[3] = 0;
buffer[3] = hdmi_infoframe_checksum(buffer, len + 4);
- if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
return;
}
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 9eb7c70a7712..9f99ef38bcca 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2420,7 +2420,7 @@ static int adv76xx_read_infoframe(struct v4l2_subdev *sd, int index,
buffer[i + 3] = infoframe_read(sd,
adv76xx_cri[index].payload_addr + i);
- if (hdmi_infoframe_unpack(frame, buffer) < 0) {
+ if (hdmi_infoframe_unpack(frame, buffer, sizeof(buffer)) < 0) {
v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__,
adv76xx_cri[index].desc);
return -ENOENT;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 4721d49dcf0f..0e6384f2016a 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2574,7 +2574,7 @@ static void log_infoframe(struct v4l2_subdev *sd, struct adv7842_cfg_read_infofr
for (i = 0; i < len; i++)
buffer[i + 3] = infoframe_read(sd, cri->payload_addr + i);
- if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
return;
}
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index ca5d92942820..22cafc07de25 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -444,7 +444,7 @@ static void print_avi_infoframe(struct v4l2_subdev *sd)
i2c_rd(sd, PK_AVI_0HEAD, buffer, HDMI_INFOFRAME_SIZE(AVI));
- if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
v4l2_err(sd, "%s: unpack of AVI infoframe failed\n", __func__);
return;
}
@@ -1918,7 +1918,6 @@ static int tc358743_probe_of(struct tc358743_state *state)
ret = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep), &endpoint);
if (ret) {
dev_err(dev, "failed to parse endpoint\n");
- ret = ret;
goto put_node;
}
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index c4c2a6134e1e..e8613e364403 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -1253,7 +1253,7 @@ tda1997x_parse_infoframe(struct tda1997x_state *state, u16 addr)
/* read data */
len = io_readn(sd, addr, sizeof(buffer), buffer);
- err = hdmi_infoframe_unpack(&frame, buffer);
+ err = hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer));
if (err) {
v4l_err(state->client,
"failed parsing %d byte infoframe: 0x%04x/0x%02x\n",
@@ -1928,7 +1928,7 @@ static int tda1997x_log_infoframe(struct v4l2_subdev *sd, int addr)
/* read data */
len = io_readn(sd, addr, sizeof(buffer), buffer);
v4l2_dbg(1, debug, sd, "infoframe: addr=%d len=%d\n", addr, len);
- err = hdmi_infoframe_unpack(&frame, buffer);
+ err = hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer));
if (err) {
v4l_err(state->client,
"failed parsing %d byte infoframe: 0x%04x/0x%02x\n",
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 452eb9b42140..447baaebca44 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1844,14 +1844,12 @@ fail_mutex_destroy:
static void cio2_pci_remove(struct pci_dev *pci_dev)
{
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
- unsigned int i;
+ media_device_unregister(&cio2->media_dev);
cio2_notifier_exit(cio2);
+ cio2_queues_exit(cio2);
cio2_fbpt_exit_dummy(cio2);
- for (i = 0; i < CIO2_QUEUES; i++)
- cio2_queue_exit(cio2, &cio2->queue[i]);
v4l2_device_unregister(&cio2->v4l2_dev);
- media_device_unregister(&cio2->media_dev);
media_device_cleanup(&cio2->media_dev);
mutex_destroy(&cio2->lock);
}
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 77fb7987b42f..13f2828d880d 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1587,6 +1587,8 @@ static void isp_pm_complete(struct device *dev)
static void isp_unregister_entities(struct isp_device *isp)
{
+ media_device_unregister(&isp->media_dev);
+
omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
@@ -1597,7 +1599,6 @@ static void isp_unregister_entities(struct isp_device *isp)
omap3isp_stat_unregister_entities(&isp->isp_hist);
v4l2_device_unregister(&isp->v4l2_dev);
- media_device_unregister(&isp->media_dev);
media_device_cleanup(&isp->media_dev);
}
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 1eb9132bfc85..b292cff26c86 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(debug, " activates debug info");
#define MAX_WIDTH 4096U
#define MIN_WIDTH 640U
#define MAX_HEIGHT 2160U
-#define MIN_HEIGHT 480U
+#define MIN_HEIGHT 360U
#define dprintk(dev, fmt, arg...) \
v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index af150a0395df..d82db738f174 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -1009,7 +1009,7 @@ static const struct v4l2_m2m_ops m2m_ops = {
static const struct media_device_ops m2m_media_ops = {
.req_validate = vb2_request_validate,
- .req_queue = vb2_m2m_request_queue,
+ .req_queue = v4l2_m2m_request_queue,
};
static int vim2m_probe(struct platform_device *pdev)
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 6e37950292cd..5f2b033a7a42 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1664,6 +1664,11 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME)
return -EINVAL;
+ if (p_mpeg2_slice_params->pad ||
+ p_mpeg2_slice_params->picture.pad ||
+ p_mpeg2_slice_params->sequence.pad)
+ return -EINVAL;
+
return 0;
case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index a3ef1f50a4b3..481e3c65cf97 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -193,6 +193,22 @@ int v4l2_event_pending(struct v4l2_fh *fh)
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
+static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
+{
+ struct v4l2_fh *fh = sev->fh;
+ unsigned int i;
+
+ lockdep_assert_held(&fh->subscribe_lock);
+ assert_spin_locked(&fh->vdev->fh_lock);
+
+ /* Remove any pending events for this subscription */
+ for (i = 0; i < sev->in_use; i++) {
+ list_del(&sev->events[sev_pos(sev, i)].list);
+ fh->navailable--;
+ }
+ list_del(&sev->list);
+}
+
int v4l2_event_subscribe(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub, unsigned elems,
const struct v4l2_subscribed_event_ops *ops)
@@ -224,27 +240,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
+ if (!found_ev)
+ list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (found_ev) {
/* Already listening */
kvfree(sev);
- goto out_unlock;
- }
-
- if (sev->ops && sev->ops->add) {
+ } else if (sev->ops && sev->ops->add) {
ret = sev->ops->add(sev, elems);
if (ret) {
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ __v4l2_event_unsubscribe(sev);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
kvfree(sev);
- goto out_unlock;
}
}
- spin_lock_irqsave(&fh->vdev->fh_lock, flags);
- list_add(&sev->list, &fh->subscribed);
- spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
-
-out_unlock:
mutex_unlock(&fh->subscribe_lock);
return ret;
@@ -279,7 +291,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
{
struct v4l2_subscribed_event *sev;
unsigned long flags;
- int i;
if (sub->type == V4L2_EVENT_ALL) {
v4l2_event_unsubscribe_all(fh);
@@ -291,14 +302,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
- if (sev != NULL) {
- /* Remove any pending events for this subscription */
- for (i = 0; i < sev->in_use; i++) {
- list_del(&sev->events[sev_pos(sev, i)].list);
- fh->navailable--;
- }
- list_del(&sev->list);
- }
+ if (sev != NULL)
+ __v4l2_event_unsubscribe(sev);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index d7806db222d8..1ed2465972ac 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -953,7 +953,7 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
-void vb2_m2m_request_queue(struct media_request *req)
+void v4l2_m2m_request_queue(struct media_request *req)
{
struct media_request_object *obj, *obj_safe;
struct v4l2_m2m_ctx *m2m_ctx = NULL;
@@ -997,7 +997,7 @@ void vb2_m2m_request_queue(struct media_request *req)
if (m2m_ctx)
v4l2_m2m_try_schedule(m2m_ctx);
}
-EXPORT_SYMBOL_GPL(vb2_m2m_request_queue);
+EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
/* Videobuf2 ioctl helpers */
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index b2a0340f277e..d8e3cc2dc747 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -132,7 +132,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = {
MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
#endif
-static inline const struct atmel_ssc_platform_data * __init
+static inline const struct atmel_ssc_platform_data *
atmel_ssc_get_driver_data(struct platform_device *pdev)
{
if (pdev->dev.of_node) {
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 313da3150262..1540a7785e14 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -27,6 +27,9 @@
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>
+
+#include <linux/nospec.h>
+
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
@@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg)
/* Currently, only dump by gid is implemented */
if (req.gid >= gru_max_gids)
return -EINVAL;
+ req.gid = array_index_nospec(req.gid, gru_max_gids);
gru = GID_TO_GRU(req.gid);
ubuf = req.buf;
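array_index_nospec() clamps an already bounds-checked, user-controlled index so speculative execution cannot run ahead with an out-of-range value. A userspace sketch of the generic masking trick (modelled on the kernel's fallback array_index_mask_nospec(); it assumes arithmetic right shift of negative values, as the kernel does):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(long))

    /* ~0UL when index < size, 0UL otherwise, with no conditional branch */
    static unsigned long index_mask(unsigned long index, unsigned long size)
    {
        return ~(long)(index | (size - 1 - index)) >> (BITS_PER_LONG - 1);
    }

    int main(void)
    {
        unsigned long gid = 3, max_gids = 4;

        if (gid >= max_gids)                /* architectural bounds check */
            return 1;
        gid &= index_mask(gid, max_gids);   /* speculation-safe clamp */
        printf("safe gid: %lu\n", gid);
        return 0;
    }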
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 7bfd366d970d..c4115bae5db1 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -12,6 +12,7 @@
* - JMicron (hardware and technical support)
*/
+#include <linux/bitfield.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/highmem.h>
@@ -462,6 +463,9 @@ struct intel_host {
u32 dsm_fns;
int drv_strength;
bool d3_retune;
+ bool rpm_retune_ok;
+ u32 glk_rx_ctrl1;
+ u32 glk_tun_val;
};
static const guid_t intel_dsm_guid =
@@ -791,6 +795,77 @@ cleanup:
return ret;
}
+#ifdef CONFIG_PM
+#define GLK_RX_CTRL1 0x834
+#define GLK_TUN_VAL 0x840
+#define GLK_PATH_PLL GENMASK(13, 8)
+#define GLK_DLY GENMASK(6, 0)
+/* Workaround firmware failing to restore the tuning value */
+static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ struct intel_host *intel_host = sdhci_pci_priv(slot);
+ struct sdhci_host *host = slot->host;
+ u32 glk_rx_ctrl1;
+ u32 glk_tun_val;
+ u32 dly;
+
+ if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
+ return;
+
+ glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
+ glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
+
+ if (susp) {
+ intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
+ intel_host->glk_tun_val = glk_tun_val;
+ return;
+ }
+
+ if (!intel_host->glk_tun_val)
+ return;
+
+ if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
+ intel_host->rpm_retune_ok = true;
+ return;
+ }
+
+ dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
+ (intel_host->glk_tun_val << 1));
+ if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
+ return;
+
+ glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
+ sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
+
+ intel_host->rpm_retune_ok = true;
+ chip->rpm_retune = true;
+ mmc_retune_needed(host->mmc);
+ pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc));
+}
+
+static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
+{
+ if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
+ !chip->rpm_retune)
+ glk_rpm_retune_wa(chip, susp);
+}
+
+static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
+{
+ glk_rpm_retune_chk(chip, true);
+
+ return sdhci_cqhci_runtime_suspend(chip);
+}
+
+static int glk_runtime_resume(struct sdhci_pci_chip *chip)
+{
+ glk_rpm_retune_chk(chip, false);
+
+ return sdhci_cqhci_runtime_resume(chip);
+}
+#endif
+
#ifdef CONFIG_ACPI
static int ni_set_max_freq(struct sdhci_pci_slot *slot)
{
@@ -879,8 +954,8 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
.resume = sdhci_cqhci_resume,
#endif
#ifdef CONFIG_PM
- .runtime_suspend = sdhci_cqhci_runtime_suspend,
- .runtime_resume = sdhci_cqhci_runtime_resume,
+ .runtime_suspend = glk_runtime_suspend,
+ .runtime_resume = glk_runtime_resume,
#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
@@ -1762,8 +1837,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
device_init_wakeup(&pdev->dev, true);
if (slot->cd_idx >= 0) {
- ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
slot->cd_override_level, 0, NULL);
+ if (ret && ret != -EPROBE_DEFER)
+ ret = mmc_gpiod_request_cd(host->mmc, NULL,
+ slot->cd_idx,
+ slot->cd_override_level,
+ 0, NULL);
if (ret == -EPROBE_DEFER)
goto remove;
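The delay computation in glk_rpm_retune_wa() is plain FIELD_GET/FIELD_PREP arithmetic: pull the PLL path field out of RX_CTRL1, derive a new delay from it plus twice the tuning value, and splice it back into the low field. A userspace sketch with simplified field helpers (the register value is made up; __builtin_ctz() stands in for the kernel's mask shift):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define GLK_PATH_PLL    GENMASK(13, 8)
    #define GLK_DLY         GENMASK(6, 0)

    static uint32_t field_get(uint32_t mask, uint32_t reg)
    {
        return (reg & mask) >> __builtin_ctz(mask);
    }

    static uint32_t field_prep(uint32_t mask, uint32_t val)
    {
        return (val << __builtin_ctz(mask)) & mask;
    }

    int main(void)
    {
        uint32_t rx_ctrl1 = 0x00001234, tun_val = 3;    /* illustrative */
        uint32_t dly = field_prep(GLK_DLY,
                                  field_get(GLK_PATH_PLL, rx_ctrl1) +
                                  (tun_val << 1));

        rx_ctrl1 = (rx_ctrl1 & ~GLK_DLY) | dly;         /* splice field */
        printf("rx_ctrl1 = 0x%08x\n", rx_ctrl1);
        return 0;
    }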
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index e514d57a0419..aa983422aa97 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers"
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
- select BCH_CONST_PARAMS
+ select BCH_CONST_PARAMS if !MTD_NAND_BCH
select BITREVERSE
help
This provides an MTD device driver for the M-Systems DiskOnChip
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 784c6e1a0391..fd5fe12d7461 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -221,7 +221,14 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
info->mtd = info->subdev[0].mtd;
ret = 0;
} else if (info->num_subdev > 1) {
- struct mtd_info *cdev[nr];
+ struct mtd_info **cdev;
+
+ cdev = kmalloc_array(nr, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
/*
* We detected multiple devices. Concatenate them together.
*/
@@ -230,6 +237,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
info->mtd = mtd_concat_create(cdev, info->num_subdev,
plat->name);
+ kfree(cdev);
if (info->mtd == NULL) {
ret = -ENXIO;
goto err;
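The sa1100 hunk replaces a variable-length array, which the kernel has been removing tree-wide, with kmalloc_array(), whose point is the overflow-checked multiplication. A userspace analogue of the pattern; alloc_array() and the local struct are simplified stand-ins:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *alloc_array(size_t n, size_t size)
    {
        if (size && n > SIZE_MAX / size)
            return NULL;                /* n * size would overflow */
        return malloc(n * size);
    }

    struct mtd_info { const char *name; }; /* stand-in for the real struct */

    int main(void)
    {
        size_t nr = 4;
        struct mtd_info **cdev = alloc_array(nr, sizeof(*cdev));

        if (!cdev)
            return 1;                   /* the driver returns -ENOMEM */
        /* ... concatenate the subdevices, then drop the temporary ... */
        free(cdev);
        return 0;
    }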
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index fb33f6be7c4f..ad720494e8f7 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -2032,8 +2032,7 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
int ret;
nand_np = dev->of_node;
- nfc_np = of_find_compatible_node(dev->of_node, NULL,
- "atmel,sama5d3-nfc");
+ nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
if (!nfc_np) {
dev_err(dev, "Could not find device node for sama5d3-nfc\n");
return -ENODEV;
@@ -2447,15 +2446,19 @@ static int atmel_nand_controller_probe(struct platform_device *pdev)
}
if (caps->legacy_of_bindings) {
+ struct device_node *nfc_node;
u32 ale_offs = 21;
/*
* If we are parsing legacy DT props and the DT contains a
* valid NFC node, forward the request to the sama5 logic.
*/
- if (of_find_compatible_node(pdev->dev.of_node, NULL,
- "atmel,sama5d3-nfc"))
+ nfc_node = of_get_compatible_child(pdev->dev.of_node,
+ "atmel,sama5d3-nfc");
+ if (nfc_node) {
caps = &atmel_sama5_nand_caps;
+ of_node_put(nfc_node);
+ }
/*
* Even if the compatible says we are dealing with an
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 05bd0779fe9b..71050a0b31df 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -590,7 +590,6 @@ retry:
/**
* panic_nand_wait - [GENERIC] wait until the command is done
- * @mtd: MTD device structure
* @chip: NAND chip structure
* @timeo: timeout
*
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index ef75dfa62a4f..699d3cf49c6d 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -150,15 +150,15 @@
#define NAND_VERSION_MINOR_SHIFT 16
/* NAND OP_CMDs */
-#define PAGE_READ 0x2
-#define PAGE_READ_WITH_ECC 0x3
-#define PAGE_READ_WITH_ECC_SPARE 0x4
-#define PROGRAM_PAGE 0x6
-#define PAGE_PROGRAM_WITH_ECC 0x7
-#define PROGRAM_PAGE_SPARE 0x9
-#define BLOCK_ERASE 0xa
-#define FETCH_ID 0xb
-#define RESET_DEVICE 0xd
+#define OP_PAGE_READ 0x2
+#define OP_PAGE_READ_WITH_ECC 0x3
+#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
+#define OP_PROGRAM_PAGE 0x6
+#define OP_PAGE_PROGRAM_WITH_ECC 0x7
+#define OP_PROGRAM_PAGE_SPARE 0x9
+#define OP_BLOCK_ERASE 0xa
+#define OP_FETCH_ID 0xb
+#define OP_RESET_DEVICE 0xd
/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
@@ -692,11 +692,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
if (read) {
if (host->use_ecc)
- cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+ cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
else
- cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
+ cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
} else {
- cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+ cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
}
if (host->use_ecc) {
@@ -1170,7 +1170,7 @@ static int nandc_param(struct qcom_nand_host *host)
* in use. we configure the controller to perform a raw read of 512
* bytes to read onfi params
*/
- nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
+ nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
nandc_set_reg(nandc, NAND_ADDR0, 0);
nandc_set_reg(nandc, NAND_ADDR1, 0);
nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
@@ -1224,7 +1224,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
nandc_set_reg(nandc, NAND_FLASH_CMD,
- BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
+ OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
nandc_set_reg(nandc, NAND_ADDR0, page_addr);
nandc_set_reg(nandc, NAND_ADDR1, 0);
nandc_set_reg(nandc, NAND_DEV0_CFG0,
@@ -1255,7 +1255,7 @@ static int read_id(struct qcom_nand_host *host, int column)
if (column == -1)
return 0;
- nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
+ nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
nandc_set_reg(nandc, NAND_ADDR0, column);
nandc_set_reg(nandc, NAND_ADDR1, 0);
nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
@@ -1276,7 +1276,7 @@ static int reset(struct qcom_nand_host *host)
struct nand_chip *chip = &host->chip;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
+ nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index e24db817154e..04cedd3a2bf6 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -644,9 +644,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
ndelay(cqspi->wr_delay);
while (remaining > 0) {
+ size_t write_words, mod_bytes;
+
write_bytes = remaining > page_size ? page_size : remaining;
- iowrite32_rep(cqspi->ahb_base, txbuf,
- DIV_ROUND_UP(write_bytes, 4));
+ write_words = write_bytes / 4;
+ mod_bytes = write_bytes % 4;
+ /* Write 4 bytes at a time then single bytes. */
+ if (write_words) {
+ iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
+ txbuf += (write_words * 4);
+ }
+ if (mod_bytes) {
+ unsigned int temp = 0xFFFFFFFF;
+
+ memcpy(&temp, txbuf, mod_bytes);
+ iowrite32(temp, cqspi->ahb_base);
+ txbuf += mod_bytes;
+ }
if (!wait_for_completion_timeout(&cqspi->transfer_complete,
msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
@@ -655,7 +669,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
goto failwr;
}
- txbuf += write_bytes;
remaining -= write_bytes;
if (remaining > 0)
@@ -996,7 +1009,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
err_unmap:
dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
- return 0;
+ return ret;
}
static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
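The reworked write loop stops over-reading the caller's buffer: whole 32-bit words go out first, then any 1-3 trailing bytes are merged into a word pre-filled with 0xFF (NOR flash erases to all-ones, so the padding leaves the extra cells untouched). A standalone sketch of the split, with printf() standing in for iowrite32() and host byte order assumed:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void write_chunk(const uint8_t *txbuf, size_t write_bytes)
    {
        size_t write_words = write_bytes / 4;
        size_t mod_bytes = write_bytes % 4;
        uint32_t word;

        while (write_words--) {
            memcpy(&word, txbuf, 4);
            printf("word: 0x%08x\n", word);     /* iowrite32 stand-in */
            txbuf += 4;
        }
        if (mod_bytes) {
            uint32_t temp = 0xFFFFFFFF;         /* erased-flash padding */

            memcpy(&temp, txbuf, mod_bytes);
            printf("tail: 0x%08x\n", temp);
        }
    }

    int main(void)
    {
        const uint8_t data[7] = { 1, 2, 3, 4, 5, 6, 7 };

        write_chunk(data, sizeof(data));
        return 0;
    }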
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 9407ca5f9443..93c9bc8931fc 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -2156,7 +2156,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
* @nor: pointer to a 'struct spi_nor'
* @addr: offset in the serial flash memory
* @len: number of bytes to read
- * @buf: buffer where the data is copied into
+ * @buf: buffer where the data is copied into (dma-safe memory)
*
* Return: 0 on success, -errno otherwise.
*/
@@ -2522,6 +2522,34 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
}
/**
+ * spi_nor_sort_erase_mask() - sort erase mask
+ * @map: the erase map of the SPI NOR
+ * @erase_mask: the erase type mask to be sorted
+ *
+ * Replicate the sort done for the map's erase types in BFPT: sort the erase
+ * mask in ascending order with the smallest erase type size starting from
+ * BIT(0) in the sorted erase mask.
+ *
+ * Return: sorted erase mask.
+ */
+static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
+{
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ int i;
+ u8 sorted_erase_mask = 0;
+
+ if (!erase_mask)
+ return 0;
+
+ /* Replicate the sort done for the map's erase types. */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
+ sorted_erase_mask |= BIT(i);
+
+ return sorted_erase_mask;
+}
+
+/**
* spi_nor_regions_sort_erase_types() - sort erase types in each region
* @map: the erase map of the SPI NOR
*
@@ -2536,19 +2564,13 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
{
struct spi_nor_erase_region *region = map->regions;
- struct spi_nor_erase_type *erase_type = map->erase_type;
- int i;
u8 region_erase_mask, sorted_erase_mask;
while (region) {
region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
- /* Replicate the sort done for the map's erase types. */
- sorted_erase_mask = 0;
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
- if (erase_type[i].size &&
- region_erase_mask & BIT(erase_type[i].idx))
- sorted_erase_mask |= BIT(i);
+ sorted_erase_mask = spi_nor_sort_erase_mask(map,
+ region_erase_mask);
/* Overwrite erase mask. */
region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
@@ -2855,52 +2877,84 @@ static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
* spi_nor_get_map_in_use() - get the configuration map in use
* @nor: pointer to a 'struct spi_nor'
* @smpt: pointer to the sector map parameter table
+ * @smpt_len: sector map parameter table length
+ *
+ * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
*/
-static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt)
+static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
+ u8 smpt_len)
{
- const u32 *ret = NULL;
- u32 i, addr;
+ const u32 *ret;
+ u8 *buf;
+ u32 addr;
int err;
+ u8 i;
u8 addr_width, read_opcode, read_dummy;
- u8 read_data_mask, data_byte, map_id;
+ u8 read_data_mask, map_id;
+
+ /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
addr_width = nor->addr_width;
read_dummy = nor->read_dummy;
read_opcode = nor->read_opcode;
map_id = 0;
- i = 0;
/* Determine if there are any optional Detection Command Descriptors */
- while (!(smpt[i] & SMPT_DESC_TYPE_MAP)) {
+ for (i = 0; i < smpt_len; i += 2) {
+ if (smpt[i] & SMPT_DESC_TYPE_MAP)
+ break;
+
read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
addr = smpt[i + 1];
- err = spi_nor_read_raw(nor, addr, 1, &data_byte);
- if (err)
+ err = spi_nor_read_raw(nor, addr, 1, buf);
+ if (err) {
+ ret = ERR_PTR(err);
goto out;
+ }
/*
* Build an index value that is used to select the Sector Map
* Configuration that is currently in use.
*/
- map_id = map_id << 1 | !!(data_byte & read_data_mask);
- i = i + 2;
+ map_id = map_id << 1 | !!(*buf & read_data_mask);
}
- /* Find the matching configuration map */
- while (SMPT_MAP_ID(smpt[i]) != map_id) {
+ /*
+ * If command descriptors are provided, they always precede map
+ * descriptors in the table. There is no need to restart the
+ * iteration over the smpt array from the beginning.
+ *
+ * Find the matching configuration map.
+ */
+ ret = ERR_PTR(-EINVAL);
+ while (i < smpt_len) {
+ if (SMPT_MAP_ID(smpt[i]) == map_id) {
+ ret = smpt + i;
+ break;
+ }
+
+ /*
+ * If there are no more configuration map descriptors and no
+ * configuration ID matched the configuration identifier, the
+ * sector address map is unknown.
+ */
if (smpt[i] & SMPT_DESC_END)
- goto out;
+ break;
+
/* increment the table index to the next map */
i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
}
- ret = smpt + i;
/* fall through */
out:
+ kfree(buf);
nor->addr_width = addr_width;
nor->read_dummy = read_dummy;
nor->read_opcode = read_opcode;
@@ -2946,7 +3000,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
u64 offset;
u32 region_count;
int i, j;
- u8 erase_type;
+ u8 erase_type, uniform_erase_type;
region_count = SMPT_MAP_REGION_COUNT(*smpt);
/*
@@ -2959,7 +3013,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
return -ENOMEM;
map->regions = region;
- map->uniform_erase_type = 0xff;
+ uniform_erase_type = 0xff;
offset = 0;
/* Populate regions. */
for (i = 0; i < region_count; i++) {
@@ -2974,12 +3028,15 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
* Save the erase types that are supported in all regions and
* can erase the entire flash memory.
*/
- map->uniform_erase_type &= erase_type;
+ uniform_erase_type &= erase_type;
offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
region[i].size;
}
+ map->uniform_erase_type = spi_nor_sort_erase_mask(map,
+ uniform_erase_type);
+
spi_nor_region_mark_end(&region[i - 1]);
return 0;
@@ -3020,9 +3077,9 @@ static int spi_nor_parse_smpt(struct spi_nor *nor,
for (i = 0; i < smpt_header->length; i++)
smpt[i] = le32_to_cpu(smpt[i]);
- sector_map = spi_nor_get_map_in_use(nor, smpt);
- if (!sector_map) {
- ret = -EINVAL;
+ sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
+ if (IS_ERR(sector_map)) {
+ ret = PTR_ERR(sector_map);
goto out;
}
@@ -3125,7 +3182,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
if (err)
goto exit;
- /* Parse other parameter headers. */
+ /* Parse optional parameter tables. */
for (i = 0; i < header.nph; i++) {
param_header = &param_headers[i];
@@ -3138,8 +3195,17 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
break;
}
- if (err)
- goto exit;
+ if (err) {
+ dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
+ SFDP_PARAM_HEADER_ID(param_header));
+ /*
+ * Let's not drop all information we extracted so far
+ * if optional table parsers fail. In case of failing,
+ * each optional parser is responsible to roll back to
+ * the previously known spi_nor data.
+ */
+ err = 0;
+ }
}
exit:
@@ -3250,12 +3316,14 @@ static int spi_nor_init_params(struct spi_nor *nor,
memcpy(&sfdp_params, params, sizeof(sfdp_params));
memcpy(&prev_map, &nor->erase_map, sizeof(prev_map));
- if (spi_nor_parse_sfdp(nor, &sfdp_params))
+ if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+ nor->addr_width = 0;
/* restore previous erase map */
memcpy(&nor->erase_map, &prev_map,
sizeof(nor->erase_map));
- else
+ } else {
memcpy(params, &sfdp_params, sizeof(*params));
+ }
}
return 0;
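spi_nor_sort_erase_mask() only remaps bit positions: a mask expressed in BFPT slot numbers (erase_type[i].idx) is rewritten onto the size-sorted order, so BIT(0) of the result always means the smallest erase size. A userspace sketch with made-up table entries:

    #include <stdint.h>
    #include <stdio.h>

    #define SNOR_ERASE_TYPE_MAX 4
    #define BIT(n) (1u << (n))

    struct erase_type {
        uint32_t size;  /* 0 if the slot is unused */
        uint8_t idx;    /* original BFPT position */
    };

    static uint8_t sort_erase_mask(const struct erase_type *et, uint8_t mask)
    {
        uint8_t sorted = 0;
        int i;

        if (!mask)
            return 0;
        for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
            if (et[i].size && (mask & BIT(et[i].idx)))
                sorted |= BIT(i);
        return sorted;
    }

    int main(void)
    {
        /* sorted by size: the 4K type was BFPT slot 1, the 64K type slot 0 */
        const struct erase_type et[SNOR_ERASE_TYPE_MAX] = {
            { 4096, 1 }, { 65536, 0 }, { 0, 2 }, { 0, 3 },
        };

        printf("sorted mask: 0x%x\n", sort_erase_mask(et, BIT(0) | BIT(1)));
        return 0;
    }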
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ffa37adb7681..333387f1f1fe 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3112,13 +3112,13 @@ static int bond_slave_netdev_event(unsigned long event,
case NETDEV_CHANGE:
/* For 802.3ad mode only:
* Getting invalid Speed/Duplex values here will put slave
- * in weird state. So mark it as link-down for the time
+ * in weird state. So mark it as link-fail for the time
* being and let link-monitoring (miimon) set it right when
* correct speeds/duplex are available.
*/
if (bond_update_speed_duplex(slave) &&
BOND_MODE(bond) == BOND_MODE_8023AD)
- slave->link = BOND_LINK_DOWN;
+ slave->link = BOND_LINK_FAIL;
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_adapter_speed_duplex_changed(slave);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 49163570a63a..3b3f88ffab53 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -477,6 +477,34 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
}
EXPORT_SYMBOL_GPL(can_put_echo_skb);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb = priv->echo_skb[idx];
+ struct canfd_frame *cf;
+
+ if (idx >= priv->echo_skb_max) {
+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+ __func__, idx, priv->echo_skb_max);
+ return NULL;
+ }
+
+ if (!skb) {
+ netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
+ __func__, idx);
+ return NULL;
+ }
+
+ /* Using "struct canfd_frame::len" for the frame
+ * length is supported on both CAN and CANFD frames.
+ */
+ cf = (struct canfd_frame *)skb->data;
+ *len_ptr = cf->len;
+ priv->echo_skb[idx] = NULL;
+
+ return skb;
+}
+
/*
* Get the skb from the stack and loop it back locally
*
@@ -486,22 +514,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
*/
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
{
- struct can_priv *priv = netdev_priv(dev);
-
- BUG_ON(idx >= priv->echo_skb_max);
-
- if (priv->echo_skb[idx]) {
- struct sk_buff *skb = priv->echo_skb[idx];
- struct can_frame *cf = (struct can_frame *)skb->data;
- u8 dlc = cf->can_dlc;
+ struct sk_buff *skb;
+ u8 len;
- netif_rx(priv->echo_skb[idx]);
- priv->echo_skb[idx] = NULL;
+ skb = __can_get_echo_skb(dev, idx, &len);
+ if (!skb)
+ return 0;
- return dlc;
- }
+ netif_rx(skb);
- return 0;
+ return len;
}
EXPORT_SYMBOL_GPL(can_get_echo_skb);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 8e972ef08637..75ce11395ee8 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -135,13 +135,12 @@
/* FLEXCAN interrupt flag register (IFLAG) bits */
/* Errata ERR005829 step7: Reserve first valid MB */
-#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
-#define FLEXCAN_TX_MB_OFF_FIFO 9
+#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
-#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1)
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63
-#define FLEXCAN_IFLAG_MB(x) BIT(x)
+#define FLEXCAN_TX_MB 63
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST (FLEXCAN_TX_MB - 1)
+#define FLEXCAN_IFLAG_MB(x) BIT((x) & 0x1f)
#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
@@ -259,9 +258,7 @@ struct flexcan_priv {
struct can_rx_offload offload;
struct flexcan_regs __iomem *regs;
- struct flexcan_mb __iomem *tx_mb;
struct flexcan_mb __iomem *tx_mb_reserved;
- u8 tx_mb_idx;
u32 reg_ctrl_default;
u32 reg_imask1_default;
u32 reg_imask2_default;
@@ -515,6 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->regs;
struct can_frame *cf = (struct can_frame *)skb->data;
u32 can_id;
u32 data;
@@ -537,17 +535,17 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
if (cf->can_dlc > 0) {
data = be32_to_cpup((__be32 *)&cf->data[0]);
- priv->write(data, &priv->tx_mb->data[0]);
+ priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[0]);
}
if (cf->can_dlc > 4) {
data = be32_to_cpup((__be32 *)&cf->data[4]);
- priv->write(data, &priv->tx_mb->data[1]);
+ priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[1]);
}
can_put_echo_skb(skb, dev, 0);
- priv->write(can_id, &priv->tx_mb->can_id);
- priv->write(ctrl, &priv->tx_mb->can_ctrl);
+ priv->write(can_id, &regs->mb[FLEXCAN_TX_MB].can_id);
+ priv->write(ctrl, &regs->mb[FLEXCAN_TX_MB].can_ctrl);
/* Errata ERR005829 step8:
* Write twice INACTIVE(0x8) code to first MB.
@@ -563,9 +561,13 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
{
struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->regs;
struct sk_buff *skb;
struct can_frame *cf;
bool rx_errors = false, tx_errors = false;
+ u32 timestamp;
+
+ timestamp = priv->read(&regs->timer) << 16;
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
@@ -612,17 +614,21 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
if (tx_errors)
dev->stats.tx_errors++;
- can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
+ can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
}
static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
{
struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->regs;
struct sk_buff *skb;
struct can_frame *cf;
enum can_state new_state, rx_state, tx_state;
int flt;
struct can_berr_counter bec;
+ u32 timestamp;
+
+ timestamp = priv->read(&regs->timer) << 16;
flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
@@ -652,7 +658,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
- can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
+ can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
}
static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -720,9 +726,14 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
priv->write(BIT(n - 32), &regs->iflag2);
} else {
priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
- priv->read(&regs->timer);
}
+ /* Read the Free Running Timer. It is optional but recommended
+ * to unlock the mailbox as soon as possible and make it available
+ * for reception.
+ */
+ priv->read(&regs->timer);
+
return 1;
}
@@ -732,9 +743,9 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
struct flexcan_regs __iomem *regs = priv->regs;
u32 iflag1, iflag2;
- iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default;
- iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default &
- ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+ iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default &
+ ~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
+ iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default;
return (u64)iflag2 << 32 | iflag1;
}
@@ -746,11 +757,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
irqreturn_t handled = IRQ_NONE;
- u32 reg_iflag1, reg_esr;
+ u32 reg_iflag2, reg_esr;
enum can_state last_state = priv->can.state;
- reg_iflag1 = priv->read(&regs->iflag1);
-
/* reception interrupt */
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
u64 reg_iflag;
@@ -764,6 +773,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
break;
}
} else {
+ u32 reg_iflag1;
+
+ reg_iflag1 = priv->read(&regs->iflag1);
if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
handled = IRQ_HANDLED;
can_rx_offload_irq_offload_fifo(&priv->offload);
@@ -779,17 +791,22 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
}
}
+ reg_iflag2 = priv->read(&regs->iflag2);
+
/* transmission complete interrupt */
- if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+ if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) {
+ u32 reg_ctrl = priv->read(&regs->mb[FLEXCAN_TX_MB].can_ctrl);
+
handled = IRQ_HANDLED;
- stats->tx_bytes += can_get_echo_skb(dev, 0);
+ stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
+ 0, reg_ctrl << 16);
stats->tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
/* after sending a RTR frame MB is in RX mode */
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &priv->tx_mb->can_ctrl);
- priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
+ &regs->mb[FLEXCAN_TX_MB].can_ctrl);
+ priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), &regs->iflag2);
netif_wake_queue(dev);
}
@@ -931,15 +948,13 @@ static int flexcan_chip_start(struct net_device *dev)
reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
- FLEXCAN_MCR_IDAM_C;
+ FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
reg_mcr &= ~FLEXCAN_MCR_FEN;
- reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last);
- } else {
- reg_mcr |= FLEXCAN_MCR_FEN |
- FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
- }
+ else
+ reg_mcr |= FLEXCAN_MCR_FEN;
+
netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
priv->write(reg_mcr, &regs->mcr);
@@ -982,16 +997,17 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(reg_ctrl2, &regs->ctrl2);
}
- /* clear and invalidate all mailboxes first */
- for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
- priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
- &regs->mb[i].can_ctrl);
- }
-
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
+ for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
&regs->mb[i].can_ctrl);
+ }
+ } else {
+ /* clear and invalidate unused mailboxes first */
+ for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) {
+ priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
+ &regs->mb[i].can_ctrl);
+ }
}
/* Errata ERR005829: mark first TX mailbox as INACTIVE */
@@ -1000,7 +1016,7 @@ static int flexcan_chip_start(struct net_device *dev)
/* mark TX mailbox as INACTIVE */
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &priv->tx_mb->can_ctrl);
+ &regs->mb[FLEXCAN_TX_MB].can_ctrl);
/* acceptance mask/acceptance code (accept everything) */
priv->write(0x0, &regs->rxgmask);
@@ -1355,17 +1371,13 @@ static int flexcan_probe(struct platform_device *pdev)
priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP;
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
- } else {
- priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO;
+ else
priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
- }
- priv->tx_mb = &regs->mb[priv->tx_mb_idx];
- priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
- priv->reg_imask2_default = 0;
+ priv->reg_imask1_default = 0;
+ priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
priv->offload.mailbox_read = flexcan_mailbox_read;
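With the TX mailbox pinned at 63, FLEXCAN_IFLAG_MB() has to fold the mailbox number into one of two 32-bit flag registers: mailbox n lands on bit (n & 0x1f) of iflag1 for n < 32 and of iflag2 otherwise. A one-line userspace check of that mapping:

    #include <stdio.h>

    #define FLEXCAN_IFLAG_MB(x) (1u << ((x) & 0x1f))

    int main(void)
    {
        unsigned int mb = 63;   /* the fixed TX mailbox */

        printf("mailbox %u -> iflag%u mask 0x%08x\n",
               mb, mb < 32 ? 1 : 2, FLEXCAN_IFLAG_MB(mb));
        return 0;
    }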
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 11662f479e76..771a46083739 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -24,6 +24,9 @@
#define RCAR_CAN_DRV_NAME "rcar_can"
+#define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
+ BIT(CLKR_CLKEXT))
+
/* Mailbox configuration:
* mailbox 60 - 63 - Rx FIFO mailboxes
* mailbox 56 - 59 - Tx FIFO mailboxes
@@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev)
goto fail_clk;
}
- if (clock_select >= ARRAY_SIZE(clock_names)) {
+ if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
err = -EINVAL;
dev_err(&pdev->dev, "invalid CAN clock selected\n");
goto fail_clk;
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index c7d05027a7a0..2ce4fa8698c7 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -211,7 +211,56 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp)
+{
+ struct can_rx_offload_cb *cb;
+ unsigned long flags;
+
+ if (skb_queue_len(&offload->skb_queue) >
+ offload->skb_queue_len_max) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ cb = can_rx_offload_get_cb(skb);
+ cb->timestamp = timestamp;
+
+ spin_lock_irqsave(&offload->skb_queue.lock, flags);
+ __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
+ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+ can_rx_offload_schedule(offload);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp)
+{
+ struct net_device *dev = offload->dev;
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ u8 len;
+ int err;
+
+ skb = __can_get_echo_skb(dev, idx, &len);
+ if (!skb)
+ return 0;
+
+ err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+ if (err) {
+ stats->rx_errors++;
+ stats->tx_fifo_errors++;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
+
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+ struct sk_buff *skb)
{
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max)
@@ -222,7 +269,7 @@ int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_b
return 0;
}
-EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb);
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
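The two new helpers extend the old error-skb entry point (which survives, renamed, as can_rx_offload_queue_tail()): can_rx_offload_queue_sorted() inserts a timestamped skb into the offload queue in timestamp order, and can_rx_offload_get_echo_skb() wraps __can_get_echo_skb() so TX-complete frames take the same sorted path as received frames. A minimal caller sketch, assuming a driver priv struct embedding a struct can_rx_offload (all foo_* names are illustrative):

struct foo_priv {
	struct can_rx_offload offload;	/* assumed embedded member */
};

/* TX-complete IRQ path: hand the echoed skb back together with its
 * hardware timestamp so it lands in the RX stream in capture order.
 */
static void foo_tx_complete(struct net_device *dev, u32 hw_timestamp)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;

	stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
						       0, hw_timestamp);
	stats->tx_packets++;
}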
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 53e320c92a8b..ddaf46239e39 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -760,7 +760,7 @@ static int hi3110_open(struct net_device *net)
{
struct hi3110_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING;
+ unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH;
int ret;
ret = open_candev(net);
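The trigger change matters because the HI-3110 holds its interrupt line asserted while any cause is pending: with an edge trigger, a cause raised while the line is already high produces no new edge and is lost, whereas a level trigger re-enters the handler until the device deasserts the line. Sketch of the resulting request (handler and device-name identifiers assumed from the driver):

flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH;	/* level, not edge */
ret = request_threaded_irq(spi->irq, NULL, hi3110_can_ist,
			   flags, DEVICE_NAME, priv);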
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index b939a4c10b84..c89c7d4900d7 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -528,7 +528,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
context = &priv->tx_contexts[i];
context->echo_index = i;
- can_put_echo_skb(skb, netdev, context->echo_index);
++priv->active_tx_contexts;
if (priv->active_tx_contexts >= (int)dev->max_tx_urbs)
netif_stop_queue(netdev);
@@ -553,7 +552,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
dev_kfree_skb(skb);
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- can_free_echo_skb(netdev, context->echo_index);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(netdev);
@@ -564,6 +562,8 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
context->priv = priv;
+ can_put_echo_skb(skb, netdev, context->echo_index);
+
usb_fill_bulk_urb(urb, dev->udev,
usb_sndbulkpipe(dev->udev,
dev->bulk_out->bEndpointAddress),
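These kvaser hunks are one ordering fix: can_put_echo_skb() publishes the skb to the echo machinery, after which the URB completion may consume it concurrently, so the call must be the last step before submission, and the only unwind past that point is can_free_echo_skb(). The contract in isolation (a sketch; names and error handling are illustrative, not driver code):

static netdev_tx_t foo_xmit_tail(struct urb *urb, struct sk_buff *skb,
				 struct net_device *netdev, unsigned int idx)
{
	can_put_echo_skb(skb, netdev, idx);	/* commit point: skb now shared */

	if (usb_submit_urb(urb, GFP_ATOMIC)) {
		can_free_echo_skb(netdev, idx);	/* sole legal unwind */
		return NETDEV_TX_OK;		/* frame dropped, not requeued */
	}
	return NETDEV_TX_OK;
}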
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index c084bae5ec0a..5fc0be564274 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -1019,6 +1019,11 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
new_state : CAN_STATE_ERROR_ACTIVE;
can_change_state(netdev, cf, tx_state, rx_state);
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF)
+ cf->can_id |= CAN_ERR_RESTARTED;
}
if (new_state == CAN_STATE_BUS_OFF) {
@@ -1028,11 +1033,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
can_bus_off(netdev);
}
-
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- cf->can_id |= CAN_ERR_RESTARTED;
}
if (!skb) {
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 0678a38b1af4..f3d5bda012a1 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -35,10 +35,6 @@
#include <linux/slab.h>
#include <linux/usb.h>
-#include <linux/can.h>
-#include <linux/can/dev.h>
-#include <linux/can/error.h>
-
#define UCAN_DRIVER_NAME "ucan"
#define UCAN_MAX_RX_URBS 8
/* the CAN controller needs a while to enable/disable the bus */
@@ -1575,11 +1571,8 @@ err_firmware_needs_update:
/* disconnect the device */
static void ucan_disconnect(struct usb_interface *intf)
{
- struct usb_device *udev;
struct ucan_priv *up = usb_get_intfdata(intf);
- udev = interface_to_usbdev(intf);
-
usb_set_intfdata(intf, NULL);
if (up) {
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 54e0ca6ed730..86b6464b4525 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_device *dev)
{
int i;
- mutex_init(&dev->reg_mutex);
- mutex_init(&dev->stats_mutex);
- mutex_init(&dev->alu_mutex);
- mutex_init(&dev->vlan_mutex);
-
dev->ds->ops = &ksz_switch_ops;
for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
@@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev)
if (dev->pdata)
dev->chip_id = dev->pdata->chip_id;
+ mutex_init(&dev->reg_mutex);
+ mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->alu_mutex);
+ mutex_init(&dev->vlan_mutex);
+
if (ksz_switch_detect(dev))
return -EINVAL;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index d721ccf7d8be..38e399e0f30e 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
if (err)
return err;
+ /* Keep the histogram mode bits */
+ val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 18956e7604a3..a70bb1bb90e7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1848,6 +1848,8 @@ static void ena_down(struct ena_adapter *adapter)
rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
if (rc)
dev_err(&adapter->pdev->dev, "Device reset failed\n");
+ /* stop submitting admin commands on a device that was reset */
+ ena_com_set_admin_running_state(adapter->ena_dev, false);
}
ena_destroy_all_io_queues(adapter);
@@ -1914,6 +1916,9 @@ static int ena_close(struct net_device *netdev)
netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
+ if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+ return 0;
+
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
@@ -2613,9 +2618,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
ena_down(adapter);
/* Stop the device from sending AENQ events (in case reset flag is set
- * and device is up, ena_close already reset the device
- * In case the reset flag is set and the device is up, ena_down()
- * already perform the reset, so it can be skipped.
+ * and device is up, ena_down() already reset the device).
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
@@ -2694,8 +2697,8 @@ err_device_destroy:
ena_com_abort_admin_commands(ena_dev);
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
- ena_com_mmio_reg_read_request_destroy(ena_dev);
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -3452,6 +3455,8 @@ err_rss:
ena_com_rss_destroy(ena_dev);
err_free_msix:
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
+ /* stop submitting admin commands on a device that was reset */
+ ena_com_set_admin_running_state(ena_dev, false);
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_worker_destroy:
@@ -3498,18 +3503,12 @@ static void ena_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
- unregister_netdev(netdev);
-
- /* If the device is running then we want to make sure the device will be
- * reset to make sure no more events will be issued by the device.
- */
- if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-
rtnl_lock();
ena_destroy_device(adapter, true);
rtnl_unlock();
+ unregister_netdev(netdev);
+
free_netdev(netdev);
ena_com_rss_destroy(ena_dev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 521873642339..dc8b6173d8d8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
#define DRV_MODULE_VER_MAJOR 2
#define DRV_MODULE_VER_MINOR 0
-#define DRV_MODULE_VER_SUBMINOR 1
+#define DRV_MODULE_VER_SUBMINOR 2
#define DRV_MODULE_NAME "ena"
#ifndef DRV_MODULE_VERSION
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index b4fc0ed5bce8..9d4899826823 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
prop = of_get_property(nd, "tpe-link-test?", NULL);
if (!prop)
- goto no_link_test;
+ goto node_put;
if (strcmp(prop, "true")) {
printk(KERN_NOTICE "SunLance: warning: overriding option "
@@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
"to ecd@skynet.be\n");
auxio_set_lte(AUXIO_LTE_ON);
}
+node_put:
+ of_node_put(nd);
no_link_test:
lp->auto_select = 1;
lp->tpe = 0;
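The jump-label change plugs a device-node refcount leak: `nd` carries a reference that every exit path must drop, and previously the `!prop` exit skipped the of_node_put(). The rule in isolation (a sketch; the node's origin is assumed from the driver):

static int foo_check_link_test(void)
{
	struct device_node *nd = of_find_node_by_path("/options");

	if (!nd)
		return -ENODEV;
	if (!of_get_property(nd, "tpe-link-test?", NULL)) {
		of_node_put(nd);	/* early exits must drop the ref too */
		return 0;
	}
	of_node_put(nd);		/* as must the normal path */
	return 1;
}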
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 6a633c70f603..99ef1daaa4d8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -407,13 +407,13 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pause)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 fc = aq_nic->aq_nic_cfg.flow_control;
pause->autoneg = 0;
- if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
- pause->rx_pause = 1;
- if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
- pause->tx_pause = 1;
+ pause->rx_pause = !!(fc & AQ_NIC_FC_RX);
+ pause->tx_pause = !!(fc & AQ_NIC_FC_TX);
+
}
static int aq_ethtool_set_pauseparam(struct net_device *ndev,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index e8689241204e..a1e70da358ca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -204,6 +204,10 @@ struct aq_hw_ops {
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
+ int (*hw_set_offload)(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+
+ int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
};
struct aq_fw_ops {
@@ -226,6 +230,8 @@ struct aq_fw_ops {
int (*update_stats)(struct aq_hw_s *self);
+ u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
+
int (*set_flow_control)(struct aq_hw_s *self);
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e3ae29e523f0..7c07eef275eb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -99,8 +99,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
bool is_lro = false;
+ int err = 0;
+
+ aq_cfg->features = features;
- if (aq_cfg->hw_features & NETIF_F_LRO) {
+ if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
is_lro = features & NETIF_F_LRO;
if (aq_cfg->is_lro != is_lro) {
@@ -112,8 +115,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
}
}
}
+ if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM)
+ err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
+ aq_cfg);
- return 0;
+ return err;
}
static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 5fed24446687..7abdc0952425 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -118,12 +118,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
}
cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
- cfg->hw_features = cfg->aq_hw_caps->hw_features;
+ cfg->features = cfg->aq_hw_caps->hw_features;
}
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
int err = self->aq_fw_ops->update_link_status(self->aq_hw);
+ u32 fc = 0;
if (err)
return err;
@@ -133,6 +134,15 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
AQ_CFG_DRV_NAME, self->link_status.mbps,
self->aq_hw->aq_link_status.mbps);
aq_nic_update_interrupt_moderation_settings(self);
+
+ /* The driver has to update flow control settings on the RX block
+ * on any link event, so query the FW for whatever flow control
+ * it negotiated.
+ */
+ if (self->aq_fw_ops->get_flow_control)
+ self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
+ if (self->aq_hw_ops->hw_set_fc)
+ self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
}
self->link_status = self->aq_hw->aq_link_status;
@@ -590,7 +600,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
}
}
- if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
+ if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
packet_filter |= IFF_MULTICAST;
self->mc_list.count = i;
self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
@@ -772,7 +782,9 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
- if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+ /* Asym is when either RX or TX, but not both */
+ if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
+ !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Asym_Pause);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index c1582f4e8e1b..44ec47a3d60a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -23,7 +23,7 @@ struct aq_vec_s;
struct aq_nic_cfg_s {
const struct aq_hw_caps_s *aq_hw_caps;
- u64 hw_features;
+ u64 features;
u32 rxds; /* rx ring size, descriptors # */
u32 txds; /* tx ring size, descriptors # */
u32 vecs; /* vecs==allocated irqs */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 3db91446cc67..74550ccc7a20 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
return !!budget;
}
+static void aq_rx_checksum(struct aq_ring_s *self,
+ struct aq_ring_buff_s *buff,
+ struct sk_buff *skb)
+{
+ if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (unlikely(buff->is_cso_err)) {
+ ++self->stats.rx.errors;
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+ if (buff->is_ip_cso) {
+ __skb_incr_checksum_unnecessary(skb);
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ __skb_incr_checksum_unnecessary(skb);
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+}
+
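For reference on the helper above: the first __skb_incr_checksum_unnecessary() call switches the skb to CHECKSUM_UNNECESSARY, and each further call bumps skb->csum_level, recording one more hardware-verified checksum — here the IP header, then TCP/UDP. Equivalent sketch (assumed csum_level semantics from skbuff.h):

/* Mark n hardware-verified checksums: IP only -> n == 1, IP + L4 -> n == 2. */
static void foo_mark_csums_ok(struct sk_buff *skb, unsigned int n)
{
	while (n--)
		__skb_incr_checksum_unnecessary(skb);
}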
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
@@ -267,18 +288,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
}
skb->protocol = eth_type_trans(skb, ndev);
- if (unlikely(buff->is_cso_err)) {
- ++self->stats.rx.errors;
- skb->ip_summed = CHECKSUM_NONE;
- } else {
- if (buff->is_ip_cso) {
- __skb_incr_checksum_unnecessary(skb);
- if (buff->is_udp_cso || buff->is_tcp_cso)
- __skb_incr_checksum_unnecessary(skb);
- } else {
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
+
+ aq_rx_checksum(self, buff, skb);
skb_set_hash(skb, buff->rss_hash,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 76d25d594a0f..f02592f43fe3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -100,12 +100,17 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
return err;
}
+static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
+{
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+ return 0;
+}
+
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
u32 tc = 0U;
u32 buff_size = 0U;
unsigned int i_priority = 0U;
- bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -138,7 +143,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
/* QoS Rx buf size per TC */
tc = 0;
- is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_B0_RXBUF_MAX;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -150,7 +154,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(buff_size *
(1024U / 32U) * 50U) /
100U, tc);
- hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+ hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
@@ -229,8 +234,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
- hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
- hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+ NETIF_F_RXCSUM));
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+ NETIF_F_RXCSUM));
/* LSO offloads*/
hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
@@ -655,9 +662,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
- unsigned int is_err = 1U;
unsigned int is_rx_check_sum_enabled = 0U;
unsigned int pkt_type = 0U;
+ u8 rx_stat = 0U;
if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
break;
@@ -665,35 +672,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->hw_head];
- is_err = (0x0000003CU & rxd_wb->status);
+ rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
- is_err &= ~0x20U; /* exclude validity bit */
pkt_type = 0xFFU & (rxd_wb->type >> 4);
- if (is_rx_check_sum_enabled) {
- if (0x0U == (pkt_type & 0x3U))
- buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
+ if (is_rx_check_sum_enabled & BIT(19) &&
+ (0x0U == (pkt_type & 0x3U)))
+ buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
+ if (is_rx_check_sum_enabled & BIT(20)) {
if (0x4U == (pkt_type & 0x1CU))
- buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
+ buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
+ !!(rx_stat & BIT(3));
else if (0x0U == (pkt_type & 0x1CU))
- buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
-
- /* Checksum offload workaround for small packets */
- if (rxd_wb->pkt_len <= 60) {
- buff->is_ip_cso = 0U;
- buff->is_cso_err = 0U;
- }
+ buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
+ !!(rx_stat & BIT(3));
+ }
+ buff->is_cso_err = !!(rx_stat & 0x6);
+ /* Checksum offload workaround for small packets */
+ if (unlikely(rxd_wb->pkt_len <= 60)) {
+ buff->is_ip_cso = 0U;
+ buff->is_cso_err = 0U;
}
-
- is_err &= ~0x18U;
dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
- if (is_err || rxd_wb->type & 0x1000U) {
- /* status error or DMA error */
+ if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
+ /* MAC error or DMA error */
buff->is_error = 1U;
} else {
if (self->aq_nic_cfg->is_rss) {
@@ -915,6 +922,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
+
+ /* Invalidate Descriptor Cache to prevent writing to the cached
+ * descriptors and to the data pointer of those descriptors
+ */
+ hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+
return aq_hw_err_from_flags(self);
}
@@ -963,4 +976,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_get_regs = hw_atl_utils_hw_get_regs,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
+ .hw_set_offload = hw_atl_b0_hw_offload_set,
+ .hw_set_fc = hw_atl_b0_set_fc,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index be0a3a90dfad..5502ec5f0f69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
}
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
+ init);
+}
+
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc, u32 buffer)
{
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 7056c7342afc..41f239928c15 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc,
u32 buffer);
+/* set rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+
/* set rx xoff enable (per tc) */
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
u32 buffer);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 716674a9b729..a715fa317b1c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -293,6 +293,24 @@
/* default value of bitfield desc{d}_reset */
#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
+/* rdm_desc_init_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_i.
+ * port="pif_rdm_desc_init_i"
+ */
+
+/* register address for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00
+/* bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff
+/* inverted bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000
+/* lower bit position of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0
+/* width of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32
+/* default value of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
+
/* rx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
* port="pif_rdm_int_desc_wrb_en_i"
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 096ca5730887..7de3220d9cab 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -30,6 +30,8 @@
#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
+#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL)
@@ -451,6 +453,25 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
return 0;
}
+static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+
+ if (mpi_state & HW_ATL_FW2X_CAP_PAUSE) {
+ if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+ *fcmode = AQ_NIC_FC_RX;
+ else
+ *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
+ } else {
+ if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+ *fcmode = AQ_NIC_FC_TX;
+ else
+ *fcmode = 0;
+ }
+
+ return 0;
+}
+
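The decode above is the usual PAUSE/ASYM_PAUSE resolution, shown here as a truth table and an equivalent helper (sketch; foo_decode_fc is illustrative):

/* PAUSE ASYM -> resolved local flow control
 *   0    0      none
 *   0    1      TX only
 *   1    0      RX and TX (symmetric)
 *   1    1      RX only
 */
static u32 foo_decode_fc(bool pause, bool asym)
{
	if (pause)
		return asym ? AQ_NIC_FC_RX : (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	return asym ? AQ_NIC_FC_TX : 0;
}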
const struct aq_fw_ops aq_fw_2x_ops = {
.init = aq_fw2x_init,
.deinit = aq_fw2x_deinit,
@@ -465,4 +485,5 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.set_eee_rate = aq_fw2x_set_eee_rate,
.get_eee_rate = aq_fw2x_get_eee_rate,
.set_flow_control = aq_fw2x_set_flow_control,
+ .get_flow_control = aq_fw2x_get_flow_control,
};
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 78c5de467426..9d0e74f6b089 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -140,6 +140,5 @@ struct alx_priv {
};
extern const struct ethtool_ops alx_ethtool_ops;
-extern const char alx_drv_name[];
#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 7968c644ad86..c131cfc1b79d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -49,7 +49,7 @@
#include "hw.h"
#include "reg.h"
-const char alx_drv_name[] = "alx";
+static const char alx_drv_name[] = "alx";
static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
{
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4122553e224b..0e2d99c737e3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1902,9 +1902,6 @@ static void bcm_sysport_netif_start(struct net_device *dev)
intrl2_1_mask_clear(priv, 0xffffffff);
else
intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
-
- /* Last call before we start the real business */
- netif_tx_start_all_queues(dev);
}
static void rbuf_init(struct bcm_sysport_priv *priv)
@@ -2048,6 +2045,8 @@ static int bcm_sysport_open(struct net_device *dev)
bcm_sysport_netif_start(dev);
+ netif_tx_start_all_queues(dev);
+
return 0;
out_clear_rx_int:
@@ -2071,7 +2070,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
struct bcm_sysport_priv *priv = netdev_priv(dev);
/* stop all software from updating hardware */
- netif_tx_stop_all_queues(dev);
+ netif_tx_disable(dev);
napi_disable(&priv->napi);
cancel_work_sync(&priv->dim.dim.work);
phy_stop(dev->phydev);
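The netif_tx_disable() switch is the substantive part of this hunk: it takes each queue's xmit lock while stopping it, so on return no CPU is still inside ndo_start_xmit(); netif_tx_stop_all_queues() only marks the queues stopped and can race with an in-flight transmit. Sketch of the distinction (semantics per netdevice.h):

/* Quiesce TX so that no ndo_start_xmit() runs after this returns. */
static void foo_quiesce_tx(struct net_device *dev)
{
	netif_tx_disable(dev);	/* locks each queue while stopping it */
	/* netif_tx_stop_all_queues(dev) would only flag the queues */
}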
@@ -2658,12 +2657,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d)
if (!netif_running(dev))
return 0;
+ netif_device_detach(dev);
+
bcm_sysport_netif_stop(dev);
phy_suspend(dev->phydev);
- netif_device_detach(dev);
-
/* Disable UniMAC RX */
umac_enable_set(priv, CMD_RX_EN, 0);
@@ -2746,8 +2745,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
goto out_free_rx_ring;
}
- netif_device_attach(dev);
-
/* RX pipe enable */
topctrl_writel(priv, 0, RX_FLUSH_CNTL);
@@ -2788,6 +2785,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
bcm_sysport_netif_start(dev);
+ netif_device_attach(dev);
+
return 0;
out_free_rx_ring:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index be1506169076..0de487a8f0eb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2191,6 +2191,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
E1HVN_MAX)
+/* Following is the DMAE channel number allocation for the clients.
+ * MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively.
+ * Driver: 0-3 and 8-11 (for PF dmae operations)
+ * 4 and 12 (for stats requests)
+ */
+#define BNX2X_FW_DMAE_C 13 /* Channel for FW DMAE operations */
+
/* PCIE link and speed */
#define PCICFG_LINK_WIDTH 0x1f00000
#define PCICFG_LINK_WIDTH_SHIFT 20
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 3f4d2c8da21a..a9eaaf3e73a4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode;
+ rdata->dmae_cmd_id = BNX2X_FW_DMAE_C;
rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port);
rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index dd85d790f638..d4c300117529 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1675,7 +1675,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else {
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
if (dev->features & NETIF_F_RXCSUM)
- cpr->rx_l4_csum_errors++;
+ bnapi->cp_ring.rx_l4_csum_errors++;
}
}
@@ -8714,6 +8714,26 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
+static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
+ u32 ring_id, u32 *prod, u32 *cons)
+{
+ struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_dbg_ring_info_get_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
+ req.ring_type = ring_type;
+ req.fw_ring_id = cpu_to_le32(ring_id);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ *prod = le32_to_cpu(resp->producer_index);
+ *cons = le32_to_cpu(resp->consumer_index);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
{
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
@@ -8821,6 +8841,11 @@ static void bnxt_timer(struct timer_list *t)
bnxt_queue_sp_work(bp);
}
}
+
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
+ set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -8851,6 +8876,44 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
bnxt_rtnl_unlock_sp(bp);
}
+static void bnxt_chk_missed_irq(struct bnxt *bp)
+{
+ int i;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ u32 fw_ring_id;
+ int j;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ for (j = 0; j < 2; j++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ u32 val[2];
+
+ if (!cpr2 || cpr2->has_more_work ||
+ !bnxt_has_work(bp, cpr2))
+ continue;
+
+ if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
+ cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
+ continue;
+ }
+ fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
+ bnxt_dbg_hwrm_ring_info_get(bp,
+ DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
+ fw_ring_id, &val[0], &val[1]);
+ cpr->missed_irqs++;
+ }
+ }
+}
+
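Condensed, the loop above flags a missed interrupt only when three things hold at once: the ring has work pending, NAPI is not already mid-poll on it, and the raw consumer index has not advanced since the previous timer tick. As a predicate (sketch; the real loop also refreshes last_cp_raw_cons when progress is seen):

static bool foo_ring_irq_missed(struct bnxt *bp,
				struct bnxt_cp_ring_info *cpr2)
{
	if (!cpr2 || cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
		return false;		/* nothing to miss, or mid-poll */
	return cpr2->cp_raw_cons == cpr2->last_cp_raw_cons;
}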
static void bnxt_cfg_ntp_filters(struct bnxt *);
static void bnxt_sp_task(struct work_struct *work)
@@ -8930,6 +8993,9 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
bnxt_tc_flow_stats_work(bp);
+ if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
+ bnxt_chk_missed_irq(bp);
+
/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
* must be the last functions to be called before exiting.
*/
@@ -10087,6 +10153,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
bnxt_hwrm_func_qcfg(bp);
+ bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
bnxt_ethtool_init(bp);
bnxt_dcb_init(bp);
@@ -10120,7 +10187,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
}
- bnxt_hwrm_vnic_qcaps(bp);
if (bnxt_rfs_supported(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
if (bnxt_rfs_capable(bp)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 498b373c992d..9e99d4ab3e06 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -798,6 +798,8 @@ struct bnxt_cp_ring_info {
u8 had_work_done:1;
u8 has_more_work:1;
+ u32 last_cp_raw_cons;
+
struct bnxt_coal rx_ring_coal;
u64 rx_packets;
u64 rx_bytes;
@@ -816,6 +818,7 @@ struct bnxt_cp_ring_info {
dma_addr_t hw_stats_map;
u32 hw_stats_ctx_id;
u64 rx_l4_csum_errors;
+ u64 missed_irqs;
struct bnxt_ring_struct cp_ring_struct;
@@ -1527,6 +1530,7 @@ struct bnxt {
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
#define BNXT_FLOW_STATS_SP_EVENT 15
#define BNXT_UPDATE_PHY_SP_EVENT 16
+#define BNXT_RING_COAL_NOW_SP_EVENT 17
struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 48078564f025..6cc69a58478a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,7 +137,7 @@ reset_coalesce:
return rc;
}
-#define BNXT_NUM_STATS 21
+#define BNXT_NUM_STATS 22
#define BNXT_RX_STATS_ENTRY(counter) \
{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
@@ -384,6 +384,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (k = 0; k < stat_fields; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
buf[j++] = cpr->rx_l4_csum_errors;
+ buf[j++] = cpr->missed_irqs;
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
@@ -468,6 +469,8 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: rx_l4_csum_errors", i);
buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: missed_irqs", i);
+ buf += ETH_GSTRING_LEN;
}
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
strcpy(buf, bnxt_sw_func_stats[i].string);
@@ -2942,8 +2945,8 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
record->asic_state = 0;
strlcpy(record->system_name, utsname()->nodename,
sizeof(record->system_name));
- record->year = cpu_to_le16(tm.tm_year);
- record->month = cpu_to_le16(tm.tm_mon);
+ record->year = cpu_to_le16(tm.tm_year + 1900);
+ record->month = cpu_to_le16(tm.tm_mon + 1);
record->day = cpu_to_le16(tm.tm_mday);
record->hour = cpu_to_le16(tm.tm_hour);
record->minute = cpu_to_le16(tm.tm_min);
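The year/month fix is the standard struct tm convention: time64_to_tm() yields tm_year as years since 1900 and tm_mon in the range 0..11, so a wall-clock record has to add the offsets back. Worked example: for 2018-11-28 the library fills tm_year = 118 and tm_mon = 10, and the record wants 2018 and 11 — exactly the +1900 and +1 above. Minimal sketch (assumed linux/time.h semantics):

u16 year  = tm.tm_year + 1900;	/* 118 -> 2018 */
u16 month = tm.tm_mon + 1;	/* 10  -> 11   */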
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index beee61292d5e..b59b382d34f9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -43,6 +43,9 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
if (ulp_id == BNXT_ROCE_ULP) {
unsigned int max_stat_ctxs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return -EOPNOTSUPP;
+
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
bp->num_stat_ctxs == max_stat_ctxs)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 20c1681bb1af..2d6f090bf644 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2855,7 +2855,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
- netif_tx_start_all_queues(dev);
bcmgenet_enable_tx_napi(priv);
/* Monitor link interrupts now */
@@ -2937,6 +2936,8 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_netif_start(dev);
+ netif_tx_start_all_queues(dev);
+
return 0;
err_irq1:
@@ -2958,7 +2959,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
bcmgenet_disable_tx_napi(priv);
- netif_tx_stop_all_queues(dev);
+ netif_tx_disable(dev);
/* Disable MAC receive */
umac_enable_set(priv, CMD_RX_EN, false);
@@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct device *d)
if (!netif_running(dev))
return 0;
+ netif_device_detach(dev);
+
bcmgenet_netif_stop(dev);
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
- netif_device_detach(dev);
-
/* Prepare the device for Wake-on-LAN and switch to the slow clock */
if (device_may_wakeup(d) && priv->wolopts) {
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
@@ -3700,8 +3701,6 @@ static int bcmgenet_resume(struct device *d)
/* Always enable ring 16 - descriptor ring */
bcmgenet_enable_dma(priv, dma_ctrl);
- netif_device_attach(dev);
-
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
@@ -3710,6 +3709,8 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_netif_start(dev);
+ netif_device_attach(dev);
+
return 0;
out_clk_disable:
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 89295306f161..432c3b867084 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12422,6 +12422,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
{
struct tg3 *tp = netdev_priv(dev);
int i, irq_sync = 0, err = 0;
+ bool reset_phy = false;
if ((ering->rx_pending > tp->rx_std_ring_mask) ||
(ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
@@ -12453,7 +12454,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, false);
+ /* Reset PHY to avoid PHY lock up */
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
+ reset_phy = true;
+
+ err = tg3_restart_hw(tp, reset_phy);
if (!err)
tg3_netif_start(tp);
}
@@ -12487,6 +12494,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
{
struct tg3 *tp = netdev_priv(dev);
int err = 0;
+ bool reset_phy = false;
if (tp->link_config.autoneg == AUTONEG_ENABLE)
tg3_warn_mgmt_link_flap(tp);
@@ -12556,7 +12564,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, false);
+ /* Reset PHY to avoid PHY lock up */
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
+ reset_phy = true;
+
+ err = tg3_restart_hw(tp, reset_phy);
if (!err)
tg3_netif_start(tp);
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 768f584f8392..88f8a8fa93cd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1784,6 +1784,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
bool if_up = netif_running(nic->netdev);
struct bpf_prog *old_prog;
bool bpf_attached = false;
+ int ret = 0;
/* For now just support only the usual MTU sized frames */
if (prog && (dev->mtu > 1500)) {
@@ -1817,8 +1818,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
if (nic->xdp_prog) {
/* Attach BPF program */
nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
- if (!IS_ERR(nic->xdp_prog))
+ if (!IS_ERR(nic->xdp_prog)) {
bpf_attached = true;
+ } else {
+ ret = PTR_ERR(nic->xdp_prog);
+ nic->xdp_prog = NULL;
+ }
}
/* Calculate Tx queues needed for XDP and network stack */
@@ -1830,7 +1835,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
netif_trans_update(nic->netdev);
}
- return 0;
+ return ret;
}
static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 187a249ff2d1..fcaf18fa3904 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -585,10 +585,12 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
if (!sq->dmem.base)
return;
- if (sq->tso_hdrs)
+ if (sq->tso_hdrs) {
dma_free_coherent(&nic->pdev->dev,
sq->dmem.q_len * TSO_HEADER_SIZE,
sq->tso_hdrs, sq->tso_hdrs_phys);
+ sq->tso_hdrs = NULL;
+ }
/* Free pending skbs in the queue */
smp_rmb();
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 75c1c5ed2387..e2cdfa75673f 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -67,7 +67,6 @@ config CHELSIO_T3
config CHELSIO_T4
tristate "Chelsio Communications T4/T5/T6 Ethernet support"
depends on PCI && (IPV6 || IPV6=n)
- depends on THERMAL || !THERMAL
select FW_LOADER
select MDIO
select ZLIB_DEFLATE
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 78e5d17a1d5f..91d8a885deba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -12,6 +12,4 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
-ifdef CONFIG_THERMAL
-cxgb4-objs += cxgb4_thermal.o
-endif
+cxgb4-$(CONFIG_THERMAL) += cxgb4_thermal.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 05a46926016a..d49db46254cd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5863,7 +5863,7 @@ fw_attach_fail:
if (!is_t4(adapter->params.chip))
cxgb4_ptp_init(adapter);
- if (IS_ENABLED(CONFIG_THERMAL) &&
+ if (IS_REACHABLE(CONFIG_THERMAL) &&
!is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
cxgb4_thermal_init(adapter);
@@ -5932,7 +5932,7 @@ static void remove_one(struct pci_dev *pdev)
if (!is_t4(adapter->params.chip))
cxgb4_ptp_stop(adapter);
- if (IS_ENABLED(CONFIG_THERMAL))
+ if (IS_REACHABLE(CONFIG_THERMAL))
cxgb4_thermal_remove(adapter);
/* If we allocated filters, free up state associated with any
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index ceec467f590d..949103db8a8a 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -660,7 +660,7 @@ static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
u64_stats_update_begin(&port->tx_stats_syncp);
port->tx_frag_stats[nfrags]++;
- u64_stats_update_end(&port->ir_stats_syncp);
+ u64_stats_update_end(&port->tx_stats_syncp);
}
}
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 570caeb8ee9e..084f24daf2b5 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -872,11 +872,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
struct net_device *netdev = dev_id;
struct ftmac100 *priv = netdev_priv(netdev);
- if (likely(netif_running(netdev))) {
- /* Disable interrupts for polling */
- ftmac100_disable_all_int(priv);
+ /* Disable interrupts for polling */
+ ftmac100_disable_all_int(priv);
+ if (likely(netif_running(netdev)))
napi_schedule(&priv->napi);
- }
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 3f96aa30068e..20fcf0d1c2ce 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3760,7 +3760,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Hardware table is only clear when pf resets */
if (!(handle->flags & HNAE3_SUPPORT_VF)) {
ret = hns3_restore_vlan(netdev);
- return ret;
+ if (ret)
+ return ret;
}
ret = hns3_restore_fd_rules(netdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index aa5cb9834d73..494e562fe8c7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1168,14 +1168,14 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
*/
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
- struct hclge_vport *vport = hdev->vport;
- u32 i, k, qs_bitmap;
- int ret;
+ int i;
for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
- qs_bitmap = 0;
+ u32 qs_bitmap = 0;
+ int k, ret;
for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hclge_vport *vport = &hdev->vport[k];
u16 qs_id = vport->qs_offset + tc;
u8 grp, sub_grp;
@@ -1185,8 +1185,6 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
HCLGE_BP_SUB_GRP_ID_S);
if (i == grp)
qs_bitmap |= (1 << sub_grp);
-
- vport++;
}
ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7893beffcc71..c0203a0d5e3b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -485,8 +485,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
for (j = 0; j < rx_pool->size; j++) {
if (rx_pool->rx_buff[j].skb) {
- dev_kfree_skb_any(rx_pool->rx_buff[i].skb);
- rx_pool->rx_buff[i].skb = NULL;
+ dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+ rx_pool->rx_buff[j].skb = NULL;
}
}
@@ -1103,20 +1103,15 @@ static int ibmvnic_open(struct net_device *netdev)
return 0;
}
- mutex_lock(&adapter->reset_lock);
-
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
- if (rc) {
- mutex_unlock(&adapter->reset_lock);
+ if (rc)
return rc;
- }
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
- mutex_unlock(&adapter->reset_lock);
return rc;
}
}
@@ -1124,8 +1119,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = __ibmvnic_open(netdev);
netif_carrier_on(netdev);
- mutex_unlock(&adapter->reset_lock);
-
return rc;
}
@@ -1269,10 +1262,8 @@ static int ibmvnic_close(struct net_device *netdev)
return 0;
}
- mutex_lock(&adapter->reset_lock);
rc = __ibmvnic_close(netdev);
ibmvnic_cleanup(netdev);
- mutex_unlock(&adapter->reset_lock);
return rc;
}
@@ -1545,7 +1536,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
- if (adapter->vlan_header_insertion) {
+ if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
}
@@ -1746,6 +1737,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
struct ibmvnic_rwi *rwi, u32 reset_state)
{
u64 old_num_rx_queues, old_num_tx_queues;
+ u64 old_num_rx_slots, old_num_tx_slots;
struct net_device *netdev = adapter->netdev;
int i, rc;
@@ -1757,6 +1749,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
+ old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
+ old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
ibmvnic_cleanup(netdev);
@@ -1819,21 +1813,20 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (rc)
return rc;
} else if (adapter->req_rx_queues != old_num_rx_queues ||
- adapter->req_tx_queues != old_num_tx_queues) {
- adapter->map_id = 1;
+ adapter->req_tx_queues != old_num_tx_queues ||
+ adapter->req_rx_add_entries_per_subcrq !=
+ old_num_rx_slots ||
+ adapter->req_tx_entries_per_subcrq !=
+ old_num_tx_slots) {
release_rx_pools(adapter);
release_tx_pools(adapter);
- rc = init_rx_pools(netdev);
- if (rc)
- return rc;
- rc = init_tx_pools(netdev);
- if (rc)
- return rc;
-
release_napi(adapter);
- rc = init_napi(adapter);
+ release_vpd_data(adapter);
+
+ rc = init_resources(adapter);
if (rc)
return rc;
+
} else {
rc = reset_tx_pools(adapter);
if (rc)
@@ -1917,17 +1910,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
adapter->state = VNIC_PROBED;
return 0;
}
- /* netif_set_real_num_xx_queues needs to take rtnl lock here
- * unless wait_for_reset is set, in which case the rtnl lock
- * has already been taken before initializing the reset
- */
- if (!adapter->wait_for_reset) {
- rtnl_lock();
- rc = init_resources(adapter);
- rtnl_unlock();
- } else {
- rc = init_resources(adapter);
- }
+
+ rc = init_resources(adapter);
if (rc)
return rc;
@@ -1986,13 +1970,21 @@ static void __ibmvnic_reset(struct work_struct *work)
struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
+ bool we_lock_rtnl = false;
u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
netdev = adapter->netdev;
- mutex_lock(&adapter->reset_lock);
+ /* netif_set_real_num_xx_queues needs to take rtnl lock here
+ * unless wait_for_reset is set, in which case the rtnl lock
+ * has already been taken before initializing the reset
+ */
+ if (!adapter->wait_for_reset) {
+ rtnl_lock();
+ we_lock_rtnl = true;
+ }
reset_state = adapter->state;
rwi = get_next_rwi(adapter);
@@ -2020,12 +2012,11 @@ static void __ibmvnic_reset(struct work_struct *work)
if (rc) {
netdev_dbg(adapter->netdev, "Reset failed\n");
free_all_rwi(adapter);
- mutex_unlock(&adapter->reset_lock);
- return;
}
adapter->resetting = false;
- mutex_unlock(&adapter->reset_lock);
+ if (we_lock_rtnl)
+ rtnl_unlock();
}
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
@@ -4768,7 +4759,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
- mutex_init(&adapter->reset_lock);
mutex_init(&adapter->rwi_lock);
adapter->resetting = false;
@@ -4840,8 +4830,8 @@ static int ibmvnic_remove(struct vio_dev *dev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
adapter->state = VNIC_REMOVING;
- unregister_netdev(netdev);
- mutex_lock(&adapter->reset_lock);
+ rtnl_lock();
+ unregister_netdevice(netdev);
release_resources(adapter);
release_sub_crqs(adapter, 1);
@@ -4852,7 +4842,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
adapter->state = VNIC_REMOVED;
- mutex_unlock(&adapter->reset_lock);
+ rtnl_unlock();
device_remove_file(&dev->dev, &dev_attr_failover);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 18103b811d4d..99c4f8d331ce 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
struct tasklet_struct tasklet;
enum vnic_state state;
enum ibmvnic_reset_reason reset_reason;
- struct mutex reset_lock, rwi_lock;
+ struct mutex rwi_lock;
struct list_head rwi_list;
struct work_struct ibmvnic_reset;
bool resetting;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index bc71a21c1dc2..21c2688d6308 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -12249,6 +12249,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_SCTP_CRC |
@@ -12266,13 +12268,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
/* record features VLANs can make use of */
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
- netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
-
hw_features = hw_enc_features |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
+
netdev->hw_features |= hw_features;
netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4c4b5717a627..b8548370f1c7 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -76,6 +76,8 @@ extern const char ice_drv_ver[];
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
+#define ICE_MAX_RESET_WAIT 20
+
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
@@ -189,7 +191,6 @@ struct ice_vsi {
u64 tx_linearize;
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
unsigned int current_netdev_flags;
u32 tx_restart;
u32 tx_busy;
@@ -369,5 +370,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+void ice_napi_del(struct ice_vsi *vsi);
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 8cd6a2401fd9..554fd707a6d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -811,6 +811,9 @@ void ice_deinit_hw(struct ice_hw *hw)
/* Attempt to disable FW logging before shutting down control queues */
ice_cfg_fw_log(hw, false);
ice_shutdown_all_ctrlq(hw);
+
+ /* Clear VSI contexts if not already cleared */
+ ice_clear_all_vsi_ctx(hw);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 96923580f2a6..648acdb4c644 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1517,10 +1517,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
}
if (!test_bit(__ICE_DOWN, pf->state)) {
- /* Give it a little more time to try to come back */
+ /* Give it a little more time to try to come back. If still
+ * down, restart autoneg link or reinitialize the interface.
+ */
msleep(75);
if (!test_bit(__ICE_DOWN, pf->state))
return ice_nway_reset(netdev);
+
+ ice_down(vsi);
+ ice_up(vsi);
}
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 5fdea6ec7675..596b9fb1c510 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -242,6 +242,8 @@
#define GLNVM_ULD 0x000B6008
#define GLNVM_ULD_CORER_DONE_M BIT(3)
#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define GLPCI_CNF2 0x000BE004
+#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
#define PF_FUNC_RID_FUNC_NUM_S 0
#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 5bacad01f0c9..1041fa2a7767 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1997,7 +1997,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
if (status) {
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
- ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
+ ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
vsi->back->hw.adminq.sq_last_status);
goto err_out;
}
@@ -2458,6 +2458,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
* on this wq
*/
if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+ ice_napi_del(vsi);
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 05993451147a..333312a1d595 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1465,7 +1465,7 @@ skip_req_irq:
* ice_napi_del - Remove NAPI handler for the VSI
* @vsi: VSI for which NAPI handler is to be removed
*/
-static void ice_napi_del(struct ice_vsi *vsi)
+void ice_napi_del(struct ice_vsi *vsi)
{
int v_idx;
@@ -1622,7 +1622,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- int ret;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -1635,7 +1634,8 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid)) {
- ret = ice_cfg_vlan_pruning(vsi, true);
+ int ret = ice_cfg_vlan_pruning(vsi, true);
+
if (ret)
return ret;
}
@@ -1644,12 +1644,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
*/
- ret = ice_vsi_add_vlan(vsi, vid);
-
- if (!ret)
- set_bit(vid, vsi->active_vlans);
-
- return ret;
+ return ice_vsi_add_vlan(vsi, vid);
}
/**
@@ -1677,8 +1672,6 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
if (status)
return status;
- clear_bit(vid, vsi->active_vlans);
-
/* Disable VLAN pruning when VLAN 0 is removed */
if (unlikely(!vid))
status = ice_cfg_vlan_pruning(vsi, false);
@@ -2002,6 +1995,22 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
+ * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
+ * @pf: pointer to the PF structure
+ *
+ * There is no error returned here because the driver should be able to handle
+ * 128 Byte cache lines, so we only print a warning in case issues are seen,
+ * specifically with Tx.
+ */
+static void ice_verify_cacheline_size(struct ice_pf *pf)
+{
+ if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
+ dev_warn(&pf->pdev->dev,
+ "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
+ ICE_CACHE_LINE_BYTES);
+}
+
+/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in ice_pci_tbl
@@ -2151,6 +2160,8 @@ static int ice_probe(struct pci_dev *pdev,
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+ ice_verify_cacheline_size(pf);
+
return 0;
err_alloc_sw_unroll:
@@ -2182,6 +2193,12 @@ static void ice_remove(struct pci_dev *pdev)
if (!pf)
return;
+ for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
+ if (!ice_is_reset_in_progress(pf->state))
+ break;
+ msleep(100);
+ }
+
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
@@ -2510,31 +2527,6 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
}
/**
- * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
- * @vsi: the VSI being brought back up
- */
-static int ice_restore_vlan(struct ice_vsi *vsi)
-{
- int err;
- u16 vid;
-
- if (!vsi->netdev)
- return -EINVAL;
-
- err = ice_vsi_vlan_setup(vsi);
- if (err)
- return err;
-
- for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
- err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
- if (err)
- break;
- }
-
- return err;
-}
-
-/**
* ice_vsi_cfg - Setup the VSI
* @vsi: the VSI being configured
*
@@ -2546,7 +2538,9 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (vsi->netdev) {
ice_set_rx_mode(vsi->netdev);
- err = ice_restore_vlan(vsi);
+
+ err = ice_vsi_vlan_setup(vsi);
+
if (err)
return err;
}
@@ -3296,7 +3290,7 @@ static void ice_rebuild(struct ice_pf *pf)
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
enum ice_status ret;
- int err;
+ int err, i;
if (test_bit(__ICE_DOWN, pf->state))
goto clear_recovery;
@@ -3370,6 +3364,22 @@ static void ice_rebuild(struct ice_pf *pf)
}
ice_reset_all_vfs(pf, true);
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ bool link_up;
+
+ if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
+ continue;
+ ice_get_link_status(pf->vsi[i]->port_info, &link_up);
+ if (link_up) {
+ netif_carrier_on(pf->vsi[i]->netdev);
+ netif_tx_wake_all_queues(pf->vsi[i]->netdev);
+ } else {
+ netif_carrier_off(pf->vsi[i]->netdev);
+ netif_tx_stop_all_queues(pf->vsi[i]->netdev);
+ }
+ }
+
/* if we get here, reset flow is successful */
clear_bit(__ICE_RESET_FAILED, pf->state);
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 33403f39f1b3..40c9c6558956 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -348,6 +348,18 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
}
/**
+ * ice_clear_all_vsi_ctx - clear all the VSI context entries
+ * @hw: pointer to the hw struct
+ */
+void ice_clear_all_vsi_ctx(struct ice_hw *hw)
+{
+ u16 i;
+
+ for (i = 0; i < ICE_MAX_VSI; i++)
+ ice_clear_vsi_ctx(hw, i);
+}
+
+/**
* ice_add_vsi - add VSI context to the hardware and VSI handle list
* @hw: pointer to the hw struct
* @vsi_handle: unique VSI handle provided by drivers
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index b88d96a1ef69..d5ef0bd58bf9 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -190,6 +190,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
+void ice_clear_all_vsi_ctx(struct ice_hw *hw);
+/* Switch config */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
/* Switch/bridge related commands */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 5dae968d853e..fe5bbabbb41e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1520,7 +1520,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
/* update gso_segs and bytecount */
first->gso_segs = skb_shinfo(skb)->gso_segs;
- first->bytecount = (first->gso_segs - 1) * off->header_len;
+ first->bytecount += (first->gso_segs - 1) * off->header_len;
cd_tso_len = skb->len - off->header_len;
cd_mss = skb_shinfo(skb)->gso_size;
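
The switch from '=' to '+=' matters because first->bytecount is seeded with skb->len before ice_tso() runs, and a TSO skb puts one extra copy of the headers on the wire per additional segment. A standalone sketch of the accounting, with made-up sizes:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical TSO skb: 66-byte headers, 8948-byte payload,
	 * split by the NIC into 6 segments.
	 */
	unsigned int skb_len = 66 + 8948;
	unsigned int header_len = 66;
	unsigned int gso_segs = 6;

	/* Old '=' dropped the skb length already stored in bytecount. */
	unsigned int buggy = (gso_segs - 1) * header_len;        /* 330 */
	/* Fixed '+=': skb bytes plus one replicated header per extra
	 * segment, i.e. the bytes that actually hit the wire.
	 */
	unsigned int fixed = skb_len + (gso_segs - 1) * header_len;

	printf("buggy=%u fixed=%u\n", buggy, fixed);
	assert(fixed == 9344);
	return 0;
}
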
@@ -1556,15 +1556,15 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* magnitude greater than our largest possible GSO size.
*
* This would then be implemented as:
- * return (((size >> 12) * 85) >> 8) + 1;
+ * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
*
* Since multiplication and division are commutative, we can reorder
* operations into:
- * return ((size * 85) >> 20) + 1;
+ * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
*/
static unsigned int ice_txd_use_count(unsigned int size)
{
- return ((size * 85) >> 20) + 1;
+ return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
/**
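
To see how the multiply-and-shift shortcut tracks exact division by the 12K-per-descriptor bound, here is a user-space probe (not driver code; the tolerance argument for the under-count window is spelled out in the untruncated ice_txrx.c comment):

#include <stdio.h>

static unsigned int fast(unsigned int size)
{
	return ((size * 85) >> 20) + 1; /* + ICE_DESCS_FOR_SKB_DATA_PTR */
}

static unsigned int exact(unsigned int size)
{
	return (size + 12288 - 1) / 12288; /* ceil(size / 12K) */
}

int main(void)
{
	unsigned int sizes[] = { 12288, 12289, 12337, 24577, 65536 };
	unsigned int i;

	/* The shortcut trails exact division by one in a narrow window
	 * just above each 12K multiple (e.g. 12289, 24577) and matches
	 * everywhere else in the GSO-sized range.
	 */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %6u: fast %u exact %u\n",
		       sizes[i], fast(sizes[i]), exact(sizes[i]));
	return 0;
}
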
@@ -1706,7 +1706,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
* + 1 desc for context descriptor,
* otherwise try next time
*/
- if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
+ ICE_DESCS_FOR_CTX_DESC)) {
tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 1d0f58bd389b..75d0eaf6c9dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -22,8 +22,21 @@
#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
-/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/* We are assuming that the cache line is always 64 Bytes here for ice.
+ * In order to make sure that is a correct assumption there is a check in probe
+ * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
+ * size is 128 bytes. We do it this way because we do not want to read the
+ * GLPCI_CNF2 register or a variable containing the value on every pass through
+ * the Tx path.
+ */
+#define ICE_CACHE_LINE_BYTES 64
+#define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \
+ sizeof(struct ice_tx_desc))
+#define ICE_DESCS_FOR_CTX_DESC 1
+#define ICE_DESCS_FOR_SKB_DATA_PTR 1
+/* Tx descriptors needed, worst case */
+#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
+ ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
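
Plugging in typical values makes the new worst case concrete (a minimal sketch; MAX_SKB_FRAGS is 17 on 4K-page systems, and an ice Tx descriptor is 16 bytes, so a 64-byte cache line holds four of them):

#include <stdio.h>

#define MAX_SKB_FRAGS 17	/* typical value with 4K pages */
#define ICE_TX_DESC_SIZE 16	/* stand-in for sizeof(struct ice_tx_desc) */
#define ICE_CACHE_LINE_BYTES 64
#define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / ICE_TX_DESC_SIZE)
#define ICE_DESCS_FOR_CTX_DESC 1
#define ICE_DESCS_FOR_SKB_DATA_PTR 1
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)

int main(void)
{
	/* 17 frags + 1 context + 4 (one cache line) + 1 for skb->data = 23,
	 * versus the old opaque MAX_SKB_FRAGS + 4 = 21.
	 */
	printf("worst-case Tx descriptors: %d\n", DESC_NEEDED);
	return 0;
}
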
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 12f9432abf11..f4dbc81c1988 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -92,12 +92,12 @@ struct ice_link_status {
u64 phy_type_low;
u16 max_frame_size;
u16 link_speed;
+ u16 req_speeds;
u8 lse_ena; /* Link Status Event notification */
u8 link_info;
u8 an_info;
u8 ext_info;
u8 pacing;
- u8 req_speeds;
/* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
* ice_aqc_get_phy_caps structure
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 45f10f8f01dc..e71065f9d391 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -348,7 +348,7 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
struct ice_vsi_ctx ctxt = { 0 };
enum ice_status status;
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
ICE_AQ_VSI_PVLAN_INSERT_PVID |
ICE_AQ_VSI_VLAN_EMOD_STR;
ctxt.info.pvid = cpu_to_le16(vid);
@@ -2171,7 +2171,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!ice_vsi_add_vlan(vsi, vid)) {
vf->num_vlan++;
- set_bit(vid, vsi->active_vlans);
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid))
@@ -2190,7 +2189,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
*/
if (!ice_vsi_kill_vlan(vsi, vid)) {
vf->num_vlan--;
- clear_bit(vid, vsi->active_vlans);
/* Disable VLAN pruning when removing VLAN 0 */
if (unlikely(!vid))
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 29ced6b74d36..2b95dc9c7a6a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -53,13 +53,15 @@
* 2^40 * 10^-9 / 60 = 18.3 minutes.
*
* SYSTIM is converted to real time using a timecounter. As
- * timecounter_cyc2time() allows old timestamps, the timecounter
- * needs to be updated at least once per half of the SYSTIM interval.
- * Scheduling of delayed work is not very accurate, so we aim for 8
- * minutes to be sure the actual interval is shorter than 9.16 minutes.
+ * timecounter_cyc2time() allows old timestamps, the timecounter needs
+ * to be updated at least once per half of the SYSTIM interval.
+ * Scheduling of delayed work is not very accurate, and also the NIC
+ * clock can be adjusted to run up to 6% faster and the system clock
+ * up to 10% slower, so we aim for 6 minutes to be sure the actual
+ * interval in the NIC time is shorter than 9.16 minutes.
*/
-#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8)
+#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 6)
#define IGB_PTP_TX_TIMEOUT (HZ * 15)
#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
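
The margin behind the 8-to-6-minute change checks out on the back of an envelope, taking the 6% and 10% tolerances straight from the comment:

#include <stdio.h>

int main(void)
{
	double wrap_min = (double)(1ULL << 40) * 1e-9 / 60.0; /* ~18.33 */
	double budget_min = wrap_min / 2.0;                   /* ~9.16 */
	double worst = 1.06 / 0.90; /* NIC 6% fast, system clock 10% slow */

	/* Eight nominal minutes can stretch past the 9.16-minute budget
	 * in NIC time; six minutes stays comfortably inside it.
	 */
	printf("budget: %.2f min\n", budget_min);
	printf("old 8 min -> %.2f min NIC time\n", 8.0 * worst);
	printf("new 6 min -> %.2f min NIC time\n", 6.0 * worst);
	return 0;
}
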
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 8c5ba4b81fb7..2d4d10a017e5 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -512,7 +512,8 @@ static int xrx200_probe(struct platform_device *pdev)
err = register_netdev(net_dev);
if (err)
goto err_unprepare_clk;
- return err;
+
+ return 0;
err_unprepare_clk:
clk_disable_unprepare(priv->clk);
@@ -520,7 +521,7 @@ err_unprepare_clk:
err_uninit_dma:
xrx200_hw_cleanup(priv);
- return 0;
+ return err;
}
static int xrx200_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5bfd349bf41a..e5397c8197b9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -494,7 +494,7 @@ struct mvneta_port {
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
u32 command; /* Options used by HW for packet transmitting.*/
- u16 reserverd1; /* csum_l4 (for future use) */
+ u16 reserved1; /* csum_l4 (for future use) */
u16 data_size; /* Data size of transmitted packet in bytes */
u32 buf_phys_addr; /* Physical addr of transmitted buffer */
u32 reserved2; /* hw_cmd - (for future use, PMT) */
@@ -519,7 +519,7 @@ struct mvneta_rx_desc {
#else
struct mvneta_tx_desc {
u16 data_size; /* Data size of transmitted packet in bytes */
- u16 reserverd1; /* csum_l4 (for future use) */
+ u16 reserved1; /* csum_l4 (for future use) */
u32 command; /* Options used by HW for packet transmitting.*/
u32 reserved2; /* hw_cmd - (for future use, PMT) */
u32 buf_phys_addr; /* Physical addr of transmitted buffer */
@@ -3343,7 +3343,6 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != PHY_INTERFACE_MODE_QSGMII &&
state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_2500BASEX &&
!phy_interface_mode_is_8023z(state->interface) &&
!phy_interface_mode_is_rgmii(state->interface)) {
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -3357,14 +3356,9 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
/* Asymmetric pause is unsupported */
phylink_set(mask, Pause);
- /* We cannot use 1Gbps when using the 2.5G interface. */
- if (state->interface == PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- } else {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
+ /* Half-duplex at speeds higher than 100Mbit is unsupported */
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
if (!phy_interface_mode_is_8023z(state->interface)) {
/* 10M and 100M are only supported in non-802.3z mode */
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index deef5a998985..9af34e03892c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -337,7 +337,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
int align, u32 skip_mask, u32 *puid)
{
- u32 uid;
+ u32 uid = 0;
u32 res;
struct mlx4_zone_allocator *zone_alloc = zone->allocator;
struct mlx4_zone_entry *curr_node;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1857ee0f0871..6f5153afcab4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -1006,7 +1006,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->packets++;
}
ring->bytes += tx_info->nr_bytes;
- netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
if (tx_info->inl)
@@ -1044,7 +1043,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(ring->tx_queue);
ring->queue_stopped++;
}
- send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue);
+
+ send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
+ tx_info->nr_bytes,
+ skb->xmit_more);
real_size = (real_size / 16) & 0x3f;
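
The helper used here folds the BQL byte accounting and the doorbell decision into one call, replacing the separate xmit_more/netif_xmit_stopped() test. Paraphrasing the 4.20-era helper (CONFIG_BQL guard elided; see include/linux/netdevice.h for the authoritative version):

static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					  unsigned int bytes,
					  bool xmit_more)
{
	if (xmit_more) {
		/* Account the bytes; only ask for an early doorbell if
		 * BQL just stopped the queue.
		 */
		dql_queued(&dev_queue->dql, bytes);
		return netif_tx_queue_stopped(dev_queue);
	}
	/* Last skb of the batch: account and always kick the NIC. */
	netdev_tx_sent_queue(dev_queue, bytes);
	return true;
}
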
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ebcd2778eeb3..23f1b5b512c2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -540,8 +540,8 @@ struct slave_list {
struct resource_allocator {
spinlock_t alloc_lock; /* protect quotas */
union {
- int res_reserved;
- int res_port_rsvd[MLX4_MAX_PORTS];
+ unsigned int res_reserved;
+ unsigned int res_port_rsvd[MLX4_MAX_PORTS];
};
union {
int res_free;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 2e84f10f59ba..1a11bc0e1612 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -363,6 +363,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
buf);
+ (*mpt_entry)->lkey = 0;
err = mlx4_SW2HW_MPT(dev, mailbox, key);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index d7fbd5b6ac95..118324802926 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -569,6 +569,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
+ unsigned int hw_mtu;
struct net_dim dim; /* Dynamic Interrupt Moderation */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 023dc4bccd28..4a37713023be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
*speed = mlx5e_port_ptys2speed(eth_proto_oper);
- if (!(*speed)) {
- mlx5_core_warn(mdev, "cannot get port speed\n");
+ if (!(*speed))
err = -EINVAL;
- }
return err;
}
@@ -258,7 +256,7 @@ static int mlx5e_fec_admin_field(u32 *pplm,
case 40000:
if (!write)
*fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_10g_40g);
+ fec_override_admin_10g_40g);
else
MLX5_SET(pplm_reg, pplm,
fec_override_admin_10g_40g, *fec_policy);
@@ -310,7 +308,7 @@ static int mlx5e_get_fec_cap_field(u32 *pplm,
case 10000:
case 40000:
*fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_10g_40g);
+ fec_override_cap_10g_40g);
break;
case 25000:
*fec_cap = MLX5_GET(pplm_reg, pplm,
@@ -394,12 +392,12 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
{
+ u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC);
bool fec_mode_not_supp_in_speed = false;
- u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
- u32 current_fec_speed;
+ u8 fec_policy_auto = 0;
u8 fec_caps = 0;
int err;
int i;
@@ -415,23 +413,19 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
if (err)
return err;
- err = mlx5e_port_linkspeed(dev, &current_fec_speed);
- if (err)
- return err;
+ MLX5_SET(pplm_reg, out, local_port, 1);
- memset(in, 0, sz);
- MLX5_SET(pplm_reg, in, local_port, 1);
- for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) {
+ for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) {
mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
- /* policy supported for link speed */
- if (!!(fec_caps & fec_policy)) {
- mlx5e_fec_admin_field(in, &fec_policy, 1,
+ /* policy supported for link speed, or policy is auto */
+ if (fec_caps & fec_policy || fec_policy == fec_policy_auto) {
+ mlx5e_fec_admin_field(out, &fec_policy, 1,
fec_supported_speeds[i]);
} else {
- if (fec_supported_speeds[i] == current_fec_speed)
- return -EOPNOTSUPP;
- mlx5e_fec_admin_field(in, &no_fec_policy, 1,
- fec_supported_speeds[i]);
+ /* turn off FEC if supported. Else, leave it the same */
+ if (fec_caps & fec_policy_nofec)
+ mlx5e_fec_admin_field(out, &fec_policy_nofec, 1,
+ fec_supported_speeds[i]);
fec_mode_not_supp_in_speed = true;
}
}
@@ -441,5 +435,5 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
"FEC policy 0x%x is not supported for some speeds",
fec_policy);
- return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1);
+ return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index c047da8752da..eac245a93f91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
int err;
err = mlx5e_port_linkspeed(priv->mdev, &speed);
- if (err)
+ if (err) {
+ mlx5_core_warn(priv->mdev, "cannot get port speed\n");
return 0;
+ }
xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3e770abfd802..25c1c4f96841 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -843,8 +843,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
Autoneg);
- err = get_fec_supported_advertised(mdev, link_ksettings);
- if (err)
+ if (get_fec_supported_advertised(mdev, link_ksettings))
netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
__func__, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1243edbedc9e..871313d6b34d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -502,6 +502,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &c->priv->channel_stats[c->ix].rq;
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -1623,13 +1624,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
int err;
u32 i;
+ err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+ if (err)
+ return err;
+
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
&cq->wq_ctrl);
if (err)
return err;
- mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
@@ -1687,6 +1690,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
int eqn;
int err;
+ err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+ if (err)
+ return err;
+
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
@@ -1700,8 +1707,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
- mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
-
MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
@@ -1921,6 +1926,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
int err;
int eqn;
+ err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+ if (err)
+ return err;
+
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@@ -1937,7 +1946,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
- mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
c->irq_desc = irq_to_desc(irq);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -3574,6 +3582,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
return 0;
}
+#ifdef CONFIG_MLX5_ESWITCH
static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3586,6 +3595,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
return 0;
}
+#endif
static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
@@ -3684,7 +3694,9 @@ static int mlx5e_set_features(struct net_device *netdev,
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
+#ifdef CONFIG_MLX5_ESWITCH
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
+#endif
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -3755,10 +3767,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
}
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
- reset = reset && (ppw_old != ppw_new);
+ reset = reset && (is_linear || (ppw_old != ppw_new));
}
if (!reset) {
@@ -4678,7 +4691,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
+#ifdef CONFIG_MLX5_ESWITCH
netdev->hw_features |= NETIF_F_HW_TC;
+#endif
#ifdef CONFIG_MLX5_EN_ARFS
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
@@ -5004,11 +5019,21 @@ err_free_netdev:
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
const struct mlx5e_profile *profile;
+ int max_nch;
int err;
profile = priv->profile;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ /* max number of channels may have changed */
+ max_nch = mlx5e_get_max_num_channels(priv->mdev);
+ if (priv->channels.params.num_channels > max_nch) {
+ mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
+ priv->channels.params.num_channels = max_nch;
+ mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, max_nch);
+ }
+
err = profile->init_tx(priv);
if (err)
goto out;
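
When the clamp fires, the indirection table must stop referencing channels that no longer exist. The default builder spreads table slots round-robin over the remaining channels; a minimal user-space sketch of the same idea (table shortened, the real one is larger):

#include <stdio.h>

#define INDIR_RQT_SIZE 8 /* illustrative; the real table is larger */

int main(void)
{
	unsigned int rqt[INDIR_RQT_SIZE];
	unsigned int i, num_channels = 3; /* e.g. clamped down from 5 */

	for (i = 0; i < INDIR_RQT_SIZE; i++)
		rqt[i] = i % num_channels; /* round-robin over channels */

	for (i = 0; i < INDIR_RQT_SIZE; i++)
		printf("%u ", rqt[i]); /* 0 1 2 0 1 2 0 1 */
	printf("\n");
	return 0;
}
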
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 79638dcbae78..16985ca3248d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1104,6 +1104,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u32 frag_size;
bool consumed;
+ /* Check packet size. Note LRO doesn't use linear SKB */
+ if (unlikely(cqe_bcnt > rq->hw_mtu)) {
+ rq->stats->oversize_pkts_sw_drop++;
+ return NULL;
+ }
+
va = page_address(di->page) + head_offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 35ded91203f5..4382ef85488c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
return 1;
}
-#ifdef CONFIG_INET
-/* loopback test */
-#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
-static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
-#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
-
struct mlx5ehdr {
__be32 version;
__be64 magic;
- char text[ETH_GSTRING_LEN];
};
+#ifdef CONFIG_INET
+/* loopback test */
+#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
+ sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
+#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
+
static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
{
struct sk_buff *skb = NULL;
@@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
struct ethhdr *ethh;
struct udphdr *udph;
struct iphdr *iph;
- int datalen, iplen;
-
- datalen = MLX5E_TEST_PKT_SIZE -
- (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
+ int iplen;
skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
if (!skb) {
@@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
/* Fill UDP header */
udph->source = htons(9);
udph->dest = htons(9); /* Discard Protocol */
- udph->len = htons(datalen + sizeof(struct udphdr));
+ udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
udph->check = 0;
/* Fill IP header */
@@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
iph->ttl = 32;
iph->version = 4;
iph->protocol = IPPROTO_UDP;
- iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
+ iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
+ sizeof(struct mlx5ehdr);
iph->tot_len = htons(iplen);
iph->frag_off = 0;
iph->saddr = 0;
@@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
mlxh = skb_put(skb, sizeof(*mlxh));
mlxh->version = 0;
mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
- strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
- datalen -= sizeof(*mlxh);
- skb_put_zero(skb, datalen);
skb->csum = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1e55b9c27ffc..3e99d0728b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -83,6 +83,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -161,6 +162,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
+ s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -1189,6 +1191,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 77f74ce11280..3f8e870ef4c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -96,6 +96,7 @@ struct mlx5e_sw_stats {
u64 rx_wqe_err;
u64 rx_mpwqe_filler_cqes;
u64 rx_mpwqe_filler_strides;
+ u64 rx_oversize_pkts_sw_drop;
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
@@ -193,6 +194,7 @@ struct mlx5e_rq_stats {
u64 wqe_err;
u64 mpwqe_filler_cqes;
u64 mpwqe_filler_strides;
+ u64 oversize_pkts_sw_drop;
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 608025ca5c04..fca6f4132c91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1447,31 +1447,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
inner_headers);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key =
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ FLOW_DISSECTOR_KEY_BASIC,
f->key);
- struct flow_dissector_key_eth_addrs *mask =
+ struct flow_dissector_key_basic *mask =
skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ FLOW_DISSECTOR_KEY_BASIC,
f->mask);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+ ntohs(mask->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ntohs(key->n_proto));
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
- dmac_47_16),
- mask->dst);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- dmac_47_16),
- key->dst);
-
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
- smac_47_16),
- mask->src);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- smac_47_16),
- key->src);
-
- if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+ if (mask->n_proto)
*match_level = MLX5_MATCH_L2;
}
@@ -1505,9 +1495,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_L2;
}
- } else {
+ } else if (*match_level != MLX5_MATCH_NONE) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+ *match_level = MLX5_MATCH_L2;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1545,21 +1536,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_dissector_key_eth_addrs *key =
skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
f->key);
- struct flow_dissector_key_basic *mask =
+ struct flow_dissector_key_eth_addrs *mask =
skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
f->mask);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
- ntohs(mask->n_proto));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ntohs(key->n_proto));
- if (mask->n_proto)
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dmac_47_16),
+ mask->dst);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dmac_47_16),
+ key->dst);
+
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ smac_47_16),
+ mask->src);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ smac_47_16),
+ key->src);
+
+ if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
*match_level = MLX5_MATCH_L2;
}
@@ -1586,10 +1587,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
/* the HW doesn't need L3 inline to match on frag=no */
if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
- *match_level = MLX5_INLINE_MODE_L2;
+ *match_level = MLX5_MATCH_L2;
/* *** L2 attributes parsing up to here *** */
else
- *match_level = MLX5_INLINE_MODE_IP;
+ *match_level = MLX5_MATCH_L3;
}
}
@@ -2979,7 +2980,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;
- if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 515e3d6de051..5a22c5874f3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule {
};
static const struct rhashtable_params rhash_sa = {
- .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
- .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
+ /* Keep the "cmd" field out of the key, as its
+ * value is not constant during the lifetime
+ * of the key object.
+ */
+ .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
+ FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
+ .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
+ FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
.automatic_shrinking = true,
.min_size = 1,
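
The arithmetic works because "cmd" sits at the start of the v1 SA layout: bumping key_offset by its size and shrinking key_len by the same amount makes the hash cover every byte of hw_sa except the mutable prefix. A minimal analog with a made-up struct:

#include <stddef.h>
#include <stdio.h>

struct rec {
	int cmd;		/* mutable over the object's lifetime */
	char payload[28];	/* stable identity */
};

int main(void)
{
	size_t key_offset = offsetof(struct rec, payload);
	size_t key_len = sizeof(struct rec) - offsetof(struct rec, payload);

	/* The key window skips the leading mutable field entirely. */
	printf("key covers bytes [%zu, %zu) of struct rec\n",
	       key_offset, key_offset + key_len);
	return 0;
}
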
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index b59953daf8b4..11dabd62e2c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -560,9 +560,9 @@ static int mlx5i_close(struct net_device *netdev)
netif_carrier_off(epriv->netdev);
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
- mlx5i_uninit_underlay_qp(epriv);
mlx5e_deactivate_priv_channels(epriv);
mlx5e_close_channels(&epriv->channels);
+ mlx5i_uninit_underlay_qp(epriv);
unlock:
mutex_unlock(&epriv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index a2df12b79f8e..9bec940330a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3568,7 +3568,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
burst_size = 7;
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
- is_bytes = true;
rate = 4 * 1024;
burst_size = 4;
break;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 8e8fa823d611..69966dfc6e3d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -191,7 +191,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
static void
qed_dcbx_set_params(struct qed_dcbx_results *p_data,
struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- bool enable, u8 prio, u8 tc,
+ bool app_tlv, bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type,
enum qed_pci_personality personality)
{
@@ -210,7 +210,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
p_data->arr[type].dont_add_vlan0 = true;
/* QM reconf data */
- if (p_hwfn->hw_info.personality == personality)
+ if (app_tlv && p_hwfn->hw_info.personality == personality)
qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
/* Configure dcbx vlan priority in doorbell block for roce EDPM */
@@ -225,7 +225,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
static void
qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- bool enable, u8 prio, u8 tc,
+ bool app_tlv, bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type)
{
enum qed_pci_personality personality;
@@ -240,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
personality = qed_dcbx_app_update[i].personality;
- qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
+ qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable,
prio, tc, type, personality);
}
}
@@ -319,8 +319,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
enable = true;
}
- qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
- priority, tc, type);
+ qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true,
+ enable, priority, tc, type);
}
}
@@ -341,7 +341,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
continue;
enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
- qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
+ qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable,
priority, tc, type);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 7ceb2b97538d..88a8576ca9ce 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -185,6 +185,10 @@ void qed_resc_free(struct qed_dev *cdev)
qed_iscsi_free(p_hwfn);
qed_ooo_free(p_hwfn);
}
+
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn))
+ qed_rdma_info_free(p_hwfn);
+
qed_iov_free(p_hwfn);
qed_l2_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
@@ -481,8 +485,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
/* Can't have multiple flags set here */
- if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
+ if (bitmap_weight((unsigned long *)&pq_flags,
+ sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
+ DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
+ goto err;
+ }
+
+ if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
+ DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
goto err;
+ }
switch (pq_flags) {
case PQ_FLAGS_RLS:
@@ -506,8 +518,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
}
err:
- DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
- return NULL;
+ return &qm_info->start_pq;
}
/* save pq index in qm info */
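
The bitmap_weight() fix is easy to demonstrate: its second argument is a count of bits, so passing sizeof(pq_flags) (4, the byte count) only scanned bits 0-3 and let multi-flag values above bit 3 slip through. A miniature reproduction:

#include <stdio.h>

/* What bitmap_weight() does, in miniature: count the set bits among
 * the lowest nbits bits.
 */
static int weight(unsigned int v, unsigned int nbits)
{
	int n = 0;
	unsigned int i;

	for (i = 0; i < nbits; i++)
		n += (v >> i) & 1;
	return n;
}

int main(void)
{
	unsigned int pq_flags = 0x210; /* illustrative: flags at bits 4 and 9 */

	printf("old, nbits=sizeof()=4:    %d\n", weight(pq_flags, 4));
	printf("new, nbits=sizeof()*8=32: %d\n",
	       weight(pq_flags, sizeof(pq_flags) * 8));
	/* 0 vs 2: only the fixed call sees both flags and can reject them */
	return 0;
}
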
@@ -531,20 +542,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
{
u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
+ if (max_tc == 0) {
+ DP_ERR(p_hwfn, "pq with flag 0x%lx does not exist\n",
+ PQ_FLAGS_MCOS);
+ return p_hwfn->qm_info.start_pq;
+ }
+
if (tc > max_tc)
DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
- return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
}
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
{
u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
+ if (max_vf == 0) {
+ DP_ERR(p_hwfn, "pq with flag 0x%lx does not exist\n",
+ PQ_FLAGS_VFS);
+ return p_hwfn->qm_info.start_pq;
+ }
+
if (vf > max_vf)
DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
- return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
}
u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
@@ -1081,6 +1104,12 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
}
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
+ rc = qed_rdma_info_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ }
+
/* DMA info initialization */
rc = qed_dmae_info_alloc(p_hwfn);
if (rc)
@@ -2102,11 +2131,8 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
if (!p_ptt)
return -EAGAIN;
- /* If roce info is allocated it means roce is initialized and should
- * be enabled in searcher.
- */
if (p_hwfn->p_rdma_info &&
- p_hwfn->b_rdma_enabled_in_prs)
+ p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs)
qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
/* Re-open incoming traffic */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index cc1b373c0ace..46dc93d3b9b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
"Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
fcoe_pf_params->num_cqs,
p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
- return -EINVAL;
+ rc = -EINVAL;
+ goto err;
}
p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
@@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
if (rc)
- return rc;
+ goto err;
cxt_info.iid = dummy_cid;
rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
if (rc) {
DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
dummy_cid);
- return rc;
+ goto err;
}
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
@@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
return rc;
+
+err:
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return rc;
}
static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 0f0aba793352..b22f464ea3fa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -992,6 +992,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
*/
do {
index = p_sb_attn->sb_index;
+ /* finish reading index before the loop condition */
+ dma_rmb();
attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
} while (index != p_sb_attn->sb_index);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 1135387bd99d..4f8a685d1a55 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
"Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
p_params->num_queues,
p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 82a1bd1f8a8c..67c02ea93906 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc) {
- /* Return spq entry which is taken in qed_sp_init_request()*/
- qed_spq_return_entry(p_hwfn, p_ent);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
@@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"%d is not supported yet\n",
p_filter_cmd->opcode);
+ qed_sp_destroy_request(p_hwfn, *pp_ent);
return -EINVAL;
}
@@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
} else {
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc)
- return rc;
+ goto err;
if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
&abs_rx_q_id);
if (rc)
- return rc;
+ goto err;
p_ramrod->rx_qid_valid = 1;
p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
@@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
(u64)p_params->addr, p_params->length);
return qed_spq_post(p_hwfn, p_ent, NULL);
+
+err:
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return rc;
}
int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 35fd0db6a677..fff7f04d4525 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1782,9 +1782,9 @@ static int qed_drain(struct qed_dev *cdev)
return -EBUSY;
}
rc = qed_mcp_drain(hwfn, ptt);
+ qed_ptt_release(hwfn, ptt);
if (rc)
return rc;
- qed_ptt_release(hwfn, ptt);
}
return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index f40f654398a0..a96364df4320 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1944,9 +1944,12 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_speed_mask)
{
u32 transceiver_type, transceiver_state;
+ int ret;
- qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
- &transceiver_type);
+ ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+ &transceiver_type);
+ if (ret)
+ return ret;
if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
false)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index c71391b9c757..7873d6dfd91f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -140,22 +140,34 @@ static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}
-static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_rdma_start_in_params *params)
+int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_info *p_rdma_info;
- u32 num_cons, num_tasks;
- int rc = -ENOMEM;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
-
- /* Allocate a struct with current pf rdma info */
p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
if (!p_rdma_info)
- return rc;
+ return -ENOMEM;
+
+ spin_lock_init(&p_rdma_info->lock);
p_hwfn->p_rdma_info = p_rdma_info;
+ return 0;
+}
+
+void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->p_rdma_info);
+ p_hwfn->p_rdma_info = NULL;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ u32 num_cons, num_tasks;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
if (QED_IS_IWARP_PERSONALITY(p_hwfn))
p_rdma_info->proto = PROTOCOLID_IWARP;
else
@@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Allocate a struct with device params and fill it */
p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
if (!p_rdma_info->dev)
- goto free_rdma_info;
+ return rc;
/* Allocate a struct with port params and fill it */
p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
@@ -298,8 +310,6 @@ free_rdma_port:
kfree(p_rdma_info->port);
free_rdma_dev:
kfree(p_rdma_info->dev);
-free_rdma_info:
- kfree(p_rdma_info);
return rc;
}
@@ -370,8 +380,6 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
kfree(p_rdma_info->port);
kfree(p_rdma_info->dev);
-
- kfree(p_rdma_info);
}
static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
@@ -679,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
- spin_lock_init(&p_hwfn->p_rdma_info->lock);
-
qed_rdma_init_devinfo(p_hwfn, params);
qed_rdma_init_port(p_hwfn);
qed_rdma_init_events(p_hwfn, params);
@@ -727,7 +733,7 @@ static int qed_rdma_stop(void *rdma_cxt)
/* Disable RoCE search */
qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
p_hwfn->b_rdma_enabled_in_prs = false;
-
+ p_hwfn->p_rdma_info->active = 0;
qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
@@ -1236,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt,
u8 max_stats_queues;
int rc;
- if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
+ if (!rdma_cxt || !in_params || !out_params ||
+ !p_hwfn->p_rdma_info->active) {
DP_ERR(p_hwfn->cdev,
"qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
rdma_cxt, in_params, out_params);
@@ -1514,6 +1521,7 @@ qed_rdma_register_tid(void *rdma_cxt,
default:
rc = -EINVAL;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
SET_FIELD(p_ramrod->flags1,
@@ -1801,8 +1809,8 @@ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
{
bool result;
- /* if rdma info has not been allocated, naturally there are no qps */
- if (!p_hwfn->p_rdma_info)
+ /* if rdma wasn't activated yet, naturally there are no qps */
+ if (!p_hwfn->p_rdma_info->active)
return false;
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
@@ -1848,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt,
if (!p_ptt)
goto err;
- rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+ rc = qed_rdma_alloc(p_hwfn);
if (rc)
goto err1;
@@ -1857,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt,
goto err2;
qed_ptt_release(p_hwfn, p_ptt);
+ p_hwfn->p_rdma_info->active = 1;
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 6f722ee8ee94..3689fe3e5935 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -102,6 +102,7 @@ struct qed_rdma_info {
u16 max_queue_zones;
enum protocol_type proto;
struct qed_iwarp_info iwarp;
+ u8 active:1;
};
struct qed_rdma_qp {
@@ -176,10 +177,14 @@ struct qed_rdma_qp {
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
+void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
#else
static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt) {}
+static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;}
+static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
#endif
int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index f9167d1354bb..e49fada85410 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
rc);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index e95431f6acd4..3157c0d99441 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -167,6 +167,9 @@ struct qed_spq_entry {
enum spq_mode comp_mode;
struct qed_spq_comp_cb comp_cb;
struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+
+ /* Posted entry backing an unlimited-pending list entry in EBLOCK mode */
+ struct qed_spq_entry *post_ent;
};
struct qed_eq {
@@ -396,6 +399,17 @@ struct qed_sp_init_data {
struct qed_spq_comp_cb *p_comp_data;
};
+/**
+ * @brief Returns an SPQ entry to the pool / frees the entry if allocated.
+ * Should be called in error flows after initializing the SPQ entry
+ * and before posting it.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent);
+
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
u8 cmd,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 77b6248ad3b9..888274fa208b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -47,6 +47,19 @@
#include "qed_sp.h"
#include "qed_sriov.h"
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ /* qed_spq_get_entry() can either get an entry from the free_pool,
+ * or, if no entries are left, allocate a new entry and add it to
+ * the unlimited_pending list.
+ */
+ if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
+ kfree(p_ent);
+ else
+ qed_spq_return_entry(p_hwfn, p_ent);
+}
+
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
@@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
case QED_SPQ_MODE_BLOCK:
if (!p_data->p_comp_data)
- return -EINVAL;
+ goto err;
p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
break;
@@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
default:
DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
p_ent->comp_mode);
- return -EINVAL;
+ goto err;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
return 0;
+
+err:
+ qed_sp_destroy_request(p_hwfn, p_ent);
+
+ return -EINVAL;
}
static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
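
A hedged sketch of the call pattern this enables (EXAMPLE_CMD, EXAMPLE_PROTO and configure_ramrod() are placeholders, not real qed symbols):

static int example_send_ramrod(struct qed_hwfn *p_hwfn)
{
	struct qed_sp_init_data init_data = {};
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	rc = qed_sp_init_request(p_hwfn, &p_ent, EXAMPLE_CMD,
				 EXAMPLE_PROTO, &init_data);
	if (rc)
		return rc;

	rc = configure_ramrod(p_ent); /* hypothetical setup step */
	if (rc) {
		/* Any failure after init and before posting must hand the
		 * entry back; the helper frees unlimited_pending entries
		 * and returns pooled ones.
		 */
		qed_sp_destroy_request(p_hwfn, p_ent);
		return rc;
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
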
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index c4a6274dd625..0a9c5bb0fa48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = qed_mcp_drain(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
if (rc) {
DP_NOTICE(p_hwfn, "MCP drain failed\n");
goto err;
@@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/* Retry after drain */
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
if (!rc)
- goto out;
+ return 0;
comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
- if (comp_done->done == 1)
+ if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
-out:
- qed_ptt_release(p_hwfn, p_ptt);
- return 0;
-
+ return 0;
+ }
err:
- qed_ptt_release(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn,
"Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
le32_to_cpu(p_ent->elem.hdr.cid),
@@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
/* EBLOCK responsible to free the allocated p_ent */
if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
kfree(p_ent);
+ else
+ p_ent->post_ent = p_en2;
p_ent = p_en2;
}
@@ -767,6 +767,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
+/* Avoid overwriting of SPQ entries when getting out-of-order completions, by
+ * marking the completions in a bitmap and advancing the chain consumer only
+ * over the leading run of successively completed entries.
+ */
+static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
+{
+ u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ __set_bit(pos, p_spq->p_comp_bitmap);
+ while (test_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap)) {
+ __clear_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap);
+ p_spq->comp_bitmap_idx++;
+ qed_chain_return_produced(&p_spq->chain);
+ }
+}
+
int qed_spq_post(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
@@ -824,11 +843,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
p_ent->queue == &p_spq->unlimited_pending);
if (p_ent->queue == &p_spq->unlimited_pending) {
- /* This is an allocated p_ent which does not need to
- * return to pool.
- */
+ struct qed_spq_entry *p_post_ent = p_ent->post_ent;
+
kfree(p_ent);
- return rc;
+
+ /* Return the entry which was actually posted */
+ p_ent = p_post_ent;
}
if (rc)
@@ -842,7 +862,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
spq_post_fail2:
spin_lock_bh(&p_spq->lock);
list_del(&p_ent->list);
- qed_chain_return_produced(&p_spq->chain);
+ qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
spq_post_fail:
/* return to the free pool */
@@ -874,25 +894,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
spin_lock_bh(&p_spq->lock);
list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
if (p_ent->elem.hdr.echo == echo) {
- u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
-
list_del(&p_ent->list);
-
- /* Avoid overriding of SPQ entries when getting
- * out-of-order completions, by marking the completions
- * in a bitmap and increasing the chain consumer only
- * for the first successive completed entries.
- */
- __set_bit(pos, p_spq->p_comp_bitmap);
-
- while (test_bit(p_spq->comp_bitmap_idx,
- p_spq->p_comp_bitmap)) {
- __clear_bit(p_spq->comp_bitmap_idx,
- p_spq->p_comp_bitmap);
- p_spq->comp_bitmap_idx++;
- qed_chain_return_produced(&p_spq->chain);
- }
-
+ qed_spq_comp_bmap_update(p_hwfn, echo);
p_spq->comp_count++;
found = p_ent;
break;
@@ -931,11 +934,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
QED_MSG_SPQ,
"Got a completion without a callback function\n");
- if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
- (found->queue == &p_spq->unlimited_pending))
+ if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
/* EBLOCK is responsible for returning its own entry into the
- * free list, unless it originally added the entry into the
- * unlimited pending list.
+ * free list.
*/
qed_spq_return_entry(p_hwfn, found);
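qed_spq_comp_bmap_update() factors out the out-of-order completion handling that qed_spq_post() and qed_spq_completion() now share: each completion is marked in a bitmap, and the chain consumer advances only over the leading contiguous run of completed slots. A userspace model of just that mechanism (a plain bool array instead of the kernel bitmap helpers; RING_SIZE is an arbitrary stand-in for SPQ_RING_SIZE):

#include <assert.h>
#include <stdbool.h>

#define RING_SIZE 8

static bool comp_bitmap[RING_SIZE];
static unsigned int comp_idx;	/* next slot expected to complete in order */
static unsigned int consumed;	/* how far the ring consumer has advanced */

static void comp_bmap_update(unsigned int echo)
{
	comp_bitmap[echo % RING_SIZE] = true;

	/* Only advance the consumer over a contiguous run of completions. */
	while (comp_bitmap[comp_idx % RING_SIZE]) {
		comp_bitmap[comp_idx % RING_SIZE] = false;
		comp_idx++;
		consumed++;
	}
}

int main(void)
{
	comp_bmap_update(1);	/* out of order: slot 0 still pending */
	assert(consumed == 0);
	comp_bmap_update(0);	/* now slots 0 and 1 are both complete */
	assert(consumed == 2);
	return 0;
}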
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 9b08a9d9e151..ca6290fa0f30 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
default:
DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
p_hwfn->hw_info.personality);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 9647578cbe6a..14f26bf3b388 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -459,7 +459,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
struct qlcnic_host_tx_ring *tx_ring)
{
- u8 l4proto, opcode = 0, hdr_len = 0;
+ u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0;
u16 flags = 0, vlan_tci = 0;
int copied, offset, copy_len, size;
struct cmd_desc_type0 *hwdesc;
@@ -472,14 +472,16 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
flags = QLCNIC_FLAGS_VLAN_TAGGED;
vlan_tci = ntohs(vh->h_vlan_TCI);
protocol = ntohs(vh->h_vlan_encapsulated_proto);
+ tag_vlan = 1;
} else if (skb_vlan_tag_present(skb)) {
flags = QLCNIC_FLAGS_VLAN_OOB;
vlan_tci = skb_vlan_tag_get(skb);
+ tag_vlan = 1;
}
if (unlikely(adapter->tx_pvid)) {
- if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+ if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
return -EIO;
- if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+ if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED))
goto set_flags;
flags = QLCNIC_FLAGS_VLAN_OOB;
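The qlcnic change is needed because a VLAN TCI of zero is legal on a tagged frame (priority-tagged, VID 0), so testing vlan_tci as a boolean misclassifies such frames as untagged; the new tag_vlan flag records tagging explicitly. A tiny model of the distinction, using a hypothetical frame struct rather than the driver's descriptors:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct frame {
	bool tagged;		/* was an 802.1Q header present? */
	uint16_t vlan_tci;	/* TCI from that header, may legitimately be 0 */
};

int main(void)
{
	struct frame f = { .tagged = true, .vlan_tci = 0 };	/* priority-tagged, VID 0 */

	assert(f.vlan_tci == 0);	/* the old test would treat this as untagged */
	assert(f.tagged);		/* the explicit flag gets it right */
	return 0;
}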
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 0afc3d335d56..d11c16aeb19a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct net_device *real_dev,
struct rmnet_endpoint *ep)
{
- struct rmnet_priv *priv;
+ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
int rc;
if (ep->egress_dev)
@@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
rmnet_dev->hw_features |= NETIF_F_SG;
+ priv->real_dev = real_dev;
+
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
@@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
- priv = netdev_priv(rmnet_dev);
priv->mux_id = id;
- priv->real_dev = real_dev;
netdev_dbg(rmnet_dev, "rmnet dev created\n");
}
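rmnet now fills priv->real_dev before register_netdevice() because device callbacks may run as soon as registration succeeds, and they must not observe a half-initialized private area. A compact model of that ordering rule (do_register() and callback() are hypothetical stand-ins for registration and a driver callback):

#include <assert.h>
#include <stddef.h>

struct priv { const char *real_dev; };

/* Stand-in for register_netdevice(): callbacks may fire during registration. */
static void do_register(struct priv *p, void (*cb)(struct priv *))
{
	cb(p);	/* runs before do_register() returns */
}

static void callback(struct priv *p)
{
	assert(p->real_dev != NULL);	/* would trip if set only after registering */
}

int main(void)
{
	struct priv p = { 0 };

	p.real_dev = "eth0";		/* fill private state first... */
	do_register(&p, callback);	/* ...then register */
	return 0;
}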
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index b1b305f8f414..272b9ca66314 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -365,7 +365,8 @@ struct dma_features {
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
#define BUF_SIZE_16KiB 16384
-#define BUF_SIZE_8KiB 8192
+/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */
+#define BUF_SIZE_8KiB 8188
#define BUF_SIZE_4KiB 4096
#define BUF_SIZE_2KiB 2048
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index ca9d7e48034c..40d6356a7e73 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -31,7 +31,7 @@
/* Enhanced descriptors */
static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
{
- p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
+ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
<< ERDES1_BUFFER2_SIZE_SHIFT)
& ERDES1_BUFFER2_SIZE_MASK);
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 77914c89d749..5ef91a790f9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
+ p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_rx_set_on_chain(p);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index abc3f85270cd..d8c5bc412219 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -140,7 +140,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
static int set_16kib_bfsize(int mtu)
{
int ret = 0;
- if (unlikely(mtu >= BUF_SIZE_8KiB))
+ if (unlikely(mtu > BUF_SIZE_8KiB))
ret = BUF_SIZE_16KiB;
return ret;
}
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 3b7f10a5f06a..c5cae8e74dc4 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
*
* Copyright (c) 2018 Maciej W. Rozycki
@@ -56,7 +56,7 @@
#define DRV_VERSION "v.1.1.4"
#define DRV_RELDATE "Oct 6 2018"
-static char version[] =
+static const char version[] =
DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";
MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
@@ -784,7 +784,7 @@ err_rx:
static void fza_tx_smt(struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
- struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr;
+ struct fza_buffer_tx __iomem *smt_tx_ptr;
int i, len;
u32 own;
@@ -799,6 +799,7 @@ static void fza_tx_smt(struct net_device *dev)
if (!netif_queue_stopped(dev)) {
if (dev_nit_active(dev)) {
+ struct fza_buffer_tx *skb_data_ptr;
struct sk_buff *skb;
/* Length must be a multiple of 4 as only word
diff --git a/drivers/net/fddi/defza.h b/drivers/net/fddi/defza.h
index b06acf32738e..93bda61be8e3 100644
--- a/drivers/net/fddi/defza.h
+++ b/drivers/net/fddi/defza.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
*
* Copyright (c) 2018 Maciej W. Rozycki
@@ -235,6 +235,7 @@ struct fza_ring_cmd {
#define FZA_RING_CMD 0x200400 /* command ring address */
#define FZA_RING_CMD_SIZE 0x40 /* command descriptor ring
* size
+ */
/* Command constants. */
#define FZA_RING_CMD_MASK 0x7fffffff
#define FZA_RING_CMD_NOP 0x00000000 /* nop */
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index e86ea105c802..704537010453 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -92,7 +92,7 @@ static int bcm54612e_config_init(struct phy_device *phydev)
return 0;
}
-static int bcm5481x_config(struct phy_device *phydev)
+static int bcm54xx_config_clock_delay(struct phy_device *phydev)
{
int rc, val;
@@ -429,7 +429,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
ret = genphy_config_aneg(phydev);
/* Then we can set up the delay. */
- bcm5481x_config(phydev);
+ bcm54xx_config_clock_delay(phydev);
if (of_property_read_bool(np, "enet-phy-lane-swap")) {
/* Lane Swap - Undocumented register...magic! */
@@ -442,6 +442,19 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
return ret;
}
+static int bcm54616s_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Aneg first. */
+ ret = genphy_config_aneg(phydev);
+
+ /* Then we can set up the delay. */
+ bcm54xx_config_clock_delay(phydev);
+
+ return ret;
+}
+
static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set)
{
int val;
@@ -636,6 +649,7 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
+ .config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 33265747bf39..0fbcedcdf6e2 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
* assume the pin serves as pull-up. If direction is
* output, the default value is high.
*/
- gpiod_set_value(bitbang->mdo, 1);
+ gpiod_set_value_cansleep(bitbang->mdo, 1);
return;
}
@@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- return gpiod_get_value(bitbang->mdio);
+ return gpiod_get_value_cansleep(bitbang->mdio);
}
static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
container_of(ctrl, struct mdio_gpio_info, ctrl);
if (bitbang->mdo)
- gpiod_set_value(bitbang->mdo, what);
+ gpiod_set_value_cansleep(bitbang->mdo, what);
else
- gpiod_set_value(bitbang->mdio, what);
+ gpiod_set_value_cansleep(bitbang->mdio, what);
}
static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- gpiod_set_value(bitbang->mdc, what);
+ gpiod_set_value_cansleep(bitbang->mdc, what);
}
static const struct mdiobb_ops mdio_gpio_ops = {
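The mdio-gpio conversion switches to the *_cansleep GPIO accessors, which are the correct variants when the descriptor may sit behind a sleeping controller (an I2C or SPI GPIO expander) and the caller is in process context; the non-suffixed accessors are reserved for atomic context. A sketch of how that looks in driver code, assuming a hypothetical active-high "reset" line (gpiod_get(), gpiod_set_value_cansleep(), usleep_range() and gpiod_put() are the real gpiolib calls):

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Hypothetical helper: pulse a reset line that may live on an I2C expander.
 * Runs in process context only, so the _cansleep accessors are safe here.
 */
static int pulse_reset(struct device *dev)
{
	struct gpio_desc *reset = gpiod_get(dev, "reset", GPIOD_OUT_LOW);

	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value_cansleep(reset, 1);	/* may sleep on a slow bus */
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(reset, 0);
	gpiod_put(reset);
	return 0;
}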
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index a2e59f4f6f01..7cae17517744 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -810,17 +810,13 @@ static int vsc85xx_default_config(struct phy_device *phydev)
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
mutex_lock(&phydev->lock);
- rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
- if (rc < 0)
- goto out_unlock;
- reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL);
- reg_val &= ~(RGMII_RX_CLK_DELAY_MASK);
- reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS);
- phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val);
+ reg_val = RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS;
+
+ rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
+ MSCC_PHY_RGMII_CNTL, RGMII_RX_CLK_DELAY_MASK,
+ reg_val);
-out_unlock:
- rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
mutex_unlock(&phydev->lock);
return rc;
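phy_modify_paged() folds the removed select-page/read/modify/write/restore sequence into one call with correct error handling and page restore. Roughly how that helper is composed from the lower-level primitives (a sketch; phy_select_page(), __phy_modify() and phy_restore_page() are the real phylib helpers):

#include <linux/phy.h>

/* Sketch of what the phy_modify_paged() call above performs internally. */
static int modify_paged_sketch(struct phy_device *phydev, int page,
			       u32 regnum, u16 mask, u16 set)
{
	int ret = 0, oldpage;

	oldpage = phy_select_page(phydev, page);	/* takes the MDIO lock */
	if (oldpage >= 0)
		ret = __phy_modify(phydev, regnum, mask, set);

	/* Restores the old page and drops the lock, preserving any error. */
	return phy_restore_page(phydev, oldpage, ret);
}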
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 7fc8508b5231..271e8adc39f1 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[] = {
.flags = PHY_HAS_INTERRUPT,
}, {
.phy_id = 0x001cc816,
- .name = "RTL8201F 10/100Mbps Ethernet",
+ .name = "RTL8201F Fast Ethernet",
.phy_id_mask = 0x001fffff,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index db633ae9f784..364f514d56d8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -985,8 +985,6 @@ static void team_port_disable(struct team *team,
team->en_port_count--;
team_queue_override_port_del(team, port);
team_adjust_ops(team);
- team_notify_peers(team);
- team_mcast_rejoin(team);
team_lower_state_changed(port);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 060135ceaf0e..e244f5d7512a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1536,6 +1536,7 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
if (!rx_batched || (!more && skb_queue_empty(queue))) {
local_bh_disable();
+ skb_record_rx_queue(skb, tfile->queue_index);
netif_receive_skb(skb);
local_bh_enable();
return;
@@ -1555,8 +1556,11 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
struct sk_buff *nskb;
local_bh_disable();
- while ((nskb = __skb_dequeue(&process_queue)))
+ while ((nskb = __skb_dequeue(&process_queue))) {
+ skb_record_rx_queue(nskb, tfile->queue_index);
netif_receive_skb(nskb);
+ }
+ skb_record_rx_queue(skb, tfile->queue_index);
netif_receive_skb(skb);
local_bh_enable();
}
@@ -2451,6 +2455,7 @@ build:
if (!rcu_dereference(tun->steering_prog))
rxhash = __skb_get_hash_symmetric(skb);
+ skb_record_rx_queue(skb, tfile->queue_index);
netif_receive_skb(skb);
stats = get_cpu_ptr(tun->pcpu_stats);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 262e7a3c23cb..f2d01cb6f958 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
+ dev->net->min_mtu = ETH_MIN_MTU;
+ dev->net->max_mtu = ETH_DATA_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
pdata->dev = dev;
@@ -1598,6 +1600,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
return ret;
}
+ cancel_delayed_work_sync(&pdata->carrier_check);
+
if (pdata->suspend_flags) {
netdev_warn(dev->net, "error during last resume\n");
pdata->suspend_flags = 0;
@@ -1840,6 +1844,11 @@ done:
*/
if (ret && PMSG_IS_AUTO(message))
usbnet_resume(intf);
+
+ if (ret)
+ schedule_delayed_work(&pdata->carrier_check,
+ CARRIER_CHECK_DELAY);
+
return ret;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3e2c041d76ac..cecfd77c9f3c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -70,7 +70,8 @@ static const unsigned long guest_offloads[] = {
VIRTIO_NET_F_GUEST_TSO4,
VIRTIO_NET_F_GUEST_TSO6,
VIRTIO_NET_F_GUEST_ECN,
- VIRTIO_NET_F_GUEST_UFO
+ VIRTIO_NET_F_GUEST_UFO,
+ VIRTIO_NET_F_GUEST_CSUM
};
struct virtnet_stat_desc {
@@ -2334,9 +2335,6 @@ static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
if (!vi->guest_offloads)
return 0;
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
- offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;
-
return virtnet_set_guest_offloads(vi, offloads);
}
@@ -2346,8 +2344,6 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
if (!vi->guest_offloads)
return 0;
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
- offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;
return virtnet_set_guest_offloads(vi, offloads);
}
@@ -2365,8 +2361,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
&& (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
- virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) {
- NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first");
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a1c2801ded10..7e49342bae38 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -6867,7 +6867,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 bitmap;
if (drop) {
- if (vif->type == NL80211_IFTYPE_STATION) {
+ if (vif && vif->type == NL80211_IFTYPE_STATION) {
bitmap = ~(1 << WMI_MGMT_TID);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 1e3b5f4a4cf9..f23cb2f3d296 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1251,6 +1251,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ath_vif *avp = (void *)vif->drv_priv;
struct ath_node *an = &avp->mcast_node;
+ mutex_lock(&sc->mutex);
if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
if (sc->cur_chan->nvifs >= 1) {
mutex_unlock(&sc->mutex);
@@ -1259,8 +1260,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
sc->tx99_vif = vif;
}
- mutex_lock(&sc->mutex);
-
ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->cur_chan->nvifs++;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 230a378c26fc..7f0a5bade70a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -6005,7 +6005,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
* for subsequent chanspecs.
*/
channel->flags = IEEE80211_CHAN_NO_HT40 |
- IEEE80211_CHAN_NO_80MHZ;
+ IEEE80211_CHAN_NO_80MHZ |
+ IEEE80211_CHAN_NO_160MHZ;
ch.bw = BRCMU_CHAN_BW_20;
cfg->d11inf.encchspec(&ch);
chaninfo = ch.chspec;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
index e7584b842dce..eb5db94f5745 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
@@ -193,6 +193,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
}
break;
case BRCMU_CHSPEC_D11AC_BW_160:
+ ch->bw = BRCMU_CHAN_BW_160;
+ ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
+ BRCMU_CHSPEC_D11AC_SB_SHIFT);
switch (ch->sb) {
case BRCMU_CHAN_SB_LLL:
ch->control_ch_num -= CH_70MHZ_APART;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 2439e98431ee..7492dfb6729b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -26,6 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -81,7 +83,7 @@
#define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2)
#define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \
ACPI_SAR_TABLE_SIZE + 3)
-#define ACPI_WGDS_WIFI_DATA_SIZE 18
+#define ACPI_WGDS_WIFI_DATA_SIZE 19
#define ACPI_WRDD_WIFI_DATA_SIZE 2
#define ACPI_SPLC_WIFI_DATA_SIZE 2
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 6b95d0e75889..2b8b50a77990 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -154,7 +154,11 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
struct dentry *dbgfs_dir);
-void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt);
+static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt)
+{
+ kfree(fwrt->dump.d3_debug_data);
+ fwrt->dump.d3_debug_data = NULL;
+}
void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index dade206d5511..2ba890445c35 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -893,7 +893,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
- ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE);
+ ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
@@ -928,6 +928,11 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
return -ENOENT;
}
+static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
+{
+ return -ENOENT;
+}
+
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
return 0;
@@ -954,8 +959,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm,
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
ret);
- /* if not available, don't fail and don't bother with EWRD */
- return 0;
+ /*
+ * If not available, don't fail and don't bother with EWRD.
+ * Return 1 to indicate that we can't use WGDS either.
+ */
+ return 1;
}
ret = iwl_mvm_sar_get_ewrd_table(mvm);
@@ -968,9 +976,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
/* choose profile 1 (WRDS) as default for both chains */
ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
- /* if we don't have profile 0 from BIOS, just skip it */
+ /*
+ * If we don't have profile 0 from BIOS, just skip it. This
+ * means that SAR Geo will not be enabled either, even if we
+ * have other valid profiles.
+ */
if (ret == -ENOENT)
- return 0;
+ return 1;
return ret;
}
@@ -1168,11 +1180,19 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
ret = iwl_mvm_sar_init(mvm);
- if (ret)
- goto error;
+ if (ret == 0) {
+ ret = iwl_mvm_sar_geo_init(mvm);
+ } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
+ /*
+ * If basic SAR is not available, we check for WGDS,
+ * which should *not* be available either. If it is
+ * available, issue an error, because we can't use SAR
+ * Geo without basic SAR.
+ */
+ IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
+ }
- ret = iwl_mvm_sar_geo_init(mvm);
- if (ret)
+ if (ret < 0)
goto error;
iwl_mvm_leds_sync(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 505b0385d800..00f831d88366 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -301,8 +301,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
goto out;
}
- if (changed)
- *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
+ if (changed) {
+ u32 status = le32_to_cpu(resp->status);
+
+ *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
+ status == MCC_RESP_ILLEGAL);
+ }
regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
__le32_to_cpu(resp->n_channels),
@@ -4444,10 +4448,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
- return;
-
/* if beacon filtering isn't on mac80211 does it anyway */
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 3633f27d048a..6fc5cc1f2b5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -539,9 +539,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
}
IWL_DEBUG_LAR(mvm,
- "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
- status, mcc, mcc >> 8, mcc & 0xff,
- !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
+ "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
+ status, mcc, mcc >> 8, mcc & 0xff, n_channels);
exit:
iwl_free_resp(&cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 0e2092526fae..af3fba10abc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -858,6 +858,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_thermal_exit(mvm);
out_free:
iwl_fw_flush_dump(&mvm->fwrt);
+ iwl_fw_runtime_free(&mvm->fwrt);
if (iwlmvm_mod_params.init_dbg)
return op_mode;
@@ -910,6 +911,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_mvm_tof_clean(mvm);
+ iwl_fw_runtime_free(&mvm->fwrt);
mutex_destroy(&mvm->mutex);
mutex_destroy(&mvm->d0i3_suspend_mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index 0ccbcd7e887d..c30d8f5bbf2a 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -1,6 +1,12 @@
config MT76_CORE
tristate
+config MT76_LEDS
+ bool
+ depends on MT76_CORE
+ depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS
+ default y
+
config MT76_USB
tristate
depends on MT76_CORE
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 2a699e8b79bf..7d219ff2d480 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -345,9 +345,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
mt76_check_sband(dev, NL80211_BAND_2GHZ);
mt76_check_sband(dev, NL80211_BAND_5GHZ);
- ret = mt76_led_init(dev);
- if (ret)
- return ret;
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ ret = mt76_led_init(dev);
+ if (ret)
+ return ret;
+ }
return ieee80211_register_hw(hw);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 47c42c607964..7806963b1905 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -71,7 +71,6 @@ struct mt76x02_dev {
struct mac_address macaddr_list[8];
struct mutex phy_mutex;
- struct mutex mutex;
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 3824290b219d..fd125722d1fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -507,8 +507,10 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
mt76x2_dfs_init_detector(dev);
/* init led callbacks */
- dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
+ }
ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
ARRAY_SIZE(mt76x02_rates));
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 034a06295668..3f001bd6806c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -272,9 +272,9 @@ mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
if (val != ~0 && val > 0xffff)
return -EINVAL;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
mt76x2_mac_set_tx_protection(dev, val);
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
return 0;
}
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 4c2154b9e6a3..bd10165d7eec 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -285,7 +285,7 @@ static int wl1271_probe(struct sdio_func *func,
struct resource res[2];
mmc_pm_flag_t mmcflags;
int ret = -ENOMEM;
- int irq, wakeirq;
+ int irq, wakeirq, num_irqs;
const char *chip_family;
/* We are only able to handle the wlan function */
@@ -353,12 +353,17 @@ static int wl1271_probe(struct sdio_func *func,
irqd_get_trigger_type(irq_get_irq_data(irq));
res[0].name = "irq";
- res[1].start = wakeirq;
- res[1].flags = IORESOURCE_IRQ |
- irqd_get_trigger_type(irq_get_irq_data(wakeirq));
- res[1].name = "wakeirq";
- ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+ if (wakeirq > 0) {
+ res[1].start = wakeirq;
+ res[1].flags = IORESOURCE_IRQ |
+ irqd_get_trigger_type(irq_get_irq_data(wakeirq));
+ res[1].name = "wakeirq";
+ num_irqs = 2;
+ } else {
+ num_irqs = 1;
+ }
+ ret = platform_device_add_resources(glue->core, res, num_irqs);
if (ret) {
dev_err(glue->dev, "can't add resources\n");
goto out_dev_put;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2e65be8b1387..559d567693b8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1519,8 +1519,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->ndev)
nvme_nvm_update_nvm_info(ns);
#ifdef CONFIG_NVME_MULTIPATH
- if (ns->head->disk)
+ if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
+ blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+ }
#endif
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 0b70c8bab045..54032c466636 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -152,6 +152,7 @@ struct nvme_fc_ctrl {
bool ioq_live;
bool assoc_active;
+ atomic_t err_work_active;
u64 association_id;
struct list_head ctrl_list; /* rport->ctrl_list */
@@ -160,6 +161,7 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set tag_set;
struct delayed_work connect_work;
+ struct work_struct err_work;
struct kref ref;
u32 flags;
@@ -1531,6 +1533,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
int i;
+ /* ensure we've initialized the ops once */
+ if (!(aen_op->flags & FCOP_FLAGS_AEN))
+ return;
+
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
__nvme_fc_abort_op(ctrl, aen_op);
}
@@ -2049,7 +2055,25 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
- /* only proceed if in LIVE state - e.g. on first error */
+ int active;
+
+ /*
+ * If an error (io timeout, etc) occurs while (re)connecting,
+ * it's an error creating the new association.
+ * Start the error recovery thread if it hasn't already
+ * been started. It is expected there could be multiple
+ * ios hitting this path before things are cleaned up.
+ */
+ if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+ active = atomic_xchg(&ctrl->err_work_active, 1);
+ if (!active && !schedule_work(&ctrl->err_work)) {
+ atomic_set(&ctrl->err_work_active, 0);
+ WARN_ON(1);
+ }
+ return;
+ }
+
+ /* Otherwise, only proceed if in LIVE state - e.g. on first error */
if (ctrl->ctrl.state != NVME_CTRL_LIVE)
return;
@@ -2814,6 +2838,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -2866,23 +2891,30 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
}
static void
-nvme_fc_reset_ctrl_work(struct work_struct *work)
+__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
{
- struct nvme_fc_ctrl *ctrl =
- container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
- int ret;
-
- nvme_stop_ctrl(&ctrl->ctrl);
+ nvme_stop_keep_alive(&ctrl->ctrl);
/* will block while waiting for io to terminate */
nvme_fc_delete_association(ctrl);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
+ !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: error_recovery: Couldn't change state "
"to CONNECTING\n", ctrl->cnum);
- return;
- }
+}
+
+static void
+nvme_fc_reset_ctrl_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
+ int ret;
+
+ __nvme_fc_terminate_io(ctrl);
+
+ nvme_stop_ctrl(&ctrl->ctrl);
if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
ret = nvme_fc_create_association(ctrl);
@@ -2897,6 +2929,24 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
ctrl->cnum);
}
+static void
+nvme_fc_connect_err_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, err_work);
+
+ __nvme_fc_terminate_io(ctrl);
+
+ atomic_set(&ctrl->err_work_active, 0);
+
+ /*
+ * Rescheduling the connection after recovering
+ * from the io error is left to the reconnect work
+ * item, which should already be stalled waiting on
+ * the io whose error scheduled this work.
+ */
+}
+
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.name = "fc",
.module = THIS_MODULE,
@@ -3007,6 +3057,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->cnum = idx;
ctrl->ioq_live = false;
ctrl->assoc_active = false;
+ atomic_set(&ctrl->err_work_active, 0);
init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
@@ -3014,6 +3065,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+ INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3103,6 +3155,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
cancel_work_sync(&ctrl->ctrl.reset_work);
+ cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->connect_work);
ctrl->ctrl.opts = NULL;
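The err_work_active flag works as a run-once gate: with many IOs failing concurrently during connect, atomic_xchg() lets exactly one caller schedule the error-recovery work, and the work re-arms the gate by clearing the flag when it finishes (or immediately, if scheduling fails). The same idiom in portable C11, as a userspace model rather than the nvme code:

#include <assert.h>
#include <stdatomic.h>

static atomic_int err_work_active;
static int scheduled;

static void try_schedule_recovery(void)
{
	/* First caller sees 0 and schedules; concurrent callers see 1 and return. */
	if (atomic_exchange(&err_work_active, 1) == 0)
		scheduled++;
}

static void recovery_work(void)
{
	/* ... recover ... then re-arm the gate for the next error burst */
	atomic_store(&err_work_active, 0);
}

int main(void)
{
	try_schedule_recovery();
	try_schedule_recovery();	/* duplicate error: must not schedule again */
	assert(scheduled == 1);
	recovery_work();
	try_schedule_recovery();	/* after re-arm, scheduling works again */
	assert(scheduled == 2);
	return 0;
}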
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5e3cc8c59a39..9901afd804ce 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -285,6 +285,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* set to a default value for 512 until disk is validated */
blk_queue_logical_block_size(q, 512);
+ blk_set_stacking_limits(&q->limits);
/* we need to propagate up the VMC settings */
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f4efe289dc7b..a5f9bbce863f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -420,7 +420,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
struct pci_dev *p2p_dev;
int ret;
- if (!ctrl->p2p_client)
+ if (!ctrl->p2p_client || !ns->use_p2pmem)
return;
if (ns->p2p_dev) {
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ddce100be57a..3f7971d3706d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
int inline_page_count;
};
-static struct workqueue_struct *nvmet_rdma_delete_wq;
static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
- flush_workqueue(nvmet_rdma_delete_wq);
+ flush_scheduled_work();
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
if (ret) {
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
/* Destroying rdma_cm id is not needed here */
return 0;
}
@@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
if (disconnect) {
rdma_disconnect(queue->cm_id);
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
}
}
@@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
mutex_unlock(&nvmet_rdma_queue_mutex);
pr_err("failed to connect queue %d\n", queue->idx);
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
}
/**
@@ -1656,17 +1655,8 @@ static int __init nvmet_rdma_init(void)
if (ret)
goto err_ib_client;
- nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
- if (!nvmet_rdma_delete_wq) {
- ret = -ENOMEM;
- goto err_unreg_transport;
- }
-
return 0;
-err_unreg_transport:
- nvmet_unregister_transport(&nvmet_rdma_ops);
err_ib_client:
ib_unregister_client(&nvmet_rdma_ib_client);
return ret;
@@ -1674,7 +1664,6 @@ err_ib_client:
static void __exit nvmet_rdma_exit(void)
{
- destroy_workqueue(nvmet_rdma_delete_wq);
nvmet_unregister_transport(&nvmet_rdma_ops);
ib_unregister_client(&nvmet_rdma_ib_client);
WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 9b18ce90f907..27f67dfa649d 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -44,6 +44,7 @@ struct nvmem_cell {
int bytes;
int bit_offset;
int nbits;
+ struct device_node *np;
struct nvmem_device *nvmem;
struct list_head node;
};
@@ -298,6 +299,7 @@ static void nvmem_cell_drop(struct nvmem_cell *cell)
mutex_lock(&nvmem_mutex);
list_del(&cell->node);
mutex_unlock(&nvmem_mutex);
+ of_node_put(cell->np);
kfree(cell->name);
kfree(cell);
}
@@ -530,6 +532,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
return -ENOMEM;
cell->nvmem = nvmem;
+ cell->np = of_node_get(child);
cell->offset = be32_to_cpup(addr++);
cell->bytes = be32_to_cpup(addr);
cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
@@ -960,14 +963,13 @@ out:
#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
-nvmem_find_cell_by_index(struct nvmem_device *nvmem, int index)
+nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
struct nvmem_cell *cell = NULL;
- int i = 0;
mutex_lock(&nvmem_mutex);
list_for_each_entry(cell, &nvmem->cells, node) {
- if (index == i++)
+ if (np == cell->np)
break;
}
mutex_unlock(&nvmem_mutex);
@@ -1011,7 +1013,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
if (IS_ERR(nvmem))
return ERR_CAST(nvmem);
- cell = nvmem_find_cell_by_index(nvmem, index);
+ cell = nvmem_find_cell_by_node(nvmem, cell_np);
if (!cell) {
__nvmem_device_put(nvmem);
return ERR_PTR(-ENOENT);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 0f27fad9fe94..5592437bb3d1 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -149,9 +149,11 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
* set by the driver.
*/
mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
- dev->bus_dma_mask = mask;
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
+ /* ...but only set bus mask if we found valid dma-ranges earlier */
+ if (!ret)
+ dev->bus_dma_mask = mask;
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 35c64a4295e0..fe6b13608e51 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -104,9 +104,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map)
distance = of_read_number(matrix, 1);
matrix++;
+ if ((nodea == nodeb && distance != LOCAL_DISTANCE) ||
+ (nodea != nodeb && distance <= LOCAL_DISTANCE)) {
+ pr_err("Invalid distance[node%d -> node%d] = %d\n",
+ nodea, nodeb, distance);
+ return -EINVAL;
+ }
+
numa_set_distance(nodea, nodeb, distance);
- pr_debug("distance[node%d -> node%d] = %d\n",
- nodea, nodeb, distance);
/* Set default distance of node B->A same as A->B */
if (nodeb > nodea)
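The new of_numa check enforces the two invariants a distance map must satisfy: a node's distance to itself is exactly LOCAL_DISTANCE, and any distance between distinct nodes is strictly greater. The predicate in isolation (LOCAL_DISTANCE is 10, per include/linux/topology.h):

#include <assert.h>
#include <stdbool.h>

#define LOCAL_DISTANCE 10

static bool numa_distance_valid(int a, int b, int distance)
{
	if (a == b)
		return distance == LOCAL_DISTANCE;
	return distance > LOCAL_DISTANCE;
}

int main(void)
{
	assert(numa_distance_valid(0, 0, 10));	/* self distance must be 10 */
	assert(!numa_distance_valid(0, 1, 10));	/* remote must exceed 10 */
	assert(numa_distance_valid(0, 1, 20));
	return 0;
}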
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index 9e5a9a3112c9..3f4fb4dbbe33 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -288,7 +288,10 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
int ret;
vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
- new_supply_vbb->u_volt);
+ new_supply_vdd->u_volt);
+
+ if (new_supply_vdd->u_volt_min < vdd_uv)
+ new_supply_vdd->u_volt_min = vdd_uv;
/* Scaling up? Scale voltage before frequency */
if (freq > old_freq) {
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 2a4aa6468579..921db6f80340 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -793,15 +793,10 @@ static void pci_acpi_setup(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct acpi_device *adev = ACPI_COMPANION(dev);
- int node;
if (!adev)
return;
- node = acpi_get_node(adev->handle);
- if (node != NUMA_NO_NODE)
- set_dev_node(dev, node);
-
pci_acpi_optimize_delay(pci_dev, adev->handle);
pci_acpi_add_pm_notifier(adev, pci_dev);
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 4ceb06f8a33c..4edeb4cae72a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -830,7 +830,7 @@ static struct meson_bank meson_gxbb_periphs_banks[] = {
static struct meson_bank meson_gxbb_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
- BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 7dae1d7bf6b0..158f618f1695 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -807,7 +807,7 @@ static struct meson_bank meson_gxl_periphs_banks[] = {
static struct meson_bank meson_gxl_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
- BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index f8b778a7d471..53d449076dee 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -192,7 +192,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
- ret = regmap_update_bits(pc->reg_pull, reg,
+ ret = regmap_update_bits(pc->reg_pullen, reg,
BIT(bit), 0);
if (ret)
return ret;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index c6d79315218f..86466173114d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -1053,7 +1053,7 @@ static struct meson_bank meson8_cbus_banks[] = {
static struct meson_bank meson8_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
- BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index bb2a30964fc6..647ad15d5c3c 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -906,7 +906,7 @@ static struct meson_bank meson8b_cbus_banks[] = {
static struct meson_bank meson8b_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
- BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index e79f2a181ad2..b9ec4a16db1f 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -50,8 +50,10 @@ static int __init rtc_hctosys(void)
tv64.tv_sec = rtc_tm_to_time64(&tm);
#if BITS_PER_LONG == 32
- if (tv64.tv_sec > INT_MAX)
+ if (tv64.tv_sec > INT_MAX) {
+ err = -ERANGE;
goto err_read;
+ }
#endif
err = do_settimeofday64(&tv64);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index df0c5776d49b..a5a19ff10535 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -257,6 +257,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char rtc_control;
+ /* This is not only an rtc_op, but is also called directly */
if (!is_valid_irq(cmos->irq))
return -EIO;
@@ -452,6 +453,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
unsigned char mon, mday, hrs, min, sec, rtc_control;
int ret;
+ /* This is not only an rtc_op, but is also called directly */
if (!is_valid_irq(cmos->irq))
return -EIO;
@@ -516,9 +518,6 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned long flags;
- if (!is_valid_irq(cmos->irq))
- return -EINVAL;
-
spin_lock_irqsave(&rtc_lock, flags);
if (enabled)
@@ -579,6 +578,12 @@ static const struct rtc_class_ops cmos_rtc_ops = {
.alarm_irq_enable = cmos_alarm_irq_enable,
};
+static const struct rtc_class_ops cmos_rtc_ops_no_alarm = {
+ .read_time = cmos_read_time,
+ .set_time = cmos_set_time,
+ .proc = cmos_procfs,
+};
+
/*----------------------------------------------------------------*/
/*
@@ -855,9 +860,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
goto cleanup1;
}
+
+ cmos_rtc.rtc->ops = &cmos_rtc_ops;
+ } else {
+ cmos_rtc.rtc->ops = &cmos_rtc_ops_no_alarm;
}
- cmos_rtc.rtc->ops = &cmos_rtc_ops;
cmos_rtc.rtc->nvram_old_abi = true;
retval = rtc_register_device(cmos_rtc.rtc);
if (retval)
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 9f99a0966550..7cb786d76e3c 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -303,6 +303,9 @@ static int pcf2127_i2c_gather_write(void *context,
memcpy(buf + 1, val, val_size);
ret = i2c_master_send(client, buf, val_size + 1);
+
+ kfree(buf);
+
if (ret != val_size + 1)
return ret < 0 ? ret : -EIO;
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index f96ec68af2e5..dcbf5c857743 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -415,9 +415,9 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
break;
clear_bit_inv(bit, bv);
+ ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
barrier();
smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
- ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
}
if (ism->sba->e) {
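The ism_drv reorder clears the DMB event mask before invoking the handler, with barrier() keeping the compiler from undoing that order; an event arriving while the handler runs then re-sets the mask instead of being wiped out afterwards. A single-threaded model of why clear-then-handle is the lost-event-safe order (device_raises_event() simulates an interrupt arriving mid-handler):

#include <assert.h>
#include <stdbool.h>

static bool pending;	/* models ism->sba->dmbe_mask[bit] */
static int handled;

static void device_raises_event(void) { pending = true; }

static void irq_clear_then_handle(void)
{
	pending = false;		/* clear first... */
	device_raises_event();		/* ...an event mid-handler re-arms it */
	handled++;
}

int main(void)
{
	pending = true;
	irq_clear_then_handle();
	assert(pending);	/* the new event is still flagged for the next pass */
	assert(handled == 1);
	return 0;
}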
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6843bc7ee9f2..04e294d1d16d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -87,6 +87,18 @@ struct qeth_dbf_info {
#define SENSE_RESETTING_EVENT_BYTE 1
#define SENSE_RESETTING_EVENT_FLAG 0x80
+static inline u32 qeth_get_device_id(struct ccw_device *cdev)
+{
+ struct ccw_dev_id dev_id;
+ u32 id;
+
+ ccw_device_get_id(cdev, &dev_id);
+ id = dev_id.devno;
+ id |= (u32) (dev_id.ssid << 16);
+
+ return id;
+}
+
/*
* Common IO related definitions
*/
@@ -97,7 +109,8 @@ struct qeth_dbf_info {
#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
-#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
+#define CCW_DEVID(cdev) (qeth_get_device_id(cdev))
+#define CARD_DEVID(card) (CCW_DEVID(CARD_RDEV(card)))
/**
* card stuff
@@ -830,6 +843,11 @@ struct qeth_trap_id {
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
+static inline bool qeth_netdev_is_registered(struct net_device *dev)
+{
+ return dev->netdev_ops != NULL;
+}
+
static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
unsigned int elements)
{
@@ -973,7 +991,7 @@ int qeth_wait_for_threads(struct qeth_card *, unsigned long);
int qeth_do_run_thread(struct qeth_card *, unsigned long);
void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
-int qeth_core_hardsetup_card(struct qeth_card *);
+int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
@@ -1028,11 +1046,6 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);
void qeth_close_dev(struct qeth_card *);
-int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
- long,
- int (*reply_cb)(struct qeth_card *,
- struct qeth_reply *, unsigned long),
- void *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
enum qeth_ipa_funcs,
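qeth_get_device_id() packs the subchannel-set id above the 16-bit device number so a single u32 identifies the CCW device in the reworked debug messages. The packing checked in isolation (the model struct only approximates struct ccw_dev_id, whose exact field widths differ slightly):

#include <assert.h>
#include <stdint.h>

struct ccw_dev_id_model { uint16_t ssid; uint16_t devno; };

static uint32_t pack_devid(struct ccw_dev_id_model id)
{
	return (uint32_t)id.devno | ((uint32_t)id.ssid << 16);
}

int main(void)
{
	struct ccw_dev_id_model id = { .ssid = 0x1, .devno = 0xf500 };

	assert(pack_devid(id) == 0x0001f500);	/* prints as "device 1f500" via %x */
	return 0;
}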
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3274f13aad57..4bce5ae65a55 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -167,6 +167,8 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "OSD_1000";
case QETH_LINK_TYPE_10GBIT_ETH:
return "OSD_10GIG";
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ return "OSD_25GIG";
case QETH_LINK_TYPE_LANE_ETH100:
return "OSD_FE_LANE";
case QETH_LINK_TYPE_LANE_TR:
@@ -554,8 +556,8 @@ static int __qeth_issue_next_read(struct qeth_card *card)
if (!iob) {
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
- "available\n", dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
+ CARD_DEVID(card));
return -ENOMEM;
}
qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
@@ -563,8 +565,8 @@ static int __qeth_issue_next_read(struct qeth_card *card)
rc = ccw_device_start(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0);
if (rc) {
- QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
- "rc=%i\n", dev_name(&card->gdev->dev), rc);
+ QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
+ rc, CARD_DEVID(card));
atomic_set(&channel->irq_pending, 0);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
@@ -613,16 +615,14 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
const char *ipa_name;
int com = cmd->hdr.command;
ipa_name = qeth_get_ipa_cmd_name(com);
+
if (rc)
- QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
- "x%X \"%s\"\n",
- ipa_name, com, dev_name(&card->gdev->dev),
- QETH_CARD_IFNAME(card), rc,
- qeth_get_ipa_msg(rc));
+ QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
+ ipa_name, com, CARD_DEVID(card), rc,
+ qeth_get_ipa_msg(rc));
else
- QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
- ipa_name, com, dev_name(&card->gdev->dev),
- QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
+ ipa_name, com, CARD_DEVID(card));
}
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
@@ -711,7 +711,7 @@ static int qeth_check_idx_response(struct qeth_card *card,
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
if ((buffer[2] & 0xc0) == 0xc0) {
- QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#02x\n",
+ QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
buffer[4]);
QETH_CARD_TEXT(card, 2, "ckidxres");
QETH_CARD_TEXT(card, 2, " idxterm");
@@ -972,8 +972,8 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
QETH_CARD_TEXT(card, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
- dev_name(&cdev->dev), dstat, cstat);
+ QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
+ CCW_DEVID(cdev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
return 1;
@@ -1013,8 +1013,8 @@ static long qeth_check_irb_error(struct qeth_card *card,
switch (PTR_ERR(irb)) {
case -EIO:
- QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
- dev_name(&cdev->dev));
+ QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
+ CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
break;
@@ -1031,8 +1031,8 @@ static long qeth_check_irb_error(struct qeth_card *card,
}
break;
default:
- QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
- dev_name(&cdev->dev), PTR_ERR(irb));
+ QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
+ PTR_ERR(irb), CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT(card, 2, " rc???");
}
@@ -1114,9 +1114,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
dev_warn(&channel->ccwdev->dev,
"The qeth device driver failed to recover "
"an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
- "0x%X dstat 0x%X\n",
- dev_name(&channel->ccwdev->dev), cstat, dstat);
+ QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
+ CCW_DEVID(channel->ccwdev), cstat,
+ dstat);
print_hex_dump(KERN_WARNING, "qeth: irb ",
DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
print_hex_dump(KERN_WARNING, "qeth: sense data ",
@@ -1890,8 +1890,8 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
if (channel->state != CH_STATE_ACTIVATING) {
dev_warn(&channel->ccwdev->dev, "The qeth device driver"
" failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
- dev_name(&channel->ccwdev->dev));
+ QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
+ CCW_DEVID(channel->ccwdev));
QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
return -ETIME;
}
@@ -1926,17 +1926,15 @@ static void qeth_idx_write_cb(struct qeth_card *card,
"The adapter is used exclusively by another "
"host\n");
else
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
- " negative reply\n",
- dev_name(&channel->ccwdev->dev));
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+ CCW_DEVID(channel->ccwdev));
goto out;
}
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
- "function level mismatch (sent: 0x%x, received: "
- "0x%x)\n", dev_name(&channel->ccwdev->dev),
- card->info.func_level, temp);
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+ CCW_DEVID(channel->ccwdev),
+ card->info.func_level, temp);
goto out;
}
channel->state = CH_STATE_UP;
@@ -1973,9 +1971,8 @@ static void qeth_idx_read_cb(struct qeth_card *card,
"insufficient authorization\n");
break;
default:
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
- " negative reply\n",
- dev_name(&channel->ccwdev->dev));
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+ CCW_DEVID(channel->ccwdev));
}
QETH_CARD_TEXT_(card, 2, "idxread%c",
QETH_IDX_ACT_CAUSE_CODE(iob->data));
@@ -1984,10 +1981,9 @@ static void qeth_idx_read_cb(struct qeth_card *card,
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if (temp != qeth_peer_func_level(card->info.func_level)) {
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
- "level mismatch (sent: 0x%x, received: 0x%x)\n",
- dev_name(&channel->ccwdev->dev),
- card->info.func_level, temp);
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+ CCW_DEVID(channel->ccwdev),
+ card->info.func_level, temp);
goto out;
}
memcpy(&card->token.issuer_rm_r,
@@ -2096,9 +2092,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
(addr_t) iob, 0, 0, event_timeout);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
- QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
- "ccw_device_start rc = %i\n",
- dev_name(&channel->ccwdev->dev), rc);
+ QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
+ CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
spin_lock_irq(&card->lock);
list_del_init(&reply->list);
@@ -2853,8 +2848,8 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
} else {
dev_warn(&card->gdev->dev,
"The qeth driver ran out of channel command buffers\n");
- QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
- dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers",
+ CARD_DEVID(card));
}
return iob;
@@ -2989,10 +2984,9 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
return 0;
default:
if (cmd->hdr.return_code) {
- QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
- "rc=%d\n",
- dev_name(&card->gdev->dev),
- cmd->hdr.return_code);
+ QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
+ CARD_DEVID(card),
+ cmd->hdr.return_code);
return 0;
}
}
@@ -3004,8 +2998,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
} else
- QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
- "\n", dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
+ CARD_DEVID(card));
return 0;
}
@@ -4297,10 +4291,9 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
cmd->data.setadapterparms.hdr.return_code);
if (cmd->data.setadapterparms.hdr.return_code !=
SET_ACCESS_CTRL_RC_SUCCESS)
- QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
- card->gdev->dev.kobj.name,
- access_ctrl_req->subcmd_code,
- cmd->data.setadapterparms.hdr.return_code);
+ QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
+ access_ctrl_req->subcmd_code, CARD_DEVID(card),
+ cmd->data.setadapterparms.hdr.return_code);
switch (cmd->data.setadapterparms.hdr.return_code) {
case SET_ACCESS_CTRL_RC_SUCCESS:
if (card->options.isolation == ISOLATION_MODE_NONE) {
@@ -4312,14 +4305,14 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
}
break;
case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
- QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already "
- "deactivated\n", dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
+ CARD_DEVID(card));
if (fallback)
card->options.isolation = card->options.prev_isolation;
break;
case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
- QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already"
- " activated\n", dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
+ CARD_DEVID(card));
if (fallback)
card->options.isolation = card->options.prev_isolation;
break;
@@ -4405,10 +4398,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
rc = qeth_setadpparms_set_access_ctrl(card,
card->options.isolation, fallback);
if (rc) {
- QETH_DBF_MESSAGE(3,
- "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
- card->gdev->dev.kobj.name,
- rc);
+ QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
+ rc, CARD_DEVID(card));
rc = -EOPNOTSUPP;
}
} else if (card->options.isolation != ISOLATION_MODE_NONE) {
@@ -4443,7 +4434,8 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
rc = BMCR_FULLDPLX;
if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
(card->info.link_type != QETH_LINK_TYPE_OSN) &&
- (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
+ (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
+ (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
rc |= BMCR_SPEED100;
break;
case MII_BMSR: /* Basic mode status register */
@@ -4634,8 +4626,8 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
qeth_snmp_command_cb, (void *)&qinfo);
if (rc)
- QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
- QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
+ CARD_DEVID(card), rc);
else {
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
@@ -4869,8 +4861,8 @@ static void qeth_determine_capabilities(struct qeth_card *card)
rc = qeth_read_conf_data(card, (void **) &prcd, &length);
if (rc) {
- QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
- dev_name(&card->gdev->dev), rc);
+ QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
+ CARD_DEVID(card), rc);
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
goto out_offline;
}
@@ -5086,7 +5078,7 @@ static struct ccw_driver qeth_ccw_driver = {
.remove = ccwgroup_remove_ccwdev,
};
-int qeth_core_hardsetup_card(struct qeth_card *card)
+int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
int retries = 3;
int rc;
@@ -5096,8 +5088,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
qeth_update_from_chp_desc(card);
retry:
if (retries < 3)
- QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
- dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
+ CARD_DEVID(card));
rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
@@ -5161,13 +5153,20 @@ retriable:
if (rc == IPA_RC_LAN_OFFLINE) {
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
- netif_carrier_off(card->dev);
+ *carrier_ok = false;
} else {
rc = -ENODEV;
goto out;
}
} else {
- netif_carrier_on(card->dev);
+ *carrier_ok = true;
+ }
+
+ if (qeth_netdev_is_registered(card->dev)) {
+ if (*carrier_ok)
+ netif_carrier_on(card->dev);
+ else
+ netif_carrier_off(card->dev);
}
card->options.ipa4.supported_funcs = 0;
@@ -5201,8 +5200,8 @@ retriable:
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
"an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
- dev_name(&card->gdev->dev), rc);
+ QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
+ CARD_DEVID(card), rc);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
@@ -5481,11 +5480,12 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
-int qeth_send_setassparms(struct qeth_card *card,
- struct qeth_cmd_buffer *iob, __u16 len, long data,
- int (*reply_cb)(struct qeth_card *,
- struct qeth_reply *, unsigned long),
- void *reply_param)
+static int qeth_send_setassparms(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob, u16 len,
+ long data, int (*reply_cb)(struct qeth_card *,
+ struct qeth_reply *,
+ unsigned long),
+ void *reply_param)
{
int rc;
struct qeth_ipa_cmd *cmd;
@@ -5501,7 +5501,6 @@ int qeth_send_setassparms(struct qeth_card *card,
rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_send_setassparms);
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
@@ -6170,8 +6169,14 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
WARN_ON_ONCE(1);
}
- /* fallthrough from high to low, to select all legal speeds: */
+ /* partially falls through, to also select lower speeds */
switch (maxspeed) {
+ case SPEED_25000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 25000baseSR_Full);
+ break;
case SPEED_10000:
ethtool_link_ksettings_add_link_mode(cmd, supported,
10000baseT_Full);
@@ -6254,6 +6259,10 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
cmd->base.speed = SPEED_10000;
cmd->base.port = PORT_FIBRE;
break;
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ cmd->base.speed = SPEED_25000;
+ cmd->base.port = PORT_FIBRE;
+ break;
default:
cmd->base.speed = SPEED_10;
cmd->base.port = PORT_TP;
@@ -6320,6 +6329,9 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
case CARD_INFO_PORTS_10G:
cmd->base.speed = SPEED_10000;
break;
+ case CARD_INFO_PORTS_25G:
+ cmd->base.speed = SPEED_25000;
+ break;
}
return 0;
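The qeth_core_main.c changes above make qeth_core_hardsetup_card() report the link state through a bool out-parameter instead of touching a netdev that may not be registered yet; the caller applies the state only once the device exists. A minimal userspace sketch of that split, with made-up names standing in for the qeth/netdev calls:

#include <stdbool.h>
#include <stdio.h>

struct netdev { bool registered; bool carrier; };

/* Probe-side: detect the link, but only report it via *carrier_ok. */
static int hardsetup(bool *carrier_ok)
{
	*carrier_ok = true;	/* pretend the LAN is online */
	return 0;
}

/* Caller-side: apply the reported state only to a registered device. */
static void apply_carrier(struct netdev *dev, bool carrier_ok)
{
	if (dev->registered)
		dev->carrier = carrier_ok;
}

int main(void)
{
	struct netdev dev = { .registered = true };
	bool carrier_ok;

	if (hardsetup(&carrier_ok))
		return 1;
	apply_carrier(&dev, carrier_ok);
	printf("carrier %s\n", dev.carrier ? "on" : "off");
	return 0;
}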
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index e85090467afe..3e54be201b27 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -90,6 +90,7 @@ enum qeth_link_types {
QETH_LINK_TYPE_GBIT_ETH = 0x03,
QETH_LINK_TYPE_OSN = 0x04,
QETH_LINK_TYPE_10GBIT_ETH = 0x10,
+ QETH_LINK_TYPE_25GBIT_ETH = 0x12,
QETH_LINK_TYPE_LANE_ETH100 = 0x81,
QETH_LINK_TYPE_LANE_TR = 0x82,
QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
@@ -347,6 +348,7 @@ enum qeth_card_info_port_speed {
CARD_INFO_PORTS_100M = 0x00000006,
CARD_INFO_PORTS_1G = 0x00000007,
CARD_INFO_PORTS_10G = 0x00000008,
+ CARD_INFO_PORTS_25G = 0x0000000A,
};
/* (SET)DELIP(M) IPA stuff ***************************************************/
@@ -436,7 +438,7 @@ struct qeth_ipacmd_setassparms {
__u32 flags_32bit;
struct qeth_ipa_caps caps;
struct qeth_checksum_cmd chksum;
- struct qeth_arp_cache_entry add_arp_entry;
+ struct qeth_arp_cache_entry arp_entry;
struct qeth_arp_query_data query_arp;
struct qeth_tso_start_data tso;
__u8 ip[16];
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 23aaf373f631..2914a1a69f83 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -146,11 +146,11 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Wmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc == -EEXIST)
- QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
- mac, QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
+ CARD_DEVID(card));
else if (rc)
- QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
- mac, QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "Failed to register MAC on device %x: %d\n",
+ CARD_DEVID(card), rc);
return rc;
}
@@ -163,8 +163,8 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Rmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc)
- QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
- mac, QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "Failed to delete MAC on device %u: %d\n",
+ CARD_DEVID(card), rc);
return rc;
}
@@ -260,9 +260,9 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "L2sdvcb");
if (cmd->hdr.return_code) {
- QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x.\n",
+ QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n",
cmd->data.setdelvlan.vlan_id,
- QETH_CARD_IFNAME(card), cmd->hdr.return_code);
+ CARD_DEVID(card), cmd->hdr.return_code);
QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
}
@@ -455,8 +455,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
rc = qeth_vm_request_mac(card);
if (!rc)
goto out;
- QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %s: x%x\n",
- CARD_BUS_ID(card), rc);
+ QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
+ CARD_DEVID(card), rc);
QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc);
/* fall back to alternative mechanism: */
}
@@ -468,8 +468,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
rc = qeth_setadpparms_change_macaddr(card);
if (!rc)
goto out;
- QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n",
- CARD_BUS_ID(card), rc);
+ QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
+ CARD_DEVID(card), rc);
QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
/* fall back once more: */
}
@@ -826,7 +826,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l2_set_offline(cgdev);
- unregister_netdev(card->dev);
+ if (qeth_netdev_is_registered(card->dev))
+ unregister_netdev(card->dev);
}
static const struct ethtool_ops qeth_l2_ethtool_ops = {
@@ -862,11 +863,11 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_set_features = qeth_set_features
};
-static int qeth_l2_setup_netdev(struct qeth_card *card)
+static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
int rc;
- if (card->dev->netdev_ops)
+ if (qeth_netdev_is_registered(card->dev))
return 0;
card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -919,6 +920,9 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
qeth_l2_request_initial_mac(card);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
rc = register_netdev(card->dev);
+ if (!rc && carrier_ok)
+ netif_carrier_on(card->dev);
+
if (rc)
card->dev->netdev_ops = NULL;
return rc;
@@ -949,6 +953,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
enum qeth_card_states recover_flag;
+ bool carrier_ok;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
@@ -956,7 +961,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
recover_flag = card->state;
- rc = qeth_core_hardsetup_card(card);
+ rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
@@ -967,7 +972,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
- rc = qeth_l2_setup_netdev(card);
+ rc = qeth_l2_setup_netdev(card, carrier_ok);
if (rc)
goto out_remove;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0b161cc1fd2e..f08b745c2007 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -278,9 +278,6 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
QETH_CARD_TEXT(card, 4, "clearip");
- if (recover && card->options.sniffer)
- return;
-
spin_lock_bh(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
@@ -494,9 +491,8 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
QETH_PROT_IPV4);
if (rc) {
card->options.route4.type = NO_ROUTER;
- QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
- " on %s. Type set to 'no router'.\n", rc,
- QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
+ rc, CARD_DEVID(card));
}
return rc;
}
@@ -518,9 +514,8 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
QETH_PROT_IPV6);
if (rc) {
card->options.route6.type = NO_ROUTER;
- QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
- " on %s. Type set to 'no router'.\n", rc,
- QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
+ rc, CARD_DEVID(card));
}
return rc;
}
@@ -663,6 +658,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
int rc = 0;
int cnt = 3;
+ if (card->options.sniffer)
+ return 0;
if (addr->proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "setaddr4");
@@ -697,6 +694,9 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
{
int rc = 0;
+ if (card->options.sniffer)
+ return 0;
+
if (addr->proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "deladdr4");
QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
@@ -1070,8 +1070,8 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
}
break;
default:
- QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
- cmd->data.diagass.action, QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n",
+ cmd->data.diagass.action, CARD_DEVID(card));
}
return 0;
@@ -1517,32 +1517,25 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
qeth_l3_handle_promisc_mode(card);
}
-static const char *qeth_l3_arp_get_error_cause(int *rc)
+static int qeth_l3_arp_makerc(int rc)
{
- switch (*rc) {
- case QETH_IPA_ARP_RC_FAILED:
- *rc = -EIO;
- return "operation failed";
+ switch (rc) {
+ case IPA_RC_SUCCESS:
+ return 0;
case QETH_IPA_ARP_RC_NOTSUPP:
- *rc = -EOPNOTSUPP;
- return "operation not supported";
- case QETH_IPA_ARP_RC_OUT_OF_RANGE:
- *rc = -EINVAL;
- return "argument out of range";
case QETH_IPA_ARP_RC_Q_NOTSUPP:
- *rc = -EOPNOTSUPP;
- return "query operation not supported";
+ return -EOPNOTSUPP;
+ case QETH_IPA_ARP_RC_OUT_OF_RANGE:
+ return -EINVAL;
case QETH_IPA_ARP_RC_Q_NO_DATA:
- *rc = -ENOENT;
- return "no query data available";
+ return -ENOENT;
default:
- return "unknown error";
+ return -EIO;
}
}
static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
{
- int tmp;
int rc;
QETH_CARD_TEXT(card, 3, "arpstnoe");
@@ -1560,13 +1553,10 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
no_entries);
- if (rc) {
- tmp = rc;
- QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
- "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
- return rc;
+ if (rc)
+ QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n",
+ CARD_DEVID(card), rc);
+ return qeth_l3_arp_makerc(rc);
}
static __u32 get_arp_entry_size(struct qeth_card *card,
@@ -1716,7 +1706,6 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- int tmp;
int rc;
QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
@@ -1735,15 +1724,10 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
rc = qeth_l3_send_ipa_arp_cmd(card, iob,
QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
qeth_l3_arp_query_cb, (void *)qinfo);
- if (rc) {
- tmp = rc;
- QETH_DBF_MESSAGE(2,
- "Error while querying ARP cache on %s: %s "
- "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
-
- return rc;
+ if (rc)
+ QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
+ CARD_DEVID(card), rc);
+ return qeth_l3_arp_makerc(rc);
}
static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
@@ -1793,15 +1777,18 @@ out:
return rc;
}
-static int qeth_l3_arp_add_entry(struct qeth_card *card,
- struct qeth_arp_cache_entry *entry)
+static int qeth_l3_arp_modify_entry(struct qeth_card *card,
+ struct qeth_arp_cache_entry *entry,
+ enum qeth_arp_process_subcmds arp_cmd)
{
+ struct qeth_arp_cache_entry *cmd_entry;
struct qeth_cmd_buffer *iob;
- char buf[16];
- int tmp;
int rc;
- QETH_CARD_TEXT(card, 3, "arpadent");
+ if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY)
+ QETH_CARD_TEXT(card, 3, "arpadd");
+ else
+ QETH_CARD_TEXT(card, 3, "arpdel");
/*
* currently GuestLAN only supports the ARP assist function
@@ -1814,71 +1801,25 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
return -EOPNOTSUPP;
}
- iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_ARP_ADD_ENTRY,
- sizeof(struct qeth_arp_cache_entry),
- QETH_PROT_IPV4);
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd,
+ sizeof(*cmd_entry), QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
- rc = qeth_send_setassparms(card, iob,
- sizeof(struct qeth_arp_cache_entry),
- (unsigned long) entry,
- qeth_setassparms_cb, NULL);
- if (rc) {
- tmp = rc;
- qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
- QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
- "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
- return rc;
-}
-
-static int qeth_l3_arp_remove_entry(struct qeth_card *card,
- struct qeth_arp_cache_entry *entry)
-{
- struct qeth_cmd_buffer *iob;
- char buf[16] = {0, };
- int tmp;
- int rc;
- QETH_CARD_TEXT(card, 3, "arprment");
+ cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry;
+ ether_addr_copy(cmd_entry->macaddr, entry->macaddr);
+ memcpy(cmd_entry->ipaddr, entry->ipaddr, 4);
+ rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
+ if (rc)
+ QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n",
+ arp_cmd, CARD_DEVID(card), rc);
- /*
- * currently GuestLAN only supports the ARP assist function
- * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
- * thus we say EOPNOTSUPP for this ARP function
- */
- if (card->info.guestlan)
- return -EOPNOTSUPP;
- if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
- return -EOPNOTSUPP;
- }
- memcpy(buf, entry, 12);
- iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_ARP_REMOVE_ENTRY,
- 12,
- QETH_PROT_IPV4);
- if (!iob)
- return -ENOMEM;
- rc = qeth_send_setassparms(card, iob,
- 12, (unsigned long)buf,
- qeth_setassparms_cb, NULL);
- if (rc) {
- tmp = rc;
- memset(buf, 0, 16);
- qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
- QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
- " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
- return rc;
+ return qeth_l3_arp_makerc(rc);
}
static int qeth_l3_arp_flush_cache(struct qeth_card *card)
{
int rc;
- int tmp;
QETH_CARD_TEXT(card, 3, "arpflush");
@@ -1894,19 +1835,17 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
}
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
- if (rc) {
- tmp = rc;
- QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
- "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
- return rc;
+ if (rc)
+ QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n",
+ CARD_DEVID(card), rc);
+ return qeth_l3_arp_makerc(rc);
}
static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_arp_cache_entry arp_entry;
+ enum qeth_arp_process_subcmds arp_cmd;
int rc = 0;
switch (cmd) {
@@ -1925,27 +1864,16 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_ARP_ADD_ENTRY:
- if (!capable(CAP_NET_ADMIN)) {
- rc = -EPERM;
- break;
- }
- if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
- sizeof(struct qeth_arp_cache_entry)))
- rc = -EFAULT;
- else
- rc = qeth_l3_arp_add_entry(card, &arp_entry);
- break;
case SIOC_QETH_ARP_REMOVE_ENTRY:
- if (!capable(CAP_NET_ADMIN)) {
- rc = -EPERM;
- break;
- }
- if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
- sizeof(struct qeth_arp_cache_entry)))
- rc = -EFAULT;
- else
- rc = qeth_l3_arp_remove_entry(card, &arp_entry);
- break;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry)))
+ return -EFAULT;
+
+ arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
+ IPA_CMD_ASS_ARP_ADD_ENTRY :
+ IPA_CMD_ASS_ARP_REMOVE_ENTRY;
+ return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd);
case SIOC_QETH_ARP_FLUSH_CACHE:
if (!capable(CAP_NET_ADMIN)) {
rc = -EPERM;
@@ -2383,12 +2311,12 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_neigh_setup = qeth_l3_neigh_setup,
};
-static int qeth_l3_setup_netdev(struct qeth_card *card)
+static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
unsigned int headroom;
int rc;
- if (card->dev->netdev_ops)
+ if (qeth_netdev_is_registered(card->dev))
return 0;
if (card->info.type == QETH_CARD_TYPE_OSD ||
@@ -2457,6 +2385,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
rc = register_netdev(card->dev);
+ if (!rc && carrier_ok)
+ netif_carrier_on(card->dev);
+
out:
if (rc)
card->dev->netdev_ops = NULL;
@@ -2497,7 +2428,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l3_set_offline(cgdev);
- unregister_netdev(card->dev);
+ if (qeth_netdev_is_registered(card->dev))
+ unregister_netdev(card->dev);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
}
@@ -2507,6 +2439,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
enum qeth_card_states recover_flag;
+ bool carrier_ok;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
@@ -2514,14 +2447,14 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
recover_flag = card->state;
- rc = qeth_core_hardsetup_card(card);
+ rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
- rc = qeth_l3_setup_netdev(card);
+ rc = qeth_l3_setup_netdev(card, carrier_ok);
if (rc)
goto out_remove;
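The qeth_l3 conversion above replaces the per-call error-string helper with qeth_l3_arp_makerc(), which maps hardware ARP return codes to errno values once, at the boundary. A standalone sketch of that pattern; the code names here are illustrative, not the qeth constants:

#include <errno.h>
#include <stdio.h>

/* Illustrative hardware return codes; the real ones live in qeth_core_mpc.h. */
enum hw_arp_rc { HW_RC_SUCCESS = 0, HW_RC_NOTSUPP = 1, HW_RC_RANGE = 2, HW_RC_NO_DATA = 3 };

/* Map a raw hardware code to a negative errno in one place. */
static int arp_makerc(int rc)
{
	switch (rc) {
	case HW_RC_SUCCESS:
		return 0;
	case HW_RC_NOTSUPP:
		return -EOPNOTSUPP;
	case HW_RC_RANGE:
		return -EINVAL;
	case HW_RC_NO_DATA:
		return -ENOENT;
	default:
		return -EIO;	/* anything unknown becomes a generic I/O error */
	}
}

int main(void)
{
	/* Callers log the raw code, but propagate only the mapped errno. */
	int raw = HW_RC_RANGE;
	printf("raw=%d mapped=%d\n", raw, arp_makerc(raw));
	return 0;
}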
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index f07444d30b21..640cd1b31a18 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -578,6 +578,7 @@ config SCSI_MYRB
config SCSI_MYRS
tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)"
depends on PCI
+ depends on !CPU_BIG_ENDIAN || COMPILE_TEST
select RAID_ATTRS
help
This driver adds support for the Mylex DAC960, AcceleRAID, and
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 8429c855701f..01c23d27f290 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -1198,7 +1198,7 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
out:
if (!hostdata->selecting)
- return NULL;
+ return false;
hostdata->selecting = NULL;
return ret;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index f0e457e6884e..8df822a4a1bd 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -904,11 +904,9 @@ static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
struct hisi_sas_slot *s, *s1, *s2 = NULL;
- struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp;
- dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index cc36b6473e98..77a85ead483e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1670,11 +1670,9 @@ static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
struct hisi_sas_slot *s, *s1, *s2 = NULL;
- struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp;
- dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index bd4ce38b98d2..a369450a1fa7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -886,11 +886,9 @@ static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
struct hisi_sas_slot *s, *s1, *s2 = NULL;
- struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp;
- dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 0c8005bb0f53..34d311a7dbef 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -698,6 +698,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
+ else
+ nrport = NULL;
spin_unlock(&phba->hbalock);
if (!nrport)
continue;
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index aeb282f617c5..0642f2d0a3bb 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1049,7 +1049,8 @@ static int myrb_get_hba_config(struct myrb_hba *cb)
enquiry2->fw.firmware_type = '0';
enquiry2->fw.turn_id = 0;
}
- sprintf(cb->fw_version, "%d.%02d-%c-%02d",
+ snprintf(cb->fw_version, sizeof(cb->fw_version),
+ "%d.%02d-%c-%02d",
enquiry2->fw.major_version,
enquiry2->fw.minor_version,
enquiry2->fw.firmware_type,
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 0264a2e2bc19..b8d54ef8cf6d 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -163,9 +163,12 @@ static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
dma_addr_t ctlr_info_addr;
union myrs_sgl *sgl;
unsigned char status;
- struct myrs_ctlr_info old;
+ unsigned short ldev_present, ldev_critical, ldev_offline;
+
+ ldev_present = cs->ctlr_info->ldev_present;
+ ldev_critical = cs->ctlr_info->ldev_critical;
+ ldev_offline = cs->ctlr_info->ldev_offline;
- memcpy(&old, cs->ctlr_info, sizeof(struct myrs_ctlr_info));
ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
sizeof(struct myrs_ctlr_info),
DMA_FROM_DEVICE);
@@ -198,9 +201,9 @@ static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
cs->ctlr_info->rbld_active +
cs->ctlr_info->exp_active != 0)
cs->needs_update = true;
- if (cs->ctlr_info->ldev_present != old.ldev_present ||
- cs->ctlr_info->ldev_critical != old.ldev_critical ||
- cs->ctlr_info->ldev_offline != old.ldev_offline)
+ if (cs->ctlr_info->ldev_present != ldev_present ||
+ cs->ctlr_info->ldev_critical != ldev_critical ||
+ cs->ctlr_info->ldev_offline != ldev_offline)
shost_printk(KERN_INFO, cs->host,
"Logical drive count changes (%d/%d/%d)\n",
cs->ctlr_info->ldev_critical,
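The myrs change above avoids putting a full struct myrs_ctlr_info copy on the stack; only the three counters the comparison needs are saved. A sketch of that trade, with an invented struct standing in for the controller info:

#include <stdio.h>

struct ctlr_info {
	unsigned short ldev_present, ldev_critical, ldev_offline;
	char big_blob[1024];	/* illustrative bulk that made the full copy costly */
};

static void refresh(struct ctlr_info *info)
{
	info->ldev_present++;	/* pretend the hardware reported a change */
}

int main(void)
{
	static struct ctlr_info info = { 3, 1, 0, "" };
	/* Save only what the comparison needs, not the whole struct. */
	unsigned short present = info.ldev_present;
	unsigned short critical = info.ldev_critical;
	unsigned short offline = info.ldev_offline;

	refresh(&info);
	if (info.ldev_present != present || info.ldev_critical != critical ||
	    info.ldev_offline != offline)
		printf("logical drive count changed\n");
	return 0;
}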
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 6fe20c27acc1..eb59c796a795 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4763,6 +4763,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport->loop_id = FC_NO_LOOP_ID;
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
+ fcport->fp_speed = PORT_SPEED_UNKNOWN;
fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 518f15141170..b658b9a5eb1e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -67,7 +67,7 @@ module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
"Option to enable PLOGI to devices that are not present after "
"a Fabric scan. This is needed for several broken switches. "
- "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
+ "Default is 0 - no PLOGI. 1 - perform PLOGI.");
int ql2xloginretrycount = 0;
module_param(ql2xloginretrycount, int, S_IRUGO);
@@ -1749,7 +1749,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
- int cnt;
+ int cnt, status;
unsigned long flags;
srb_t *sp;
scsi_qla_host_t *vha = qp->vha;
@@ -1799,10 +1799,16 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
if (!sp_get(sp)) {
spin_unlock_irqrestore
(qp->qp_lock_ptr, flags);
- qla2xxx_eh_abort(
+ status = qla2xxx_eh_abort(
GET_CMD_SP(sp));
spin_lock_irqsave
(qp->qp_lock_ptr, flags);
+ /*
+ * Get rid of the extra reference caused
+ * by an early exit from qla2xxx_eh_abort.
+ */
+ if (status == FAST_IO_FAIL)
+ atomic_dec(&sp->ref_count);
}
}
sp->done(sp, res);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c7fccbb8f554..fa6e0c3b3aa6 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -697,6 +697,12 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
*/
scsi_mq_uninit_cmd(cmd);
+ /*
+ * The queue is still alive, so grab a reference to prevent it
+ * from being cleaned up while the queue is being run.
+ */
+ percpu_ref_get(&q->q_usage_counter);
+
__blk_mq_end_request(req, error);
if (scsi_target(sdev)->single_lun ||
@@ -704,6 +710,8 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
kblockd_schedule_work(&sdev->requeue_work);
else
blk_mq_run_hw_queues(q, true);
+
+ percpu_ref_put(&q->q_usage_counter);
} else {
unsigned long flags;
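The scsi_lib hunk above pins the request queue with percpu_ref_get()/percpu_ref_put() across __blk_mq_end_request(), so the queue cannot be torn down while it is still being run. A userspace analogue of holding a reference across a call that may drop the last one, using a plain atomic count (percpu_ref itself is kernel-internal):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct queue { atomic_int refs; };

static void queue_put(struct queue *q)
{
	if (atomic_fetch_sub(&q->refs, 1) == 1) {
		printf("queue freed\n");
		free(q);
	}
}

/* May indirectly drop a reference on q, as ending a request can. */
static void end_request(struct queue *q)
{
	queue_put(q);
}

static void complete(struct queue *q)
{
	atomic_fetch_add(&q->refs, 1);	/* pin q for the duration */
	end_request(q);			/* may drop what was otherwise the last ref */
	/* still safe to touch q here */
	queue_put(q);			/* drop our pin; frees q if it was the last */
}

int main(void)
{
	struct queue *q = malloc(sizeof(*q));

	atomic_init(&q->refs, 1);
	complete(q);
	return 0;
}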
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 46df707e6f2c..452e19f8fb47 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -20,6 +20,7 @@
#include "unipro.h"
#include "ufs-hisi.h"
#include "ufshci.h"
+#include "ufs_quirks.h"
static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
{
@@ -390,6 +391,14 @@ static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
{
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
+ pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
+ /* VS_DebugSaveConfigTime */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
+ /* sync length */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
+ }
+
/* update */
ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
/* PA_TxSkip */
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 71f73d1d1ad1..5d2dfdb41a6f 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -131,4 +131,10 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
+/*
+ * Some UFS devices require VS_DebugSaveConfigTime to be 0x10;
+ * enabling this quirk ensures that.
+ */
+#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9)
+
#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 23d7cca36ff0..f1c57cd33b5b 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -231,6 +231,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
+ UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
END_FIX
};
@@ -8099,13 +8101,6 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
-
- /*
- * Do not use blk-mq at this time because blk-mq does not support
- * runtime pm.
- */
- host->use_blk_mq = false;
-
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 7218fb963d0a..1382a8df6c75 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -777,9 +777,6 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
u8 la = txn->la;
bool usr_msg = false;
- if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
- return -EPROTONOSUPPORT;
-
if (txn->mt == SLIM_MSG_MT_CORE &&
(txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
index 4399d1873e2d..9be41089edde 100644
--- a/drivers/slimbus/slimbus.h
+++ b/drivers/slimbus/slimbus.h
@@ -61,12 +61,6 @@
#define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL 0x58
#define SLIM_MSG_MC_RECONFIGURE_NOW 0x5F
-/*
- * Clock pause flag to indicate that the reconfig message
- * corresponds to clock pause sequence
- */
-#define SLIM_MSG_CLK_PAUSE_SEQ_FLG (1U << 8)
-
/* Clock pause values per SLIMbus spec */
#define SLIM_CLK_FAST 0
#define SLIM_CLK_CONST_PHASE 1
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
index a53231b08d30..e3425bf082ae 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -310,6 +310,7 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
break;
}
+ /* fall through */
case IPIPEIF_SDRAM_YUV:
/* Set clock divider */
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 82558455384a..dd121f66fa2d 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -253,7 +253,7 @@ static const struct v4l2_m2m_ops cedrus_m2m_ops = {
static const struct media_device_ops cedrus_m2m_media_ops = {
.req_validate = cedrus_request_validate,
- .req_queue = vb2_m2m_request_queue,
+ .req_queue = v4l2_m2m_request_queue,
};
static int cedrus_probe(struct platform_device *pdev)
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
index 257030460fb6..d3e23dd70c1b 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -279,7 +279,6 @@ static struct drm_driver driver = {
.gem_free_object_unlocked = vbox_gem_free_object,
.dumb_create = vbox_dumb_create,
.dumb_map_offset = vbox_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
index 73395a7536c5..fa933d422951 100644
--- a/drivers/staging/vboxvideo/vbox_drv.h
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -99,8 +99,6 @@ struct vbox_private {
int fb_mtrr;
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
index 5ecfa7629173..b36ec019c332 100644
--- a/drivers/staging/vboxvideo/vbox_ttm.c
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -35,61 +35,6 @@ static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct vbox_private, ttm.bdev);
}
-static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-/**
- * Adds the vbox memory manager object/structures to the global memory manager.
- */
-static int vbox_ttm_global_init(struct vbox_private *vbox)
-{
- struct drm_global_reference *global_ref;
- int ret;
-
- global_ref = &vbox->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &vbox_ttm_mem_global_init;
- global_ref->release = &vbox_ttm_mem_global_release;
- ret = drm_global_item_ref(global_ref);
- if (ret) {
- DRM_ERROR("Failed setting up TTM memory subsystem.\n");
- return ret;
- }
-
- vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
- global_ref = &vbox->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
-
- ret = drm_global_item_ref(global_ref);
- if (ret) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&vbox->ttm.mem_global_ref);
- return ret;
- }
-
- return 0;
-}
-
-/**
- * Removes the vbox memory manager object from the global memory manager.
- */
-static void vbox_ttm_global_release(struct vbox_private *vbox)
-{
- drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
- drm_global_item_unref(&vbox->ttm.mem_global_ref);
-}
-
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct vbox_bo *bo;
@@ -227,18 +172,13 @@ int vbox_mm_init(struct vbox_private *vbox)
struct drm_device *dev = &vbox->ddev;
struct ttm_bo_device *bdev = &vbox->ttm.bdev;
- ret = vbox_ttm_global_init(vbox);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&vbox->ttm.bdev,
- vbox->ttm.bo_global_ref.ref.object,
&vbox_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
- goto err_ttm_global_release;
+ return ret;
}
ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
@@ -260,8 +200,6 @@ int vbox_mm_init(struct vbox_private *vbox)
err_device_release:
ttm_bo_device_release(&vbox->ttm.bdev);
-err_ttm_global_release:
- vbox_ttm_global_release(vbox);
return ret;
}
@@ -275,7 +213,6 @@ void vbox_mm_fini(struct vbox_private *vbox)
arch_phys_wc_del(vbox->fb_mtrr);
#endif
ttm_bo_device_release(&vbox->ttm.bdev);
- vbox_ttm_global_release(vbox);
}
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index e31e4fc31aa1..2cfd61d62e97 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1778,7 +1778,7 @@ EXPORT_SYMBOL(target_submit_tmr);
void transport_generic_request_failure(struct se_cmd *cmd,
sense_reason_t sense_reason)
{
- int ret = 0;
+ int ret = 0, post_ret;
pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
sense_reason);
@@ -1790,7 +1790,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
transport_complete_task_attr(cmd);
if (cmd->transport_complete_callback)
- cmd->transport_complete_callback(cmd, false, NULL);
+ cmd->transport_complete_callback(cmd, false, &post_ret);
if (transport_check_aborted_status(cmd, 1))
return;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ff6ba6d86cd8..cc56cb3b3eca 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1614,10 +1614,10 @@ static void sci_request_dma(struct uart_port *port)
hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
s->rx_timer.function = rx_timer_fn;
+ s->chan_rx_saved = s->chan_rx = chan;
+
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
sci_submit_rx(s);
-
- s->chan_rx_saved = s->chan_rx = chan;
}
}
@@ -3102,6 +3102,7 @@ static struct uart_driver sci_uart_driver = {
static int sci_remove(struct platform_device *dev)
{
struct sci_port *port = platform_get_drvdata(dev);
+ unsigned int type = port->port.type; /* uart_remove_... clears it */
sci_ports_in_use &= ~BIT(port->port.line);
uart_remove_one_port(&sci_uart_driver, &port->port);
@@ -3112,8 +3113,7 @@ static int sci_remove(struct platform_device *dev)
sysfs_remove_file(&dev->dev.kobj,
&dev_attr_rx_fifo_trigger.attr);
}
- if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB ||
- port->port.type == PORT_HSCIF) {
+ if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) {
sysfs_remove_file(&dev->dev.kobj,
&dev_attr_rx_fifo_timeout.attr);
}
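The sh-sci fix above latches port->port.type into a local before uart_remove_one_port() clears it, so the later sysfs cleanup still tests the right value. A sketch of caching state that a teardown call is about to destroy; the names are placeholders:

#include <stdio.h>

struct port { int type; };

static void remove_one_port(struct port *p)
{
	p->type = 0;	/* teardown clears the field, as uart_remove_one_port() does */
}

static void driver_remove(struct port *p)
{
	int type = p->type;	/* latch before teardown */

	remove_one_port(p);
	if (type == 42)		/* illustrative PORT_SCIFA-style check */
		printf("removing extra sysfs attribute\n");
}

int main(void)
{
	struct port p = { .type = 42 };

	driver_remove(&p);
	return 0;
}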
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index 7576ceace571..f438eaa68246 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -77,7 +77,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
else
cbaud += 15;
}
- return baud_table[cbaud];
+ return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}
EXPORT_SYMBOL(tty_termios_baud_rate);
@@ -113,7 +113,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
else
cbaud += 15;
}
- return baud_table[cbaud];
+ return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
#else /* IBSHIFT */
return tty_termios_baud_rate(termios);
#endif /* IBSHIFT */
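Both tty_baudrate hunks above guard the computed index before the table lookup rather than trusting the cbaud arithmetic. The shape of the fix, as a standalone sketch with a shortened table:

#include <stdio.h>

static const unsigned int baud_table[] = { 0, 50, 75, 110, 134, 150, 200, 300 };
static const int n_baud_table = sizeof(baud_table) / sizeof(baud_table[0]);

/* Return 0 for any index the table cannot satisfy, instead of reading past it. */
static unsigned int lookup_baud(int cbaud)
{
	return (cbaud < 0 || cbaud >= n_baud_table) ? 0 : baud_table[cbaud];
}

int main(void)
{
	printf("%u %u\n", lookup_baud(3), lookup_baud(99));	/* prints: 110 0 */
	return 0;
}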
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 55370e651db3..41ec8e5010f3 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1548,7 +1548,7 @@ static void csi_K(struct vc_data *vc, int vpar)
scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count);
vc->vc_need_wrap = 0;
if (con_should_update(vc))
- do_update_region(vc, (unsigned long) start, count);
+ do_update_region(vc, (unsigned long)(start + offset), count);
}
static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 85644669fbe7..0a357db4b31b 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -961,6 +961,8 @@ int __uio_register_device(struct module *owner,
if (ret)
goto err_uio_dev_add_attributes;
+ info->uio_dev = idev;
+
if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
/*
* Note that we deliberately don't use devm_request_irq
@@ -972,11 +974,12 @@ int __uio_register_device(struct module *owner,
*/
ret = request_irq(info->irq, uio_interrupt,
info->irq_flags, info->name, idev);
- if (ret)
+ if (ret) {
+ info->uio_dev = NULL;
goto err_request_irq;
+ }
}
- info->uio_dev = idev;
return 0;
err_request_irq:
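The uio fix above publishes info->uio_dev before request_irq(), so an interrupt firing immediately already sees the device, and unpublishes it again if request_irq() fails. A sketch of that set-before-enable / clear-on-error ordering, with stub names in place of the kernel APIs:

#include <stddef.h>
#include <stdio.h>

struct uio_info { void *uio_dev; };

static int request_irq_stub(struct uio_info *info)
{
	/* The handler may run as soon as this returns 0 and will
	 * dereference info->uio_dev, so it must already be set. */
	return info->uio_dev ? 0 : -1;
}

static int register_device(struct uio_info *info, void *idev)
{
	int ret;

	info->uio_dev = idev;		/* publish first */
	ret = request_irq_stub(info);
	if (ret) {
		info->uio_dev = NULL;	/* unpublish on failure */
		return ret;
	}
	return 0;
}

int main(void)
{
	struct uio_info info = { NULL };
	int dev;

	printf("register: %d\n", register_device(&info, &dev));
	return 0;
}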
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 47d75c20c211..1b68fed464cb 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1696,6 +1696,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x0572, 0x1349), /* Hiro (Conexant) USB MODEM H50228 */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
{ USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
.driver_info = QUIRK_CONTROL_LINE_STATE, },
{ USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index c6077d582d29..0f9381b69a3b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2794,6 +2794,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
int i, status;
u16 portchange, portstatus;
struct usb_port *port_dev = hub->ports[port1 - 1];
+ int reset_recovery_time;
if (!hub_is_superspeed(hub->hdev)) {
if (warm) {
@@ -2849,7 +2850,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
USB_PORT_FEAT_C_BH_PORT_RESET);
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_PORT_LINK_STATE);
- usb_clear_port_feature(hub->hdev, port1,
+
+ if (udev)
+ usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
/*
@@ -2885,11 +2888,18 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
done:
if (status == 0) {
- /* TRSTRCY = 10 ms; plus some extra */
if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM)
usleep_range(10000, 12000);
- else
- msleep(10 + 40);
+ else {
+ /* TRSTRCY = 10 ms; plus some extra */
+ reset_recovery_time = 10 + 40;
+
+ /* Hub needs extra delay after resetting its port. */
+ if (hub->hdev->quirks & USB_QUIRK_HUB_SLOW_RESET)
+ reset_recovery_time += 100;
+
+ msleep(reset_recovery_time);
+ }
if (udev) {
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 178d6c6063c0..f9ff03e6af93 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -128,6 +128,9 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
case 'n':
flags |= USB_QUIRK_DELAY_CTRL_MSG;
break;
+ case 'o':
+ flags |= USB_QUIRK_HUB_SLOW_RESET;
+ break;
/* Ignore unrecognized flag characters */
}
}
@@ -380,6 +383,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Terminus Technology Inc. Hub */
+ { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
+
/* Corsair K70 RGB */
{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -391,6 +397,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
USB_QUIRK_DELAY_CTRL_MSG },
+ /* Corsair K70 LUX RGB */
+ { USB_DEVICE(0x1b1c, 0x1b33), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Corsair K70 LUX */
{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -411,6 +420,11 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x2040, 0x7200), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Raydium Touchscreen */
+ { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
+
+ { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
+
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index d257c541e51b..7afc10872f1f 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -120,6 +120,7 @@ static int dwc2_pci_probe(struct pci_dev *pci,
dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO);
if (!dwc2) {
dev_err(dev, "couldn't allocate dwc2 device\n");
+ ret = -ENOMEM;
goto err;
}
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index becfbb87f791..2f2048aa5fde 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1499,6 +1499,7 @@ static int dwc3_probe(struct platform_device *pdev)
err5:
dwc3_event_buffers_cleanup(dwc);
+ dwc3_ulpi_exit(dwc);
err4:
dwc3_free_scratch_buffers(dwc);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 1286076a8890..842795856bf4 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -283,8 +283,10 @@ err:
static void dwc3_pci_remove(struct pci_dev *pci)
{
struct dwc3_pci *dwc = pci_get_drvdata(pci);
+ struct pci_dev *pdev = dwc->pci;
- gpiod_remove_lookup_table(&platform_bytcr_gpios);
+ if (pdev->device == PCI_DEVICE_ID_INTEL_BYT)
+ gpiod_remove_lookup_table(&platform_bytcr_gpios);
#ifdef CONFIG_PM
cancel_work_sync(&dwc->wakeup_work);
#endif
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 679c12e14522..9faad896b3a1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1081,7 +1081,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
/* Now prepare one extra TRB to align transfer size */
trb = &dep->trb_pool[dep->trb_enqueue];
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
- maxp - rem, false, 0,
+ maxp - rem, false, 1,
req->request.stream_id,
req->request.short_not_ok,
req->request.no_interrupt);
@@ -1125,7 +1125,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
/* Now prepare one extra TRB to align transfer size */
trb = &dep->trb_pool[dep->trb_enqueue];
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
- false, 0, req->request.stream_id,
+ false, 1, req->request.stream_id,
req->request.short_not_ok,
req->request.no_interrupt);
} else if (req->request.zero && req->request.length &&
@@ -1141,7 +1141,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
/* Now prepare one extra TRB to handle ZLP */
trb = &dep->trb_pool[dep->trb_enqueue];
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
- false, 0, req->request.stream_id,
+ false, 1, req->request.stream_id,
req->request.short_not_ok,
req->request.no_interrupt);
} else {
@@ -2259,7 +2259,7 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
* with one TRB pending in the ring. We need to manually clear HWO bit
* from that TRB.
*/
- if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) {
+ if ((req->zero || req->unaligned) && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) {
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
return 1;
}
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 3ada83d81bda..31e8bf3578c8 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -215,7 +215,6 @@ struct ffs_io_data {
struct mm_struct *mm;
struct work_struct work;
- struct work_struct cancellation_work;
struct usb_ep *ep;
struct usb_request *req;
@@ -1073,31 +1072,22 @@ ffs_epfile_open(struct inode *inode, struct file *file)
return 0;
}
-static void ffs_aio_cancel_worker(struct work_struct *work)
-{
- struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
- cancellation_work);
-
- ENTER();
-
- usb_ep_dequeue(io_data->ep, io_data->req);
-}
-
static int ffs_aio_cancel(struct kiocb *kiocb)
{
struct ffs_io_data *io_data = kiocb->private;
- struct ffs_data *ffs = io_data->ffs;
+ struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
int value;
ENTER();
- if (likely(io_data && io_data->ep && io_data->req)) {
- INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
- queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
- value = -EINPROGRESS;
- } else {
+ spin_lock_irq(&epfile->ffs->eps_lock);
+
+ if (likely(io_data && io_data->ep && io_data->req))
+ value = usb_ep_dequeue(io_data->ep, io_data->req);
+ else
value = -EINVAL;
- }
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
return value;
}
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
index 27f00160332e..3c4abb5a1c3f 100644
--- a/drivers/usb/host/xhci-histb.c
+++ b/drivers/usb/host/xhci-histb.c
@@ -325,14 +325,16 @@ static int xhci_histb_remove(struct platform_device *dev)
struct xhci_hcd_histb *histb = platform_get_drvdata(dev);
struct usb_hcd *hcd = histb->hcd;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct usb_hcd *shared_hcd = xhci->shared_hcd;
xhci->xhc_state |= XHCI_STATE_REMOVING;
- usb_remove_hcd(xhci->shared_hcd);
+ usb_remove_hcd(shared_hcd);
+ xhci->shared_hcd = NULL;
device_wakeup_disable(&dev->dev);
usb_remove_hcd(hcd);
- usb_put_hcd(xhci->shared_hcd);
+ usb_put_hcd(shared_hcd);
xhci_histb_host_disable(histb);
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 12eea73d9f20..94aca1b5ac8a 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -876,7 +876,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
status |= USB_PORT_STAT_SUSPEND;
}
if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
- !DEV_SUPERSPEED_ANY(raw_port_status)) {
+ !DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) {
if ((raw_port_status & PORT_RESET) ||
!(raw_port_status & PORT_PE))
return 0xffffffff;
@@ -921,7 +921,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
time_left = wait_for_completion_timeout(
&bus_state->rexit_done[wIndex],
msecs_to_jiffies(
- XHCI_MAX_REXIT_TIMEOUT));
+ XHCI_MAX_REXIT_TIMEOUT_MS));
spin_lock_irqsave(&xhci->lock, flags);
if (time_left) {
@@ -935,7 +935,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
} else {
int port_status = readl(port->addr);
xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
- XHCI_MAX_REXIT_TIMEOUT,
+ XHCI_MAX_REXIT_TIMEOUT_MS,
port_status);
status |= USB_PORT_STAT_SUSPEND;
clear_bit(wIndex, &bus_state->rexit_ports);
@@ -1474,15 +1474,18 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
unsigned long flags;
struct xhci_hub *rhub;
struct xhci_port **ports;
+ u32 portsc_buf[USB_MAXCHILDREN];
+ bool wake_enabled;
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
max_ports = rhub->num_ports;
bus_state = &xhci->bus_state[hcd_index(hcd)];
+ wake_enabled = hcd->self.root_hub->do_remote_wakeup;
spin_lock_irqsave(&xhci->lock, flags);
- if (hcd->self.root_hub->do_remote_wakeup) {
+ if (wake_enabled) {
if (bus_state->resuming_ports || /* USB2 */
bus_state->port_remote_wakeup) { /* USB3 */
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1490,26 +1493,36 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
return -EBUSY;
}
}
-
- port_index = max_ports;
+ /*
+ * Prepare ports for suspend, but don't write anything before all ports
+ * are checked and we know bus suspend can proceed
+ */
bus_state->bus_suspended = 0;
+ port_index = max_ports;
while (port_index--) {
- /* suspend the port if the port is not suspended */
u32 t1, t2;
- int slot_id;
t1 = readl(ports[port_index]->addr);
t2 = xhci_port_state_to_neutral(t1);
+ portsc_buf[port_index] = 0;
- if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
- xhci_dbg(xhci, "port %d not suspended\n", port_index);
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- port_index + 1);
- if (slot_id) {
+ /* Bail out if a USB3 port has a new device in link training */
+ if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+ bus_state->bus_suspended = 0;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
+ return -EBUSY;
+ }
+
+ /* suspend ports in U0, or bail out for new connect changes */
+ if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
+ if ((t1 & PORT_CSC) && wake_enabled) {
+ bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_stop_device(xhci, slot_id, 1);
- spin_lock_irqsave(&xhci->lock, flags);
+ xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
+ return -EBUSY;
}
+ xhci_dbg(xhci, "port %d not suspended\n", port_index);
t2 &= ~PORT_PLS_MASK;
t2 |= PORT_LINK_STROBE | XDEV_U3;
set_bit(port_index, &bus_state->bus_suspended);
@@ -1518,7 +1531,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
* including the USB 3.0 roothub, but only if CONFIG_PM
* is enabled, so also enable remote wake here.
*/
- if (hcd->self.root_hub->do_remote_wakeup) {
+ if (wake_enabled) {
if (t1 & PORT_CONNECT) {
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
t2 &= ~PORT_WKCONN_E;
@@ -1538,7 +1551,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
t1 = xhci_port_state_to_neutral(t1);
if (t1 != t2)
- writel(t2, ports[port_index]->addr);
+ portsc_buf[port_index] = t2;
+ }
+
+ /* write port settings, stopping and suspending ports if needed */
+ port_index = max_ports;
+ while (port_index--) {
+ if (!portsc_buf[port_index])
+ continue;
+ if (test_bit(port_index, &bus_state->bus_suspended)) {
+ int slot_id;
+
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ port_index + 1);
+ if (slot_id) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_stop_device(xhci, slot_id, 1);
+ spin_lock_irqsave(&xhci->lock, flags);
+ }
+ }
+ writel(portsc_buf[port_index], ports[port_index]->addr);
}
hcd->state = HC_STATE_SUSPENDED;
bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
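
The reworked xhci_bus_suspend() above is a two-phase commit: the first pass only validates ports and stages the PORTSC values in portsc_buf[], returning -EBUSY before any register write if a port is still training or has a pending connect change; the second pass applies the staged writes. A compact model of that structure, with the port-state encoding invented for the sketch:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_PORTS 4
    #define PORT_POLLING 1
    #define PORT_U0      2

    static int bus_suspend(const int state[MAX_PORTS], int portsc_buf[MAX_PORTS])
    {
        int i;

        /* pass 1: validate and stage, no side effects yet */
        for (i = 0; i < MAX_PORTS; i++) {
            portsc_buf[i] = 0;
            if (state[i] == PORT_POLLING)
                return -EBUSY;       /* new device training, bail out */
            if (state[i] == PORT_U0)
                portsc_buf[i] = 3;   /* stage "set link to U3" */
        }

        /* pass 2: commit the staged writes */
        for (i = 0; i < MAX_PORTS; i++)
            if (portsc_buf[i])
                printf("write port %d <- %d\n", i, portsc_buf[i]);
        return 0;
    }

    int main(void)
    {
        int buf[MAX_PORTS];
        int ok[MAX_PORTS]   = { PORT_U0, 0, PORT_U0, 0 };
        int busy[MAX_PORTS] = { PORT_U0, PORT_POLLING, 0, 0 };

        printf("ok run: %d\n", bus_suspend(ok, buf));
        printf("busy run: %d\n", bus_suspend(busy, buf));
        return 0;
    }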
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 71d0d33c3286..60987c787e44 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -590,12 +590,14 @@ static int xhci_mtk_remove(struct platform_device *dev)
struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev);
struct usb_hcd *hcd = mtk->hcd;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct usb_hcd *shared_hcd = xhci->shared_hcd;
- usb_remove_hcd(xhci->shared_hcd);
+ usb_remove_hcd(shared_hcd);
+ xhci->shared_hcd = NULL;
device_init_wakeup(&dev->dev, false);
usb_remove_hcd(hcd);
- usb_put_hcd(xhci->shared_hcd);
+ usb_put_hcd(shared_hcd);
usb_put_hcd(hcd);
xhci_mtk_sch_exit(mtk);
xhci_mtk_clks_disable(mtk);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 01c57055c0c5..a9515265db4d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -248,6 +248,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
+ if ((pdev->vendor == PCI_VENDOR_ID_BROADCOM ||
+ pdev->vendor == PCI_VENDOR_ID_CAVIUM) &&
+ pdev->device == 0x9026)
+ xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT;
+
if (xhci->quirks & XHCI_RESET_ON_RESUME)
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"QUIRK: Resetting on resume");
@@ -380,6 +385,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
if (xhci->shared_hcd) {
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
+ xhci->shared_hcd = NULL;
}
/* Workaround for spurious wakeups at shutdown with HSW */
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 32b5574ad5c5..ef09cb06212f 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -362,14 +362,16 @@ static int xhci_plat_remove(struct platform_device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct clk *clk = xhci->clk;
struct clk *reg_clk = xhci->reg_clk;
+ struct usb_hcd *shared_hcd = xhci->shared_hcd;
xhci->xhc_state |= XHCI_STATE_REMOVING;
- usb_remove_hcd(xhci->shared_hcd);
+ usb_remove_hcd(shared_hcd);
+ xhci->shared_hcd = NULL;
usb_phy_shutdown(hcd->usb_phy);
usb_remove_hcd(hcd);
- usb_put_hcd(xhci->shared_hcd);
+ usb_put_hcd(shared_hcd);
clk_disable_unprepare(clk);
clk_disable_unprepare(reg_clk);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a8d92c90fb58..65750582133f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1521,6 +1521,35 @@ static void handle_device_notification(struct xhci_hcd *xhci,
usb_wakeup_notification(udev->parent, udev->portnum);
}
+/*
+ * Quirk handler for errata seen on the Cavium ThunderX2 processor XHCI
+ * controller.
+ * Per ThunderX2 errata-129, a USB 2 device may come up as USB 1 if a
+ * connection to a USB 1 device is followed by another connection to a
+ * USB 2 device.
+ *
+ * Reset the PHY after the USB device is disconnected if the device speed
+ * is less than HCD_USB3.
+ * Retry the reset sequence a maximum of 4 times, checking the PLL lock
+ * status each time.
+ *
+ */
+static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
+{
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ u32 pll_lock_check;
+ u32 retry_count = 4;
+
+ do {
+ /* Assert PHY reset */
+ writel(0x6F, hcd->regs + 0x1048);
+ udelay(10);
+ /* De-assert the PHY reset */
+ writel(0x7F, hcd->regs + 0x1048);
+ udelay(200);
+ pll_lock_check = readl(hcd->regs + 0x1070);
+ } while (!(pll_lock_check & 0x1) && --retry_count);
+}
+
static void handle_port_status(struct xhci_hcd *xhci,
union xhci_trb *event)
{
@@ -1556,6 +1585,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
goto cleanup;
}
+ /* We might get interrupts after shared_hcd is removed */
+ if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
+ xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
+ bogus_port_status = true;
+ goto cleanup;
+ }
+
hcd = port->rhub->hcd;
bus_state = &xhci->bus_state[hcd_index(hcd)];
hcd_portnum = port->hcd_portnum;
@@ -1639,7 +1675,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
* RExit to a disconnect state). If so, let the driver know it's
* out of the RExit state.
*/
- if (!DEV_SUPERSPEED_ANY(portsc) &&
+ if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
test_and_clear_bit(hcd_portnum,
&bus_state->rexit_ports)) {
complete(&bus_state->rexit_done[hcd_portnum]);
@@ -1647,8 +1683,12 @@ static void handle_port_status(struct xhci_hcd *xhci,
goto cleanup;
}
- if (hcd->speed < HCD_USB3)
+ if (hcd->speed < HCD_USB3) {
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
+ if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
+ (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
+ xhci_cavium_reset_phy_quirk(xhci);
+ }
cleanup:
/* Update event ring dequeue pointer before dropping the lock */
@@ -2266,6 +2306,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
goto cleanup;
case COMP_RING_UNDERRUN:
case COMP_RING_OVERRUN:
+ case COMP_STOPPED_LENGTH_INVALID:
goto cleanup;
default:
xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 6b5db344de30..938ff06c0349 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1303,6 +1303,7 @@ static int tegra_xusb_remove(struct platform_device *pdev)
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
+ xhci->shared_hcd = NULL;
usb_remove_hcd(tegra->hcd);
usb_put_hcd(tegra->hcd);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 0420eefa647a..c928dbbff881 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -719,8 +719,6 @@ static void xhci_stop(struct usb_hcd *hcd)
/* Only halt host and free memory after both hcds are removed */
if (!usb_hcd_is_primary_hcd(hcd)) {
- /* usb core will free this hcd shortly, unset pointer */
- xhci->shared_hcd = NULL;
mutex_unlock(&xhci->mutex);
return;
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index bf0b3692dc9a..260b259b72bc 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1680,7 +1680,7 @@ struct xhci_bus_state {
* It can take up to 20 ms to transition from RExit to U0 on the
* Intel Lynx Point LP xHCI host.
*/
-#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000)
+#define XHCI_MAX_REXIT_TIMEOUT_MS 20
static inline unsigned int hcd_index(struct usb_hcd *hcd)
{
@@ -1849,6 +1849,7 @@ struct xhci_hcd {
#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
#define XHCI_ZERO_64B_REGS BIT_ULL(32)
#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
+#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index bd539f3058bc..85b48c6ddc7e 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -50,6 +50,7 @@ static const struct usb_device_id appledisplay_table[] = {
{ APPLEDISPLAY_DEVICE(0x9219) },
{ APPLEDISPLAY_DEVICE(0x921c) },
{ APPLEDISPLAY_DEVICE(0x921d) },
+ { APPLEDISPLAY_DEVICE(0x9222) },
{ APPLEDISPLAY_DEVICE(0x9236) },
/* Terminating entry */
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index e36d6c73c4a4..78118883f96c 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -23,6 +23,16 @@ config TYPEC_UCSI
if TYPEC_UCSI
+config UCSI_CCG
+ tristate "UCSI Interface Driver for Cypress CCGx"
+ depends on I2C
+ help
+ This driver enables UCSI support on platforms that expose a
+ Cypress CCGx Type-C controller over an I2C interface.
+
+ To compile the driver as a module, choose M here: the module will be
+ called ucsi_ccg.
+
config UCSI_ACPI
tristate "UCSI ACPI Interface Driver"
depends on ACPI
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index 7afbea512207..2f4900b26210 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -8,3 +8,5 @@ typec_ucsi-y := ucsi.o
typec_ucsi-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
+
+obj-$(CONFIG_UCSI_CCG) += ucsi_ccg.o
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
new file mode 100644
index 000000000000..de8a43bdff68
--- /dev/null
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UCSI driver for Cypress CCGx Type-C controller
+ *
+ * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
+ * Author: Ajay Gupta <ajayg@nvidia.com>
+ *
+ * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
+ */
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <asm/unaligned.h>
+#include "ucsi.h"
+
+struct ucsi_ccg {
+ struct device *dev;
+ struct ucsi *ucsi;
+ struct ucsi_ppm ppm;
+ struct i2c_client *client;
+};
+
+#define CCGX_RAB_INTR_REG 0x06
+#define CCGX_RAB_UCSI_CONTROL 0x39
+#define CCGX_RAB_UCSI_CONTROL_START BIT(0)
+#define CCGX_RAB_UCSI_CONTROL_STOP BIT(1)
+#define CCGX_RAB_UCSI_DATA_BLOCK(offset) (0xf000 | ((offset) & 0xff))
+
+static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+{
+ struct i2c_client *client = uc->client;
+ const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
+ unsigned char buf[2];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0x0,
+ .len = sizeof(buf),
+ .buf = buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .buf = data,
+ },
+ };
+ u32 rlen, rem_len = len, max_read_len = len;
+ int status;
+
+ /* check any max_read_len limitation on i2c adapter */
+ if (quirks && quirks->max_read_len)
+ max_read_len = quirks->max_read_len;
+
+ while (rem_len > 0) {
+ msgs[1].buf = &data[len - rem_len];
+ rlen = min_t(u16, rem_len, max_read_len);
+ msgs[1].len = rlen;
+ put_unaligned_le16(rab, buf);
+ status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (status < 0) {
+ dev_err(uc->dev, "i2c_transfer failed %d\n", status);
+ return status;
+ }
+ rab += rlen;
+ rem_len -= rlen;
+ }
+
+ return 0;
+}
+
+static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+{
+ struct i2c_client *client = uc->client;
+ unsigned char *buf;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0x0,
+ }
+ };
+ int status;
+
+ buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ put_unaligned_le16(rab, buf);
+ memcpy(buf + sizeof(rab), data, len);
+
+ msgs[0].len = len + sizeof(rab);
+ msgs[0].buf = buf;
+
+ status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (status < 0) {
+ dev_err(uc->dev, "i2c_transfer failed %d\n", status);
+ kfree(buf);
+ return status;
+ }
+
+ kfree(buf);
+ return 0;
+}
+
+static int ucsi_ccg_init(struct ucsi_ccg *uc)
+{
+ unsigned int count = 10;
+ u8 data;
+ int status;
+
+ data = CCGX_RAB_UCSI_CONTROL_STOP;
+ status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ data = CCGX_RAB_UCSI_CONTROL_START;
+ status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ /*
+ * Flush the CCGx RESPONSE queue by acking interrupts. The above UCSI
+ * control register write pushes a response which must be cleared.
+ */
+ do {
+ status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ if (!data)
+ return 0;
+
+ status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ usleep_range(10000, 11000);
+ } while (--count);
+
+ return -ETIMEDOUT;
+}
+
+static int ucsi_ccg_send_data(struct ucsi_ccg *uc)
+{
+ u8 *ppm = (u8 *)uc->ppm.data;
+ int status;
+ u16 rab;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out));
+ status = ccg_write(uc, rab, ppm +
+ offsetof(struct ucsi_data, message_out),
+ sizeof(uc->ppm.data->message_out));
+ if (status < 0)
+ return status;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl));
+ return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl),
+ sizeof(uc->ppm.data->ctrl));
+}
+
+static int ucsi_ccg_recv_data(struct ucsi_ccg *uc)
+{
+ u8 *ppm = (u8 *)uc->ppm.data;
+ int status;
+ u16 rab;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci));
+ status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci),
+ sizeof(uc->ppm.data->cci));
+ if (status < 0)
+ return status;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in));
+ return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in),
+ sizeof(uc->ppm.data->message_in));
+}
+
+static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc)
+{
+ int status;
+ unsigned char data;
+
+ status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+}
+
+static int ucsi_ccg_sync(struct ucsi_ppm *ppm)
+{
+ struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
+ int status;
+
+ status = ucsi_ccg_recv_data(uc);
+ if (status < 0)
+ return status;
+
+ /* ack interrupt to allow next command to run */
+ return ucsi_ccg_ack_interrupt(uc);
+}
+
+static int ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
+{
+ struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
+
+ ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
+ return ucsi_ccg_send_data(uc);
+}
+
+static irqreturn_t ccg_irq_handler(int irq, void *data)
+{
+ struct ucsi_ccg *uc = data;
+
+ ucsi_notify(uc->ucsi);
+
+ return IRQ_HANDLED;
+}
+
+static int ucsi_ccg_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ucsi_ccg *uc;
+ int status;
+ u16 rab;
+
+ uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
+ if (!uc)
+ return -ENOMEM;
+
+ uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL);
+ if (!uc->ppm.data)
+ return -ENOMEM;
+
+ uc->ppm.cmd = ucsi_ccg_cmd;
+ uc->ppm.sync = ucsi_ccg_sync;
+ uc->dev = dev;
+ uc->client = client;
+
+ /* reset ccg device and initialize ucsi */
+ status = ucsi_ccg_init(uc);
+ if (status < 0) {
+ dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
+ return status;
+ }
+
+ status = devm_request_threaded_irq(dev, client->irq, NULL,
+ ccg_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ dev_name(dev), uc);
+ if (status < 0) {
+ dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
+ return status;
+ }
+
+ uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
+ if (IS_ERR(uc->ucsi)) {
+ dev_err(uc->dev, "ucsi_register_ppm failed\n");
+ return PTR_ERR(uc->ucsi);
+ }
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version));
+ status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) +
+ offsetof(struct ucsi_data, version),
+ sizeof(uc->ppm.data->version));
+ if (status < 0) {
+ ucsi_unregister_ppm(uc->ucsi);
+ return status;
+ }
+
+ i2c_set_clientdata(client, uc);
+ return 0;
+}
+
+static int ucsi_ccg_remove(struct i2c_client *client)
+{
+ struct ucsi_ccg *uc = i2c_get_clientdata(client);
+
+ ucsi_unregister_ppm(uc->ucsi);
+
+ return 0;
+}
+
+static const struct i2c_device_id ucsi_ccg_device_id[] = {
+ {"ccgx-ucsi", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);
+
+static struct i2c_driver ucsi_ccg_driver = {
+ .driver = {
+ .name = "ucsi_ccg",
+ },
+ .probe = ucsi_ccg_probe,
+ .remove = ucsi_ccg_remove,
+ .id_table = ucsi_ccg_device_id,
+};
+
+module_i2c_driver(ucsi_ccg_driver);
+
+MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
+MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
+MODULE_LICENSE("GPL v2");
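
ccg_read() above splits one logical register read into several i2c_transfer() calls when the adapter advertises a max_read_len quirk, advancing the register address base (rab) by the size of each chunk. A userspace model of just that chunking loop, with a memcpy standing in for the bus transfer and all names invented:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t device_regs[256];

    /* Stand-in for one i2c_transfer() read at register address rab. */
    static void bus_read(uint16_t rab, uint8_t *buf, uint32_t len)
    {
        memcpy(buf, &device_regs[rab], len);
    }

    static int chunked_read(uint16_t rab, uint8_t *data, uint32_t len,
                            uint32_t max_read_len)
    {
        uint32_t rem_len = len;

        while (rem_len > 0) {
            uint32_t rlen = rem_len < max_read_len ? rem_len : max_read_len;

            bus_read(rab, &data[len - rem_len], rlen);
            rab += rlen;     /* next chunk starts where this one ended */
            rem_len -= rlen;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t out[16];
        int i;

        for (i = 0; i < 256; i++)
            device_regs[i] = i;
        chunked_read(0xf0, out, sizeof(out), 5);  /* forces 4 transfers */
        printf("out[0]=0x%x out[15]=0x%x\n", out[0], out[15]);
        return 0;
    }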
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 8a3e8f61b991..799ae49774f5 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -31,7 +31,7 @@
#define hdmi_log(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
-static u8 hdmi_infoframe_checksum(u8 *ptr, size_t size)
+static u8 hdmi_infoframe_checksum(const u8 *ptr, size_t size)
{
u8 csum = 0;
size_t i;
@@ -68,8 +68,36 @@ int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
}
EXPORT_SYMBOL(hdmi_avi_infoframe_init);
+static int hdmi_avi_infoframe_check_only(const struct hdmi_avi_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_AVI ||
+ frame->version != 2 ||
+ frame->length != HDMI_AVI_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ if (frame->picture_aspect > HDMI_PICTURE_ASPECT_16_9)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
- * hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
+ * hdmi_avi_infoframe_check() - check a HDMI AVI infoframe
+ * @frame: HDMI AVI infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame)
+{
+ return hdmi_avi_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_check);
+
+/**
+ * hdmi_avi_infoframe_pack_only() - write HDMI AVI infoframe to binary buffer
* @frame: HDMI AVI infoframe
* @buffer: destination buffer
* @size: size of buffer
@@ -82,20 +110,22 @@ EXPORT_SYMBOL(hdmi_avi_infoframe_init);
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
-ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
- size_t size)
+ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size)
{
u8 *ptr = buffer;
size_t length;
+ int ret;
+
+ ret = hdmi_avi_infoframe_check_only(frame);
+ if (ret)
+ return ret;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
if (size < length)
return -ENOSPC;
- if (frame->picture_aspect > HDMI_PICTURE_ASPECT_16_9)
- return -EINVAL;
-
memset(buffer, 0, size);
ptr[0] = frame->type;
@@ -152,6 +182,36 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
return length;
}
+EXPORT_SYMBOL(hdmi_avi_infoframe_pack_only);
+
+/**
+ * hdmi_avi_infoframe_pack() - check a HDMI AVI infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI AVI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_avi_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_avi_infoframe_pack_only(frame, buffer, size);
+}
EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
/**
@@ -178,8 +238,33 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
}
EXPORT_SYMBOL(hdmi_spd_infoframe_init);
+static int hdmi_spd_infoframe_check_only(const struct hdmi_spd_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_SPD ||
+ frame->version != 1 ||
+ frame->length != HDMI_SPD_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
- * hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer
+ * hdmi_spd_infoframe_check() - check a HDMI SPD infoframe
+ * @frame: HDMI SPD infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame)
+{
+ return hdmi_spd_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_check);
+
+/**
+ * hdmi_spd_infoframe_pack_only() - write HDMI SPD infoframe to binary buffer
* @frame: HDMI SPD infoframe
* @buffer: destination buffer
* @size: size of buffer
@@ -192,11 +277,16 @@ EXPORT_SYMBOL(hdmi_spd_infoframe_init);
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
-ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
- size_t size)
+ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size)
{
u8 *ptr = buffer;
size_t length;
+ int ret;
+
+ ret = hdmi_spd_infoframe_check_only(frame);
+ if (ret)
+ return ret;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
@@ -222,6 +312,36 @@ ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
return length;
}
+EXPORT_SYMBOL(hdmi_spd_infoframe_pack_only);
+
+/**
+ * hdmi_spd_infoframe_pack() - check a HDMI SPD infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI SPD infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_spd_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_spd_infoframe_pack_only(frame, buffer, size);
+}
EXPORT_SYMBOL(hdmi_spd_infoframe_pack);
/**
@@ -242,8 +362,33 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
}
EXPORT_SYMBOL(hdmi_audio_infoframe_init);
+static int hdmi_audio_infoframe_check_only(const struct hdmi_audio_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_AUDIO ||
+ frame->version != 1 ||
+ frame->length != HDMI_AUDIO_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
- * hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
+ * hdmi_audio_infoframe_check() - check a HDMI audio infoframe
+ * @frame: HDMI audio infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame)
+{
+ return hdmi_audio_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_check);
+
+/**
+ * hdmi_audio_infoframe_pack_only() - write HDMI audio infoframe to binary buffer
* @frame: HDMI audio infoframe
* @buffer: destination buffer
* @size: size of buffer
@@ -256,12 +401,17 @@ EXPORT_SYMBOL(hdmi_audio_infoframe_init);
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
-ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
- void *buffer, size_t size)
+ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size)
{
unsigned char channels;
u8 *ptr = buffer;
size_t length;
+ int ret;
+
+ ret = hdmi_audio_infoframe_check_only(frame);
+ if (ret)
+ return ret;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
@@ -297,6 +447,36 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
return length;
}
+EXPORT_SYMBOL(hdmi_audio_infoframe_pack_only);
+
+/**
+ * hdmi_audio_infoframe_pack() - check a HDMI Audio infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI Audio infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_audio_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_audio_infoframe_pack_only(frame, buffer, size);
+}
EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
/**
@@ -319,6 +499,7 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
* value
*/
frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;
+ frame->length = 4;
return 0;
}
@@ -335,8 +516,42 @@ static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *fram
return 4;
}
+static int hdmi_vendor_infoframe_check_only(const struct hdmi_vendor_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_VENDOR ||
+ frame->version != 1 ||
+ frame->oui != HDMI_IEEE_OUI)
+ return -EINVAL;
+
+ /* only one of those can be supplied */
+ if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
+ return -EINVAL;
+
+ if (frame->length != hdmi_vendor_infoframe_length(frame))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hdmi_vendor_infoframe_check() - check a HDMI vendor infoframe
+ * @frame: HDMI infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame)
+{
+ frame->length = hdmi_vendor_infoframe_length(frame);
+
+ return hdmi_vendor_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_check);
+
/**
- * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
+ * hdmi_vendor_infoframe_pack_only() - write a HDMI vendor infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
* @size: size of buffer
@@ -349,17 +564,16 @@ static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *fram
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
-ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
- void *buffer, size_t size)
+ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size)
{
u8 *ptr = buffer;
size_t length;
+ int ret;
- /* only one of those can be supplied */
- if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
- return -EINVAL;
-
- frame->length = hdmi_vendor_infoframe_length(frame);
+ ret = hdmi_vendor_infoframe_check_only(frame);
+ if (ret)
+ return ret;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
@@ -394,24 +608,134 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
return length;
}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_pack_only);
+
+/**
+ * hdmi_vendor_infoframe_pack() - check a HDMI Vendor infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI Vendor infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_vendor_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_vendor_infoframe_pack_only(frame, buffer, size);
+}
EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
+static int
+hdmi_vendor_any_infoframe_check_only(const union hdmi_vendor_any_infoframe *frame)
+{
+ if (frame->any.type != HDMI_INFOFRAME_TYPE_VENDOR ||
+ frame->any.version != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * hdmi_vendor_any_infoframe_check() - check a vendor infoframe
+ */
+static int
+hdmi_vendor_any_infoframe_check(union hdmi_vendor_any_infoframe *frame)
+{
+ int ret;
+
+ ret = hdmi_vendor_any_infoframe_check_only(frame);
+ if (ret)
+ return ret;
+
+ /* we only know about HDMI vendor infoframes */
+ if (frame->any.oui != HDMI_IEEE_OUI)
+ return -EINVAL;
+
+ return hdmi_vendor_infoframe_check(&frame->hdmi);
+}
+
/*
- * hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer
+ * hdmi_vendor_any_infoframe_pack_only() - write a vendor infoframe to binary buffer
*/
static ssize_t
-hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
- void *buffer, size_t size)
+hdmi_vendor_any_infoframe_pack_only(const union hdmi_vendor_any_infoframe *frame,
+ void *buffer, size_t size)
{
+ int ret;
+
+ ret = hdmi_vendor_any_infoframe_check_only(frame);
+ if (ret)
+ return ret;
+
/* we only know about HDMI vendor infoframes */
if (frame->any.oui != HDMI_IEEE_OUI)
return -EINVAL;
- return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size);
+ return hdmi_vendor_infoframe_pack_only(&frame->hdmi, buffer, size);
+}
+
+/*
+ * hdmi_vendor_any_infoframe_pack() - check a vendor infoframe,
+ * and write it to binary buffer
+ */
+static ssize_t
+hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_vendor_any_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_vendor_any_infoframe_pack_only(frame, buffer, size);
}
/**
- * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer
+ * hdmi_infoframe_check() - check a HDMI infoframe
+ * @frame: HDMI infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int
+hdmi_infoframe_check(union hdmi_infoframe *frame)
+{
+ switch (frame->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return hdmi_avi_infoframe_check(&frame->avi);
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return hdmi_spd_infoframe_check(&frame->spd);
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ return hdmi_audio_infoframe_check(&frame->audio);
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return hdmi_vendor_any_infoframe_check(&frame->vendor);
+ default:
+ WARN(1, "Bad infoframe type %d\n", frame->any.type);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(hdmi_infoframe_check);
+
+/**
+ * hdmi_infoframe_pack_only() - write a HDMI infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
* @size: size of buffer
@@ -425,7 +749,56 @@ hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
* error code on failure.
*/
ssize_t
-hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
+hdmi_infoframe_pack_only(const union hdmi_infoframe *frame, void *buffer, size_t size)
+{
+ ssize_t length;
+
+ switch (frame->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ length = hdmi_avi_infoframe_pack_only(&frame->avi,
+ buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ length = hdmi_spd_infoframe_pack_only(&frame->spd,
+ buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ length = hdmi_audio_infoframe_pack_only(&frame->audio,
+ buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ length = hdmi_vendor_any_infoframe_pack_only(&frame->vendor,
+ buffer, size);
+ break;
+ default:
+ WARN(1, "Bad infoframe type %d\n", frame->any.type);
+ length = -EINVAL;
+ }
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_infoframe_pack_only);
+
+/**
+ * hdmi_infoframe_pack() - check a HDMI infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame,
+ void *buffer, size_t size)
{
ssize_t length;
@@ -471,7 +844,7 @@ static const char *hdmi_infoframe_type_get_name(enum hdmi_infoframe_type type)
static void hdmi_infoframe_log_header(const char *level,
struct device *dev,
- struct hdmi_any_infoframe *frame)
+ const struct hdmi_any_infoframe *frame)
{
hdmi_log("HDMI infoframe: %s, version %u, length %u\n",
hdmi_infoframe_type_get_name(frame->type),
@@ -673,10 +1046,10 @@ hdmi_content_type_get_name(enum hdmi_content_type content_type)
*/
static void hdmi_avi_infoframe_log(const char *level,
struct device *dev,
- struct hdmi_avi_infoframe *frame)
+ const struct hdmi_avi_infoframe *frame)
{
hdmi_infoframe_log_header(level, dev,
- (struct hdmi_any_infoframe *)frame);
+ (const struct hdmi_any_infoframe *)frame);
hdmi_log(" colorspace: %s\n",
hdmi_colorspace_get_name(frame->colorspace));
@@ -750,12 +1123,12 @@ static const char *hdmi_spd_sdi_get_name(enum hdmi_spd_sdi sdi)
*/
static void hdmi_spd_infoframe_log(const char *level,
struct device *dev,
- struct hdmi_spd_infoframe *frame)
+ const struct hdmi_spd_infoframe *frame)
{
u8 buf[17];
hdmi_infoframe_log_header(level, dev,
- (struct hdmi_any_infoframe *)frame);
+ (const struct hdmi_any_infoframe *)frame);
memset(buf, 0, sizeof(buf));
@@ -886,10 +1259,10 @@ hdmi_audio_coding_type_ext_get_name(enum hdmi_audio_coding_type_ext ctx)
*/
static void hdmi_audio_infoframe_log(const char *level,
struct device *dev,
- struct hdmi_audio_infoframe *frame)
+ const struct hdmi_audio_infoframe *frame)
{
hdmi_infoframe_log_header(level, dev,
- (struct hdmi_any_infoframe *)frame);
+ (const struct hdmi_any_infoframe *)frame);
if (frame->channels)
hdmi_log(" channels: %u\n", frame->channels - 1);
@@ -949,12 +1322,12 @@ hdmi_3d_structure_get_name(enum hdmi_3d_structure s3d_struct)
static void
hdmi_vendor_any_infoframe_log(const char *level,
struct device *dev,
- union hdmi_vendor_any_infoframe *frame)
+ const union hdmi_vendor_any_infoframe *frame)
{
- struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
+ const struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
hdmi_infoframe_log_header(level, dev,
- (struct hdmi_any_infoframe *)frame);
+ (const struct hdmi_any_infoframe *)frame);
if (frame->any.oui != HDMI_IEEE_OUI) {
hdmi_log(" not a HDMI vendor infoframe\n");
@@ -984,7 +1357,7 @@ hdmi_vendor_any_infoframe_log(const char *level,
*/
void hdmi_infoframe_log(const char *level,
struct device *dev,
- union hdmi_infoframe *frame)
+ const union hdmi_infoframe *frame)
{
switch (frame->any.type) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -1005,8 +1378,9 @@ EXPORT_SYMBOL(hdmi_infoframe_log);
/**
* hdmi_avi_infoframe_unpack() - unpack binary buffer to a HDMI AVI infoframe
- * @buffer: source buffer
* @frame: HDMI AVI infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
*
* Unpacks the information contained in binary @buffer into a structured
* @frame of the HDMI Auxiliary Video (AVI) information frame.
@@ -1016,11 +1390,14 @@ EXPORT_SYMBOL(hdmi_infoframe_log);
* Returns 0 on success or a negative error code on failure.
*/
static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
- void *buffer)
+ const void *buffer, size_t size)
{
- u8 *ptr = buffer;
+ const u8 *ptr = buffer;
int ret;
+ if (size < HDMI_INFOFRAME_SIZE(AVI))
+ return -EINVAL;
+
if (ptr[0] != HDMI_INFOFRAME_TYPE_AVI ||
ptr[1] != 2 ||
ptr[2] != HDMI_AVI_INFOFRAME_SIZE)
@@ -1068,8 +1445,9 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
/**
* hdmi_spd_infoframe_unpack() - unpack binary buffer to a HDMI SPD infoframe
- * @buffer: source buffer
* @frame: HDMI SPD infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
*
* Unpacks the information contained in binary @buffer into a structured
* @frame of the HDMI Source Product Description (SPD) information frame.
@@ -1079,11 +1457,14 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
* Returns 0 on success or a negative error code on failure.
*/
static int hdmi_spd_infoframe_unpack(struct hdmi_spd_infoframe *frame,
- void *buffer)
+ const void *buffer, size_t size)
{
- u8 *ptr = buffer;
+ const u8 *ptr = buffer;
int ret;
+ if (size < HDMI_INFOFRAME_SIZE(SPD))
+ return -EINVAL;
+
if (ptr[0] != HDMI_INFOFRAME_TYPE_SPD ||
ptr[1] != 1 ||
ptr[2] != HDMI_SPD_INFOFRAME_SIZE) {
@@ -1106,8 +1487,9 @@ static int hdmi_spd_infoframe_unpack(struct hdmi_spd_infoframe *frame,
/**
* hdmi_audio_infoframe_unpack() - unpack binary buffer to a HDMI AUDIO infoframe
- * @buffer: source buffer
* @frame: HDMI Audio infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
*
* Unpacks the information contained in binary @buffer into a structured
* @frame of the HDMI Audio information frame.
@@ -1117,11 +1499,14 @@ static int hdmi_spd_infoframe_unpack(struct hdmi_spd_infoframe *frame,
* Returns 0 on success or a negative error code on failure.
*/
static int hdmi_audio_infoframe_unpack(struct hdmi_audio_infoframe *frame,
- void *buffer)
+ const void *buffer, size_t size)
{
- u8 *ptr = buffer;
+ const u8 *ptr = buffer;
int ret;
+ if (size < HDMI_INFOFRAME_SIZE(AUDIO))
+ return -EINVAL;
+
if (ptr[0] != HDMI_INFOFRAME_TYPE_AUDIO ||
ptr[1] != 1 ||
ptr[2] != HDMI_AUDIO_INFOFRAME_SIZE) {
@@ -1151,8 +1536,9 @@ static int hdmi_audio_infoframe_unpack(struct hdmi_audio_infoframe *frame,
/**
* hdmi_vendor_infoframe_unpack() - unpack binary buffer to a HDMI vendor infoframe
- * @buffer: source buffer
* @frame: HDMI Vendor infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
*
* Unpacks the information contained in binary @buffer into a structured
* @frame of the HDMI Vendor information frame.
@@ -1163,14 +1549,17 @@ static int hdmi_audio_infoframe_unpack(struct hdmi_audio_infoframe *frame,
*/
static int
hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
- void *buffer)
+ const void *buffer, size_t size)
{
- u8 *ptr = buffer;
+ const u8 *ptr = buffer;
size_t length;
int ret;
u8 hdmi_video_format;
struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
+ if (size < HDMI_INFOFRAME_HEADER_SIZE)
+ return -EINVAL;
+
if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR ||
ptr[1] != 1 ||
(ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6))
@@ -1178,6 +1567,9 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
length = ptr[2];
+ if (size < HDMI_INFOFRAME_HEADER_SIZE + length)
+ return -EINVAL;
+
if (hdmi_infoframe_checksum(buffer,
HDMI_INFOFRAME_HEADER_SIZE + length) != 0)
return -EINVAL;
@@ -1224,8 +1616,9 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
/**
* hdmi_infoframe_unpack() - unpack binary buffer to a HDMI infoframe
- * @buffer: source buffer
* @frame: HDMI infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
*
* Unpacks the information contained in binary buffer @buffer into a structured
* @frame of a HDMI infoframe.
@@ -1234,23 +1627,27 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
*
* Returns 0 on success or a negative error code on failure.
*/
-int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer)
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
+ const void *buffer, size_t size)
{
int ret;
- u8 *ptr = buffer;
+ const u8 *ptr = buffer;
+
+ if (size < HDMI_INFOFRAME_HEADER_SIZE)
+ return -EINVAL;
switch (ptr[0]) {
case HDMI_INFOFRAME_TYPE_AVI:
- ret = hdmi_avi_infoframe_unpack(&frame->avi, buffer);
+ ret = hdmi_avi_infoframe_unpack(&frame->avi, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_SPD:
- ret = hdmi_spd_infoframe_unpack(&frame->spd, buffer);
+ ret = hdmi_spd_infoframe_unpack(&frame->spd, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_AUDIO:
- ret = hdmi_audio_infoframe_unpack(&frame->audio, buffer);
+ ret = hdmi_audio_infoframe_unpack(&frame->audio, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_VENDOR:
- ret = hdmi_vendor_any_infoframe_unpack(&frame->vendor, buffer);
+ ret = hdmi_vendor_any_infoframe_unpack(&frame->vendor, buffer, size);
break;
default:
ret = -EINVAL;
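
The hdmi.c rework above follows one pattern for every infoframe type: a const *_check_only() validator that never modifies the frame, a *_check() that may first recompute derived fields (the vendor variant updates length), a *_pack_only() that validates and serializes a const frame, and a *_pack() that is simply check plus pack_only. A toy model of that layering, with a two-field frame invented for the sketch:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    struct frame { int type; size_t length; };

    #define FRAME_TYPE 0x82
    #define FRAME_LEN  13

    static int frame_check_only(const struct frame *f)
    {
        if (f->type != FRAME_TYPE || f->length != FRAME_LEN)
            return -EINVAL;
        return 0;
    }

    static int frame_check(struct frame *f)
    {
        f->length = FRAME_LEN;          /* derive fields, then validate */
        return frame_check_only(f);
    }

    static ssize_t frame_pack_only(const struct frame *f, void *buf, size_t size)
    {
        int ret = frame_check_only(f);  /* never writes to *f */

        if (ret)
            return ret;
        if (size < f->length)
            return -ENOSPC;
        memset(buf, 0, size);
        return (ssize_t)f->length;
    }

    static ssize_t frame_pack(struct frame *f, void *buf, size_t size)
    {
        int ret = frame_check(f);

        return ret ? ret : frame_pack_only(f, buf, size);
    }

    int main(void)
    {
        struct frame f = { .type = FRAME_TYPE };
        char buf[32];

        printf("pack_only (stale length): %zd\n",
               frame_pack_only(&f, buf, sizeof(buf)));
        printf("pack (fixes length first): %zd\n",
               frame_pack(&f, buf, sizeof(buf)));
        return 0;
    }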
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f15f89df1f36..7ea6fb6a2e5d 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -914,7 +914,7 @@ int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
ret = xenmem_reservation_increase(args->nr_pages, args->frames);
if (ret != args->nr_pages) {
- pr_debug("Failed to decrease reservation for DMA buffer\n");
+ pr_debug("Failed to increase reservation for DMA buffer\n");
ret = -EFAULT;
} else {
ret = 0;
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index df1ed37c3269..de01a6d0059d 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -21,15 +21,9 @@
MODULE_LICENSE("GPL");
-static unsigned int limit = 64;
-module_param(limit, uint, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
- "the privcmd-buf device per open file");
-
struct privcmd_buf_private {
struct mutex lock;
struct list_head list;
- unsigned int allocated;
};
struct privcmd_buf_vma_private {
@@ -60,13 +54,10 @@ static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
{
unsigned int i;
- vma_priv->file_priv->allocated -= vma_priv->n_pages;
-
list_del(&vma_priv->list);
for (i = 0; i < vma_priv->n_pages; i++)
- if (vma_priv->pages[i])
- __free_page(vma_priv->pages[i]);
+ __free_page(vma_priv->pages[i]);
kfree(vma_priv);
}
@@ -146,8 +137,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
unsigned int i;
int ret = 0;
- if (!(vma->vm_flags & VM_SHARED) || count > limit ||
- file_priv->allocated + count > limit)
+ if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
@@ -155,19 +145,15 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
if (!vma_priv)
return -ENOMEM;
- vma_priv->n_pages = count;
- count = 0;
- for (i = 0; i < vma_priv->n_pages; i++) {
+ for (i = 0; i < count; i++) {
vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!vma_priv->pages[i])
break;
- count++;
+ vma_priv->n_pages++;
}
mutex_lock(&file_priv->lock);
- file_priv->allocated += count;
-
vma_priv->file_priv = file_priv;
vma_priv->users = 1;
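
With the per-file limit gone, the privcmd-buf allocation loop above now increments n_pages only as each alloc_page() succeeds, which lets privcmd_buf_vmapriv_free() drop its NULL check and free exactly pages[0..n_pages). A small userspace model of that bookkeeping, with malloc standing in for alloc_page and an injected failure:

    #include <stdio.h>
    #include <stdlib.h>

    struct vma_priv {
        unsigned int n_pages;
        void *pages[8];
    };

    static int alloc_pages_up_to(struct vma_priv *p, unsigned int count,
                                 unsigned int fail_at)
    {
        unsigned int i;

        for (i = 0; i < count; i++) {
            p->pages[i] = (i == fail_at) ? NULL : malloc(4096);
            if (!p->pages[i])
                break;
            p->n_pages++;           /* counts only successful pages */
        }
        return p->n_pages == count ? 0 : -1;
    }

    static void free_all(struct vma_priv *p)
    {
        unsigned int i;

        for (i = 0; i < p->n_pages; i++)
            free(p->pages[i]);      /* every slot is known-valid */
    }

    int main(void)
    {
        struct vma_priv p = { 0 };

        printf("alloc: %d, got %u pages\n",
               alloc_pages_up_to(&p, 8, 5), p.n_pages);
        free_all(&p);
        return 0;
    }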
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 59970886690f..a7b44863d502 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -576,6 +576,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
{
signed long rtt2, timeout;
long ret;
+ bool stalled = false;
u64 rtt;
u32 life, last_life;
@@ -609,12 +610,20 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
if (timeout == 0 &&
- life == last_life && signal_pending(current))
+ life == last_life && signal_pending(current)) {
+ if (stalled)
break;
+ __set_current_state(TASK_RUNNING);
+ rxrpc_kernel_probe_life(call->net->socket, call->rxcall);
+ timeout = rtt2;
+ stalled = true;
+ continue;
+ }
if (life != last_life) {
timeout = rtt2;
last_life = life;
+ stalled = false;
}
timeout = schedule_timeout(timeout);
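
The afs wait loop above no longer aborts on the first apparent stall: it probes the call's liveness once, re-arms the timeout, and only gives up if a second full timeout passes with no change in the life counter. A schematic model of that state machine, with the RxRPC plumbing replaced by a fake life counter and invented names:

    #include <stdbool.h>
    #include <stdio.h>

    static int fake_life(int tick) { return tick < 3 ? tick : 3; }

    static int wait_for_call(void)
    {
        bool stalled = false;
        int last_life = -1, timeout = 2, tick;

        for (tick = 0; tick < 20; tick++) {
            int life = fake_life(tick);

            if (timeout == 0 && life == last_life) {
                if (stalled)
                    return -1;      /* really dead, abort */
                printf("probe at tick %d\n", tick);
                timeout = 2;        /* re-arm after probing */
                stalled = true;
                continue;
            }
            if (life != last_life) {
                timeout = 2;        /* progress seen, reset */
                last_life = life;
                stalled = false;
            }
            if (timeout > 0)
                timeout--;
        }
        return 0;
    }

    int main(void)
    {
        printf("result: %d\n", wait_for_call());
        return 0;
    }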
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 80953528572d..68f322f600a0 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3163,6 +3163,9 @@ void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int __init btrfs_init_cachep(void);
void __cold btrfs_destroy_cachep(void);
+struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
+ struct btrfs_root *root, int *new,
+ struct btrfs_path *path);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *was_new);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b0ab41da91d1..3f0b6d1936e8 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1664,9 +1664,8 @@ static int cleaner_kthread(void *arg)
struct btrfs_root *root = arg;
struct btrfs_fs_info *fs_info = root->fs_info;
int again;
- struct btrfs_trans_handle *trans;
- do {
+ while (1) {
again = 0;
/* Make the cleaner go to sleep early. */
@@ -1715,42 +1714,16 @@ static int cleaner_kthread(void *arg)
*/
btrfs_delete_unused_bgs(fs_info);
sleep:
+ if (kthread_should_park())
+ kthread_parkme();
+ if (kthread_should_stop())
+ return 0;
if (!again) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!kthread_should_stop())
- schedule();
+ schedule();
__set_current_state(TASK_RUNNING);
}
- } while (!kthread_should_stop());
-
- /*
- * Transaction kthread is stopped before us and wakes us up.
- * However we might have started a new transaction and COWed some
- * tree blocks when deleting unused block groups for example. So
- * make sure we commit the transaction we started to have a clean
- * shutdown when evicting the btree inode - if it has dirty pages
- * when we do the final iput() on it, eviction will trigger a
- * writeback for it which will fail with null pointer dereferences
- * since work queues and other resources were already released and
- * destroyed by the time the iput/eviction/writeback is made.
- */
- trans = btrfs_attach_transaction(root);
- if (IS_ERR(trans)) {
- if (PTR_ERR(trans) != -ENOENT)
- btrfs_err(fs_info,
- "cleaner transaction attach returned %ld",
- PTR_ERR(trans));
- } else {
- int ret;
-
- ret = btrfs_commit_transaction(trans);
- if (ret)
- btrfs_err(fs_info,
- "cleaner open transaction commit returned %d",
- ret);
}
-
- return 0;
}
static int transaction_kthread(void *arg)
@@ -3931,6 +3904,13 @@ void close_ctree(struct btrfs_fs_info *fs_info)
int ret;
set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
+ /*
+ * We don't want the cleaner to start new transactions, add more delayed
+ * iputs, etc. while we're closing. We can't use kthread_stop() yet
+ * because that frees the task_struct, and the transaction kthread might
+ * still try to wake up the cleaner.
+ */
+ kthread_park(fs_info->cleaner_kthread);
/* wait for the qgroup rescan worker to stop */
btrfs_qgroup_wait_for_completion(fs_info, false);
@@ -3958,9 +3938,8 @@ void close_ctree(struct btrfs_fs_info *fs_info)
if (!sb_rdonly(fs_info->sb)) {
/*
- * If the cleaner thread is stopped and there are
- * block groups queued for removal, the deletion will be
- * skipped when we quit the cleaner thread.
+ * The cleaner kthread is stopped, so do one final pass over
+ * unused block groups.
*/
btrfs_delete_unused_bgs(fs_info);
@@ -4359,13 +4338,23 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
unpin = pinned_extents;
again:
while (1) {
+ /*
+ * The btrfs_finish_extent_commit() may get the same range as
+ * ours between find_first_extent_bit and clear_extent_dirty.
+ * Hence, hold the unused_bg_unpin_mutex to avoid double unpin
+ * the same extent range.
+ */
+ mutex_lock(&fs_info->unused_bg_unpin_mutex);
ret = find_first_extent_bit(unpin, 0, &start, &end,
EXTENT_DIRTY, NULL);
- if (ret)
+ if (ret) {
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
break;
+ }
clear_extent_dirty(unpin, start, end);
btrfs_error_unpin_extent_range(fs_info, start, end);
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
}
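
The cleaner kthread above now honors kthread_parkme() before checking for stop, which is what lets close_ctree() freeze it early (so no new transactions or delayed iputs appear) while still being able to kthread_stop() it later. A userspace approximation using pthreads and atomic flags in place of the kthread API; all names are invented:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int should_park, parked, should_stop;

    static void *cleaner(void *arg)
    {
        (void)arg;
        while (1) {
            /* ... do one round of cleaning work here ... */
            if (atomic_load(&should_park)) {
                atomic_store(&parked, 1);
                while (atomic_load(&should_park))
                    usleep(1000);   /* sit parked, doing nothing */
                atomic_store(&parked, 0);
            }
            if (atomic_load(&should_stop))
                return NULL;
            usleep(1000);
        }
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, cleaner, NULL);
        atomic_store(&should_park, 1);      /* close_ctree() starts */
        while (!atomic_load(&parked))
            usleep(1000);
        printf("cleaner parked; safe to tear down\n");
        atomic_store(&should_stop, 1);      /* then stop it for real */
        atomic_store(&should_park, 0);
        pthread_join(t, NULL);
        printf("cleaner stopped\n");
        return 0;
    }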
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 4ba0aedc878b..74aa552f4793 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -75,7 +75,8 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
* sure NOFS is set to keep us from deadlocking.
*/
nofs_flag = memalloc_nofs_save();
- inode = btrfs_iget(fs_info->sb, &location, root, NULL);
+ inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path);
+ btrfs_release_path(path);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(inode))
return inode;
@@ -838,6 +839,25 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
path->search_commit_root = 1;
path->skip_locking = 1;
+ /*
+ * We must pass a path with search_commit_root set to btrfs_iget in
+ * order to avoid a deadlock when allocating extents for the tree root.
+ *
+ * When we are COWing an extent buffer from the tree root, when looking
+ * for a free extent, at extent-tree.c:find_free_extent(), we can find
+ * a block group without its free space cache loaded. When we find one
+ * we must load its space cache which requires reading its free space
+ * cache's inode item from the root tree. If this inode item is located
+ * in the same leaf that we started COWing before, then we end up in
+ * deadlock on the extent buffer (trying to read lock it when we
+ * previously write locked it).
+ *
+ * It's safe to read the inode item using the commit root because
+ * block groups, once loaded, stay in memory forever (until they are
+ * removed) as well as their space caches once loaded. New block groups
+ * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
+ * we will never try to read their inode item while the fs is mounted.
+ */
inode = lookup_free_space_inode(fs_info, block_group, path);
if (IS_ERR(inode)) {
btrfs_free_path(path);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d3df5b52278c..9ea4c6f0352f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1531,12 +1531,11 @@ out_check:
}
btrfs_release_path(path);
- if (cur_offset <= end && cow_start == (u64)-1) {
+ if (cur_offset <= end && cow_start == (u64)-1)
cow_start = cur_offset;
- cur_offset = end;
- }
if (cow_start != (u64)-1) {
+ cur_offset = end;
ret = cow_file_range(inode, locked_page, cow_start, end, end,
page_started, nr_written, 1, NULL);
if (ret)
@@ -3570,10 +3569,11 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
/*
* read an inode from the btree into the in-memory inode
*/
-static int btrfs_read_locked_inode(struct inode *inode)
+static int btrfs_read_locked_inode(struct inode *inode,
+ struct btrfs_path *in_path)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_path *path;
+ struct btrfs_path *path = in_path;
struct extent_buffer *leaf;
struct btrfs_inode_item *inode_item;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -3589,15 +3589,18 @@ static int btrfs_read_locked_inode(struct inode *inode)
if (!ret)
filled = true;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
+ if (!path) {
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ }
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret) {
- btrfs_free_path(path);
+ if (path != in_path)
+ btrfs_free_path(path);
return ret;
}
@@ -3722,7 +3725,8 @@ cache_acl:
btrfs_ino(BTRFS_I(inode)),
root->root_key.objectid, ret);
}
- btrfs_free_path(path);
+ if (path != in_path)
+ btrfs_free_path(path);
if (!maybe_acls)
cache_no_acl(inode);
@@ -5644,8 +5648,9 @@ static struct inode *btrfs_iget_locked(struct super_block *s,
/* Get an inode object given its location and corresponding root.
* Returns in *is_new if the inode was read from disk
*/
-struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root, int *new)
+struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
+ struct btrfs_root *root, int *new,
+ struct btrfs_path *path)
{
struct inode *inode;
@@ -5656,7 +5661,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
if (inode->i_state & I_NEW) {
int ret;
- ret = btrfs_read_locked_inode(inode);
+ ret = btrfs_read_locked_inode(inode, path);
if (!ret) {
inode_tree_add(inode);
unlock_new_inode(inode);
@@ -5678,6 +5683,12 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
return inode;
}
+struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
+ struct btrfs_root *root, int *new)
+{
+ return btrfs_iget_path(s, location, root, new, NULL);
+}
+
static struct inode *new_simple_dir(struct super_block *s,
struct btrfs_key *key,
struct btrfs_root *root)
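
btrfs_read_locked_inode() and btrfs_iget_path() above use a common optional-resource idiom: accept a caller-supplied path, allocate one only when none is given, and free only what was allocated locally (path != in_path). A generic userspace sketch of that idiom with a plain buffer in place of a btrfs_path:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int read_item(char *in_buf)
    {
        char *buf = in_buf;

        if (!buf) {
            buf = malloc(64);
            if (!buf)
                return -1;
        }
        strcpy(buf, "inode item");   /* the actual lookup work */
        printf("read: %s\n", buf);
        if (buf != in_buf)
            free(buf);               /* only free our own buffer */
        return 0;
    }

    int main(void)
    {
        char mine[64];

        read_item(NULL);    /* helper allocates and frees internally */
        read_item(mine);    /* caller's buffer is reused, never freed */
        return 0;
    }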
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 3ca6943827ef..802a628e9f7d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3488,6 +3488,8 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
len = round_down(i_size_read(src), sz) - loff;
+ if (len == 0)
+ return 0;
olen = len;
}
}
@@ -4257,9 +4259,17 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
goto out_unlock;
if (len == 0)
olen = len = src->i_size - off;
- /* if we extend to eof, continue to block boundary */
- if (off + len == src->i_size)
+ /*
+ * If we extend to eof, continue to block boundary if and only if the
+ * destination end offset matches the destination file's size, otherwise
+ * we would be corrupting data by placing the eof block into the middle
+ * of a file.
+ */
+ if (off + len == src->i_size) {
+ if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
+ goto out_unlock;
len = ALIGN(src->i_size, bs) - off;
+ }
if (len == 0) {
ret = 0;
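
The btrfs_clone_files() check above refuses to round len up to the block size when the rounded EOF block would land inside the destination file. A minimal userspace model of that decision (bs, the sizes and the helper name are invented for illustration, not btrfs API):

#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* Model: is it safe to extend a clone that reaches source EOF out to the
 * block boundary, given the destination offset and size? */
static int clone_tail_ok(uint64_t off, uint64_t len, uint64_t src_size,
			 uint64_t destoff, uint64_t dest_size, uint64_t bs)
{
	if (off + len == src_size &&		/* clone reaches source EOF */
	    !IS_ALIGNED(len, bs) &&		/* partial tail block */
	    destoff + len < dest_size)		/* tail lands mid-file */
		return 0;			/* would corrupt data */
	return 1;
}

int main(void)
{
	/* 4 KiB blocks, 10000-byte source: the tail block is partial. */
	printf("%d\n", clone_tail_ok(0, 10000, 10000, 0, 1 << 20, 4096)); /* 0 */
	printf("%d\n", clone_tail_ok(0, 10000, 10000, 0, 10000, 4096));   /* 1 */
	return 0;
}
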
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index b362b45dd757..cbc9d0d2c12d 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1916,7 +1916,7 @@ restore:
}
/* Used to sort the devices by max_avail (descending sort) */
-static int btrfs_cmp_device_free_bytes(const void *dev_info1,
+static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
const void *dev_info2)
{
if (((struct btrfs_device_info *)dev_info1)->max_avail >
@@ -1945,8 +1945,8 @@ static inline void btrfs_descending_sort_devices(
* The helper to calculate the free space on the devices that can be used to
* store file data.
*/
-static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
- u64 *free_bytes)
+static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
+ u64 *free_bytes)
{
struct btrfs_device_info *devices_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index cab0b1f1f741..efcf89a8ba44 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -440,7 +440,7 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
type != (BTRFS_BLOCK_GROUP_METADATA |
BTRFS_BLOCK_GROUP_DATA)) {
block_group_err(fs_info, leaf, slot,
-"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llu or 0x%llx",
+"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
type, hweight64(type),
BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
BTRFS_BLOCK_GROUP_SYSTEM,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index e07f3376b7df..a5ce99a6c936 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4396,6 +4396,23 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
logged_end = end;
list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
+ /*
+ * Skip extents outside our logging range. It's important to do
+ * it for correctness because if we don't ignore them, we may
+ * log them before their ordered extent completes, and therefore
+ * we could log them without logging their respective checksums
+ * (the checksum items are added to the csum tree at the very
+ * end of btrfs_finish_ordered_io()). Also leave such extents
+ * outside of our range in the list, since we may have another
+ * ranged fsync in the near future that needs them. If an extent
+ * outside our range corresponds to a hole, log it to avoid
+ * leaving gaps between extents (fsck will complain when we are
+ * not using the NO_HOLES feature).
+ */
+ if ((em->start > end || em->start + em->len <= start) &&
+ em->block_start != EXTENT_MAP_HOLE)
+ continue;
+
list_del_init(&em->list);
/*
* Just an arbitrary number, this can be really CPU intensive
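
The new skip test in btrfs_log_changed_extents() above is an interval-disjointness check with a carve-out for holes. A hedged standalone restatement (the struct below is a minimal stand-in for struct extent_map, not the kernel type):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct em {			/* minimal stand-in for struct extent_map */
	uint64_t start;
	uint64_t len;
	bool is_hole;
};

/* True if the extent may be skipped by a ranged fsync of [start, end]. */
static bool skip_extent(const struct em *em, uint64_t start, uint64_t end)
{
	bool outside = em->start > end || em->start + em->len <= start;

	/* Holes are logged even when outside, to avoid gaps without NO_HOLES. */
	return outside && !em->is_hole;
}

int main(void)
{
	struct em e = { .start = 8192, .len = 4096, .is_hole = false };

	printf("%d\n", skip_extent(&e, 0, 4095));	/* 1: fully past the range */
	printf("%d\n", skip_extent(&e, 0, 8192));	/* 0: overlaps the range end */
	return 0;
}
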
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 27cad84dab23..189df668b6a0 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1931,10 +1931,17 @@ static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
if (!prealloc_cf)
return -ENOMEM;
- /* Start by sync'ing the source file */
+ /* Start by sync'ing the source and destination files */
ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
- if (ret < 0)
+ if (ret < 0) {
+ dout("failed to write src file (%zd)\n", ret);
+ goto out;
+ }
+ ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
+ if (ret < 0) {
+ dout("failed to write dst file (%zd)\n", ret);
goto out;
+ }
/*
* We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 67a9aeb2f4ec..bd13a3267ae0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -80,12 +80,8 @@ static int parse_reply_info_in(void **p, void *end,
info->symlink = *p;
*p += info->symlink_len;
- if (features & CEPH_FEATURE_DIRLAYOUTHASH)
- ceph_decode_copy_safe(p, end, &info->dir_layout,
- sizeof(info->dir_layout), bad);
- else
- memset(&info->dir_layout, 0, sizeof(info->dir_layout));
-
+ ceph_decode_copy_safe(p, end, &info->dir_layout,
+ sizeof(info->dir_layout), bad);
ceph_decode_32_safe(p, end, info->xattr_len, bad);
ceph_decode_need(p, end, info->xattr_len, bad);
info->xattr_data = *p;
@@ -3182,10 +3178,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
recon_state.pagelist = pagelist;
if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
recon_state.msg_version = 3;
- else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
- recon_state.msg_version = 2;
else
- recon_state.msg_version = 1;
+ recon_state.msg_version = 2;
err = iterate_session_caps(session, encode_caps_cb, &recon_state);
if (err < 0)
goto fail;
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 32d4f13784ba..03f4d24db8fe 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -237,7 +237,8 @@ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op,
ceph_put_snap_realm(mdsc, realm);
realm = next;
}
- ceph_put_snap_realm(mdsc, realm);
+ if (realm)
+ ceph_put_snap_realm(mdsc, realm);
up_read(&mdsc->snap_rwsem);
return exceeded;
diff --git a/fs/dax.c b/fs/dax.c
index 616e36ea6aaa..9bcce89ea18e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -98,12 +98,6 @@ static void *dax_make_entry(pfn_t pfn, unsigned long flags)
return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}
-static void *dax_make_page_entry(struct page *page)
-{
- pfn_t pfn = page_to_pfn_t(page);
- return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
-}
-
static bool dax_is_locked(void *entry)
{
return xa_to_value(entry) & DAX_LOCKED;
@@ -116,12 +110,12 @@ static unsigned int dax_entry_order(void *entry)
return 0;
}
-static int dax_is_pmd_entry(void *entry)
+static unsigned long dax_is_pmd_entry(void *entry)
{
return xa_to_value(entry) & DAX_PMD;
}
-static int dax_is_pte_entry(void *entry)
+static bool dax_is_pte_entry(void *entry)
{
return !(xa_to_value(entry) & DAX_PMD);
}
@@ -222,9 +216,8 @@ static void *get_unlocked_entry(struct xa_state *xas)
ewait.wait.func = wake_exceptional_entry_func;
for (;;) {
- entry = xas_load(xas);
- if (!entry || xa_is_internal(entry) ||
- WARN_ON_ONCE(!xa_is_value(entry)) ||
+ entry = xas_find_conflict(xas);
+ if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
!dax_is_locked(entry))
return entry;
@@ -255,6 +248,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
void *old;
+ BUG_ON(dax_is_locked(entry));
xas_reset(xas);
xas_lock_irq(xas);
old = xas_store(xas, entry);
@@ -352,16 +346,27 @@ static struct page *dax_busy_page(void *entry)
return NULL;
}
+/*
+ * dax_lock_mapping_entry - Lock the DAX entry corresponding to a page
+ * @page: The page whose entry we want to lock
+ *
+ * Context: Process context.
+ * Return: %true if the entry was locked or does not need to be locked.
+ */
bool dax_lock_mapping_entry(struct page *page)
{
XA_STATE(xas, NULL, 0);
void *entry;
+ bool locked;
+ /* Ensure page->mapping isn't freed while we look at it */
+ rcu_read_lock();
for (;;) {
struct address_space *mapping = READ_ONCE(page->mapping);
+ locked = false;
if (!dax_mapping(mapping))
- return false;
+ break;
/*
* In the device-dax case there's no need to lock, a
@@ -370,8 +375,9 @@ bool dax_lock_mapping_entry(struct page *page)
* otherwise we would not have a valid pfn_to_page()
* translation.
*/
+ locked = true;
if (S_ISCHR(mapping->host->i_mode))
- return true;
+ break;
xas.xa = &mapping->i_pages;
xas_lock_irq(&xas);
@@ -382,28 +388,35 @@ bool dax_lock_mapping_entry(struct page *page)
xas_set(&xas, page->index);
entry = xas_load(&xas);
if (dax_is_locked(entry)) {
+ rcu_read_unlock();
entry = get_unlocked_entry(&xas);
- /* Did the page move while we slept? */
- if (dax_to_pfn(entry) != page_to_pfn(page)) {
- xas_unlock_irq(&xas);
- continue;
- }
+ xas_unlock_irq(&xas);
+ put_unlocked_entry(&xas, entry);
+ rcu_read_lock();
+ continue;
}
dax_lock_entry(&xas, entry);
xas_unlock_irq(&xas);
- return true;
+ break;
}
+ rcu_read_unlock();
+ return locked;
}
void dax_unlock_mapping_entry(struct page *page)
{
struct address_space *mapping = page->mapping;
XA_STATE(xas, &mapping->i_pages, page->index);
+ void *entry;
if (S_ISCHR(mapping->host->i_mode))
return;
- dax_unlock_entry(&xas, dax_make_page_entry(page));
+ rcu_read_lock();
+ entry = xas_load(&xas);
+ rcu_read_unlock();
+ entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
+ dax_unlock_entry(&xas, entry);
}
/*
@@ -445,11 +458,9 @@ static void *grab_mapping_entry(struct xa_state *xas,
retry:
xas_lock_irq(xas);
entry = get_unlocked_entry(xas);
- if (xa_is_internal(entry))
- goto fallback;
if (entry) {
- if (WARN_ON_ONCE(!xa_is_value(entry))) {
+ if (!xa_is_value(entry)) {
xas_set_err(xas, EIO);
goto out_unlock;
}
@@ -1628,8 +1639,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
/* Did we race with someone splitting entry or so? */
if (!entry ||
(order == 0 && !dax_is_pte_entry(entry)) ||
- (order == PMD_ORDER && (xa_is_internal(entry) ||
- !dax_is_pmd_entry(entry)))) {
+ (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
put_unlocked_entry(&xas, entry);
xas_unlock_irq(&xas);
trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
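
Several of the dax.c changes above follow from one fact: the lock is a flag bit stored inside the XArray value entry itself, which is why dax_unlock_mapping_entry() now reloads the entry to recover the PTE-vs-PMD flag before rebuilding it. A tiny model of such tagged value entries (the bit layout below is invented for illustration; it is not the kernel's DAX_* layout):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented bit layout: [ pfn ... | PMD | LOCKED ] */
#define E_LOCKED	(1u << 0)
#define E_PMD		(1u << 1)
#define E_PFN_SHIFT	2

static uint64_t make_entry(uint64_t pfn, uint64_t flags)
{
	return (pfn << E_PFN_SHIFT) | flags;
}

static bool is_locked(uint64_t e)  { return e & E_LOCKED; }
static uint64_t lock(uint64_t e)   { return e | E_LOCKED; }
static uint64_t unlock(uint64_t e) { return e & ~(uint64_t)E_LOCKED; }

int main(void)
{
	uint64_t e = make_entry(0x1234, E_PMD);

	e = lock(e);
	printf("locked=%d pmd=%d\n", is_locked(e), !!(e & E_PMD));
	e = unlock(e);	/* must preserve E_PMD, as the unlock fix does */
	printf("locked=%d pmd=%d\n", is_locked(e), !!(e & E_PMD));
	return 0;
}
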
diff --git a/fs/exec.c b/fs/exec.c
index fc281b738a98..acc3a5536384 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -62,6 +62,7 @@
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
+#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -1083,7 +1084,7 @@ static int de_thread(struct task_struct *tsk)
while (sig->notify_count) {
__set_current_state(TASK_KILLABLE);
spin_unlock_irq(lock);
- schedule();
+ freezable_schedule();
if (unlikely(__fatal_signal_pending(tsk)))
goto killed;
spin_lock_irq(lock);
@@ -1111,7 +1112,7 @@ static int de_thread(struct task_struct *tsk)
__set_current_state(TASK_KILLABLE);
write_unlock_irq(&tasklist_lock);
cgroup_threadgroup_change_end(tsk);
- schedule();
+ freezable_schedule();
if (unlikely(__fatal_signal_pending(tsk)))
goto killed;
}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 05f01fbd9c7f..22a9d8159720 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5835,9 +5835,10 @@ int ext4_mark_iloc_dirty(handle_t *handle,
{
int err = 0;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
+ put_bh(iloc->bh);
return -EIO;
-
+ }
if (IS_I_VERSION(inode))
inode_inc_iversion(inode);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 17adcb16a9c8..437f71fe83ae 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -126,6 +126,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
if (!is_dx_block && type == INDEX) {
ext4_error_inode(inode, func, line, block,
"directory leaf block found instead of index block");
+ brelse(bh);
return ERR_PTR(-EFSCORRUPTED);
}
if (!ext4_has_metadata_csum(inode->i_sb) ||
@@ -2811,7 +2812,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
list_del_init(&EXT4_I(inode)->i_orphan);
mutex_unlock(&sbi->s_orphan_lock);
}
- }
+ } else
+ brelse(iloc.bh);
+
jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
jbd_debug(4, "orphan inode %lu will point to %d\n",
inode->i_ino, NEXT_ORPHAN(inode));
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ebbc663d0798..a5efee34415f 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -459,16 +459,18 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
BUFFER_TRACE(bh, "get_write_access");
err = ext4_journal_get_write_access(handle, bh);
- if (err)
+ if (err) {
+ brelse(bh);
return err;
+ }
ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
first_cluster, first_cluster - start, count2);
ext4_set_bits(bh->b_data, first_cluster - start, count2);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ brelse(bh);
if (unlikely(err))
return err;
- brelse(bh);
}
return 0;
@@ -605,7 +607,6 @@ handle_bb:
bh = bclean(handle, sb, block);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
- bh = NULL;
goto out;
}
overhead = ext4_group_overhead_blocks(sb, group);
@@ -618,9 +619,9 @@ handle_bb:
ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
sb->s_blocksize * 8, bh->b_data);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ brelse(bh);
if (err)
goto out;
- brelse(bh);
handle_ib:
if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
@@ -635,18 +636,16 @@ handle_ib:
bh = bclean(handle, sb, block);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
- bh = NULL;
goto out;
}
ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
sb->s_blocksize * 8, bh->b_data);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ brelse(bh);
if (err)
goto out;
- brelse(bh);
}
- bh = NULL;
/* Mark group tables in block bitmap */
for (j = 0; j < GROUP_TABLE_COUNT; j++) {
@@ -685,7 +684,6 @@ handle_ib:
}
out:
- brelse(bh);
err2 = ext4_journal_stop(handle);
if (err2 && !err)
err = err2;
@@ -873,6 +871,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
if (unlikely(err)) {
ext4_std_error(sb, err);
+ iloc.bh = NULL;
goto exit_inode;
}
brelse(dind);
@@ -924,6 +923,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
sizeof(struct buffer_head *),
GFP_NOFS);
if (!n_group_desc) {
+ brelse(gdb_bh);
err = -ENOMEM;
ext4_warning(sb, "not enough memory for %lu groups",
gdb_num + 1);
@@ -939,8 +939,6 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
kvfree(o_group_desc);
BUFFER_TRACE(gdb_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, gdb_bh);
- if (unlikely(err))
- brelse(gdb_bh);
return err;
}
@@ -1124,8 +1122,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
backup_block, backup_block -
ext4_group_first_block_no(sb, group));
BUFFER_TRACE(bh, "get_write_access");
- if ((err = ext4_journal_get_write_access(handle, bh)))
+ if ((err = ext4_journal_get_write_access(handle, bh))) {
+ brelse(bh);
break;
+ }
lock_buffer(bh);
memcpy(bh->b_data, data, size);
if (rest)
@@ -2023,7 +2023,7 @@ retry:
err = ext4_alloc_flex_bg_array(sb, n_group + 1);
if (err)
- return err;
+ goto out;
err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
if (err)
@@ -2059,6 +2059,10 @@ retry:
n_blocks_count_retry = 0;
free_flex_gd(flex_gd);
flex_gd = NULL;
+ if (resize_inode) {
+ iput(resize_inode);
+ resize_inode = NULL;
+ }
goto retry;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index a221f1cdf704..53ff6c2a26ed 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4075,6 +4075,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_groups_count = blocks_count;
sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
+ if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+ le32_to_cpu(es->s_inodes_count)) {
+ ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+ le32_to_cpu(es->s_inodes_count),
+ ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+ ret = -EINVAL;
+ goto failed_mount;
+ }
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
if (ext4_has_feature_meta_bg(sb)) {
@@ -4094,14 +4102,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
ret = -ENOMEM;
goto failed_mount;
}
- if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
- le32_to_cpu(es->s_inodes_count)) {
- ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
- le32_to_cpu(es->s_inodes_count),
- ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
- ret = -EINVAL;
- goto failed_mount;
- }
bgl_lock_init(sbi->s_blockgroup_lock);
@@ -4510,6 +4510,7 @@ failed_mount6:
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+ percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
failed_mount5:
ext4_ext_release(sb);
ext4_release_system_zone(sb);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index f36fc5d5b257..7643d52c776c 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1031,10 +1031,8 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
inode_lock(ea_inode);
ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
- if (ret) {
- iloc.bh = NULL;
+ if (ret)
goto out;
- }
ref_count = ext4_xattr_inode_get_ref(ea_inode);
ref_count += ref_change;
@@ -1080,12 +1078,10 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
}
ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
- iloc.bh = NULL;
if (ret)
ext4_warning_inode(ea_inode,
"ext4_mark_iloc_dirty() failed ret=%d", ret);
out:
- brelse(iloc.bh);
inode_unlock(ea_inode);
return ret;
}
@@ -1388,6 +1384,12 @@ retry:
bh = ext4_getblk(handle, ea_inode, block, 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
+ if (!bh) {
+ WARN_ON_ONCE(1);
+ EXT4_ERROR_INODE(ea_inode,
+ "ext4_getblk() return bh = NULL");
+ return -EFSCORRUPTED;
+ }
ret = ext4_journal_get_write_access(handle, bh);
if (ret)
goto out;
@@ -2276,8 +2278,10 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
if (!bh)
return ERR_PTR(-EIO);
error = ext4_xattr_check_block(inode, bh);
- if (error)
+ if (error) {
+ brelse(bh);
return ERR_PTR(error);
+ }
return bh;
}
@@ -2397,6 +2401,8 @@ retry_inode:
error = ext4_xattr_block_set(handle, inode, &i, &bs);
} else if (error == -ENOSPC) {
if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
+ brelse(bs.bh);
+ bs.bh = NULL;
error = ext4_xattr_block_find(inode, &i, &bs);
if (error)
goto cleanup;
@@ -2617,6 +2623,8 @@ out:
kfree(buffer);
if (is)
brelse(is->iloc.bh);
+ if (bs)
+ brelse(bs->bh);
kfree(is);
kfree(bs);
@@ -2696,7 +2704,6 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
struct ext4_inode *raw_inode, handle_t *handle)
{
struct ext4_xattr_ibody_header *header;
- struct buffer_head *bh;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
static unsigned int mnt_count;
size_t min_offs;
@@ -2737,13 +2744,17 @@ retry:
* EA block can hold new_extra_isize bytes.
*/
if (EXT4_I(inode)->i_file_acl) {
+ struct buffer_head *bh;
+
bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
error = -EIO;
if (!bh)
goto cleanup;
error = ext4_xattr_check_block(inode, bh);
- if (error)
+ if (error) {
+ brelse(bh);
goto cleanup;
+ }
base = BHDR(bh);
end = bh->b_data + bh->b_size;
min_offs = end - base;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ae813e609932..a5e516a40e7a 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -165,9 +165,13 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
static void fuse_drop_waiting(struct fuse_conn *fc)
{
- if (fc->connected) {
- atomic_dec(&fc->num_waiting);
- } else if (atomic_dec_and_test(&fc->num_waiting)) {
+ /*
+ * lockless check of fc->connected is okay, because atomic_dec_and_test()
+ * provides a memory barrier matched with the one in fuse_wait_aborted()
+ * to ensure no wake-up is missed.
+ */
+ if (atomic_dec_and_test(&fc->num_waiting) &&
+ !READ_ONCE(fc->connected)) {
/* wake up aborters */
wake_up_all(&fc->blocked_waitq);
}
@@ -1768,8 +1772,10 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
req->in.args[1].size = total_len;
err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
- if (err)
+ if (err) {
fuse_retrieve_end(fc, req);
+ fuse_put_request(fc, req);
+ }
return err;
}
@@ -2219,6 +2225,8 @@ EXPORT_SYMBOL_GPL(fuse_abort_conn);
void fuse_wait_aborted(struct fuse_conn *fc)
{
+ /* matches implicit memory barrier in fuse_drop_waiting() */
+ smp_mb();
wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}
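
The pairing the fuse comments describe: the decrement side relies on the full barrier implied by atomic_dec_and_test(), while the abort side adds an explicit smp_mb() before re-reading the counter, so at least one side must observe the other's write and no wakeup is lost. A userspace analogue using C11 seq_cst atomics, whose RMW operations give the same ordering for free (function names are invented):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int num_waiting = ATOMIC_VAR_INIT(1);
static atomic_int connected   = ATOMIC_VAR_INIT(1);

/* Request side: drop our count; wake aborters only if we might be last. */
static void drop_waiting(void)
{
	/* seq_cst RMW orders the read of 'connected' after the decrement. */
	if (atomic_fetch_sub(&num_waiting, 1) == 1 &&
	    !atomic_load(&connected))
		puts("wake up aborters");
}

/* Abort side: flip 'connected', then re-check the count. */
static void wait_aborted(void)
{
	atomic_store(&connected, 0);	/* visible before the load below */
	if (atomic_load(&num_waiting) == 0)
		puts("no waiters left");
}

int main(void)
{
	wait_aborted();		/* sees num_waiting == 1, would sleep */
	drop_waiting();		/* last waiter: sees connected == 0, wakes */
	return 0;
}
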
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index cc2121b37bf5..b52f9baaa3e7 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2924,10 +2924,12 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
if (io->async) {
+ bool blocking = io->blocking;
+
fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
/* we have a non-extending, async request, so return */
- if (!io->blocking)
+ if (!blocking)
return -EIOCBQUEUED;
wait_for_completion(&wait);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index a683d9b27d76..9a4a15d646eb 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -826,7 +826,7 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
ret = gfs2_meta_inode_buffer(ip, &dibh);
if (ret)
goto unlock;
- iomap->private = dibh;
+ mp->mp_bh[0] = dibh;
if (gfs2_is_stuffed(ip)) {
if (flags & IOMAP_WRITE) {
@@ -863,9 +863,6 @@ unstuff:
len = lblock_stop - lblock + 1;
iomap->length = len << inode->i_blkbits;
- get_bh(dibh);
- mp->mp_bh[0] = dibh;
-
height = ip->i_height;
while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
height++;
@@ -898,8 +895,6 @@ out:
iomap->bdev = inode->i_sb->s_bdev;
unlock:
up_read(&ip->i_rw_mutex);
- if (ret && dibh)
- brelse(dibh);
return ret;
do_alloc:
@@ -980,9 +975,9 @@ static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos,
static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
loff_t length, unsigned flags,
- struct iomap *iomap)
+ struct iomap *iomap,
+ struct metapath *mp)
{
- struct metapath mp = { .mp_aheight = 1, };
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
@@ -996,9 +991,9 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
unstuff = gfs2_is_stuffed(ip) &&
pos + length > gfs2_max_stuffed_size(ip);
- ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
+ ret = gfs2_iomap_get(inode, pos, length, flags, iomap, mp);
if (ret)
- goto out_release;
+ goto out_unlock;
alloc_required = unstuff || iomap->type == IOMAP_HOLE;
@@ -1013,7 +1008,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
ret = gfs2_quota_lock_check(ip, &ap);
if (ret)
- goto out_release;
+ goto out_unlock;
ret = gfs2_inplace_reserve(ip, &ap);
if (ret)
@@ -1038,17 +1033,15 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
ret = gfs2_unstuff_dinode(ip, NULL);
if (ret)
goto out_trans_end;
- release_metapath(&mp);
- brelse(iomap->private);
- iomap->private = NULL;
+ release_metapath(mp);
ret = gfs2_iomap_get(inode, iomap->offset, iomap->length,
- flags, iomap, &mp);
+ flags, iomap, mp);
if (ret)
goto out_trans_end;
}
if (iomap->type == IOMAP_HOLE) {
- ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
+ ret = gfs2_iomap_alloc(inode, iomap, flags, mp);
if (ret) {
gfs2_trans_end(sdp);
gfs2_inplace_release(ip);
@@ -1056,7 +1049,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
goto out_qunlock;
}
}
- release_metapath(&mp);
if (!gfs2_is_stuffed(ip) && gfs2_is_jdata(ip))
iomap->page_done = gfs2_iomap_journaled_page_done;
return 0;
@@ -1069,10 +1061,7 @@ out_trans_fail:
out_qunlock:
if (alloc_required)
gfs2_quota_unlock(ip);
-out_release:
- if (iomap->private)
- brelse(iomap->private);
- release_metapath(&mp);
+out_unlock:
gfs2_write_unlock(inode);
return ret;
}
@@ -1088,10 +1077,10 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
trace_gfs2_iomap_start(ip, pos, length, flags);
if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) {
- ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap);
+ ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
} else {
ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
- release_metapath(&mp);
+
/*
* Silently fall back to buffered I/O for stuffed files or if
* we've got a hole (see gfs2_file_direct_write).
@@ -1100,6 +1089,11 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
iomap->type != IOMAP_MAPPED)
ret = -ENOTBLK;
}
+ if (!ret) {
+ get_bh(mp.mp_bh[0]);
+ iomap->private = mp.mp_bh[0];
+ }
+ release_metapath(&mp);
trace_gfs2_iomap_end(ip, iomap, ret);
return ret;
}
@@ -1908,10 +1902,16 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
if (ret < 0)
goto out;
- /* issue read-ahead on metadata */
- if (mp.mp_aheight > 1) {
- for (; ret > 1; ret--) {
- metapointer_range(&mp, mp.mp_aheight - ret,
+ /* On the first pass, issue read-ahead on metadata. */
+ if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
+ unsigned int height = mp.mp_aheight - 1;
+
+ /* No read-ahead for data blocks. */
+ if (mp.mp_aheight - 1 == strip_h)
+ height--;
+
+ for (; height >= mp.mp_aheight - ret; height--) {
+ metapointer_range(&mp, height,
start_list, start_aligned,
end_list, end_aligned,
&start, &end);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index ffe3032b1043..b08a530433ad 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -733,6 +733,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
if (gl) {
glock_clear_object(gl, rgd);
+ gfs2_rgrp_brelse(rgd);
gfs2_glock_put(gl);
}
@@ -1174,7 +1175,7 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd)
* @rgd: the struct gfs2_rgrpd describing the RG to read in
*
* Read in all of a Resource Group's header and bitmap blocks.
- * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
+ * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
*
* Returns: errno
*/
diff --git a/fs/inode.c b/fs/inode.c
index 9e198f00b64c..35d2108d567c 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -730,8 +730,11 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
return LRU_REMOVED;
}
- /* recently referenced inodes get one more pass */
- if (inode->i_state & I_REFERENCED) {
+ /*
+ * Recently referenced inodes and inodes with many attached pages
+ * get one more pass.
+ */
+ if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
inode->i_state &= ~I_REFERENCED;
spin_unlock(&inode->i_lock);
return LRU_ROTATE;
diff --git a/fs/iomap.c b/fs/iomap.c
index 64ce240217a1..3ffb776fbebe 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -142,13 +142,14 @@ static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
+ loff_t orig_pos = *pos;
+ loff_t isize = i_size_read(inode);
unsigned block_bits = inode->i_blkbits;
unsigned block_size = (1 << block_bits);
unsigned poff = offset_in_page(*pos);
unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
unsigned first = poff >> block_bits;
unsigned last = (poff + plen - 1) >> block_bits;
- unsigned end = offset_in_page(i_size_read(inode)) >> block_bits;
/*
* If the block size is smaller than the page size we need to check the
@@ -183,8 +184,12 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
* handle both halves separately so that we properly zero data in the
* page cache for blocks that are entirely outside of i_size.
*/
- if (first <= end && last > end)
- plen -= (last - end) * block_size;
+ if (orig_pos <= isize && orig_pos + length > isize) {
+ unsigned end = offset_in_page(isize - 1) >> block_bits;
+
+ if (first <= end && last > end)
+ plen -= (last - end) * block_size;
+ }
*offp = poff;
*lenp = plen;
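
The iomap_adjust_read_range() fix above derives the last valid block from (isize - 1) instead of isize, and only when the request actually straddles i_size; previously a page ending exactly at EOF computed end == 0 and trimmed perfectly valid blocks. A worked model of the corrected arithmetic (4 KiB pages and 512-byte blocks are example values):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define BLKBITS		9u		/* 512-byte blocks */

static unsigned adjust_plen(uint64_t pos, uint64_t len, uint64_t isize)
{
	unsigned bsize = 1u << BLKBITS;
	unsigned poff = pos % PAGE_SIZE;
	unsigned plen = (PAGE_SIZE - poff < len) ? PAGE_SIZE - poff
						 : (unsigned)len;
	unsigned first = poff >> BLKBITS;
	unsigned last = (poff + plen - 1) >> BLKBITS;

	if (pos <= isize && pos + len > isize) {	/* straddles i_size */
		unsigned end = (unsigned)((isize - 1) % PAGE_SIZE) >> BLKBITS;

		if (first <= end && last > end)
			plen -= (last - end) * bsize;
	}
	return plen;
}

int main(void)
{
	/* i_size exactly at a page boundary: nothing to trim. */
	printf("%u\n", adjust_plen(0, 4096, 4096));	/* 4096 */
	/* i_size mid-page: trim blocks entirely beyond EOF. */
	printf("%u\n", adjust_plen(0, 4096, 600));	/* 1024 */
	return 0;
}

With the old offset_in_page(isize) form and no straddle check, the first call would have computed end == 0 and wrongly trimmed the read to 512 bytes.
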
@@ -1580,7 +1585,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
struct bio *bio;
bool need_zeroout = false;
bool use_fua = false;
- int nr_pages, ret;
+ int nr_pages, ret = 0;
size_t copied = 0;
if ((pos | length | align) & ((1 << blkbits) - 1))
@@ -1596,12 +1601,13 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
if (iomap->flags & IOMAP_F_NEW) {
need_zeroout = true;
- } else {
+ } else if (iomap->type == IOMAP_MAPPED) {
/*
- * Use a FUA write if we need datasync semantics, this
- * is a pure data IO that doesn't require any metadata
- * updates and the underlying device supports FUA. This
- * allows us to avoid cache flushes on IO completion.
+ * Use a FUA write if we need datasync semantics, this is a pure
+ * data IO that doesn't require any metadata updates (including
+ * after IO completion such as unwritten extent conversion) and
+ * the underlying device supports FUA. This allows us to avoid
+ * cache flushes on IO completion.
*/
if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
(dio->flags & IOMAP_DIO_WRITE_FUA) &&
@@ -1644,8 +1650,14 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
ret = bio_iov_iter_get_pages(bio, &iter);
if (unlikely(ret)) {
+ /*
+ * We have to stop part way through an IO. We must fall
+ * through to the sub-block tail zeroing here, otherwise
+ * this short IO may expose stale data in the tail of
+ * the block we haven't written data to.
+ */
bio_put(bio);
- return copied ? copied : ret;
+ goto zero_tail;
}
n = bio->bi_iter.bi_size;
@@ -1676,13 +1688,21 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
dio->submit.cookie = submit_bio(bio);
} while (nr_pages);
- if (need_zeroout) {
+ /*
+ * We need to zeroout the tail of a sub-block write if the extent type
+ * requires zeroing or the write extends beyond EOF. If we don't zero
+ * the block tail in the latter case, we can expose stale data via mmap
+ * reads of the EOF block.
+ */
+zero_tail:
+ if (need_zeroout ||
+ ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
/* zero out from the end of the write to the end of the block */
pad = pos & (fs_block_size - 1);
if (pad)
iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
}
- return copied;
+ return copied ? copied : ret;
}
static loff_t
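
The zero_tail path above pads a short or EOF-extending direct write out to the filesystem block boundary so later mmap reads of the EOF block cannot observe stale bytes; the pad is simply the remainder of pos against the block size (a sketch with example values):

#include <stdint.h>
#include <stdio.h>

/* How many bytes must be zeroed after a write ending at 'pos'? */
static uint64_t tail_pad(uint64_t pos, uint64_t fs_block_size)
{
	uint64_t pad = pos & (fs_block_size - 1);

	return pad ? fs_block_size - pad : 0;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)tail_pad(10000, 4096)); /* 2288 */
	printf("%llu\n", (unsigned long long)tail_pad(8192, 4096));  /* 0 */
	return 0;
}
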
@@ -1857,6 +1877,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->wait_for_completion = true;
ret = 0;
}
+
+ /*
+ * Splicing to pipes can fail on a full pipe. We have to
+ * swallow this to make it look like a short IO,
+ * otherwise the higher splice layers will completely
+ * mishandle the error and stop moving data.
+ */
+ if (ret == -EFAULT)
+ ret = 0;
break;
}
pos += ret;
diff --git a/fs/namespace.c b/fs/namespace.c
index 98d27da43304..a7f91265ea67 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -695,9 +695,6 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
hlist_for_each_entry(mp, chain, m_hash) {
if (mp->m_dentry == dentry) {
- /* might be worth a WARN_ON() */
- if (d_unlinked(dentry))
- return ERR_PTR(-ENOENT);
mp->m_count++;
return mp;
}
@@ -711,6 +708,9 @@ static struct mountpoint *get_mountpoint(struct dentry *dentry)
int ret;
if (d_mountpoint(dentry)) {
+ /* might be worth a WARN_ON() */
+ if (d_unlinked(dentry))
+ return ERR_PTR(-ENOENT);
mountpoint:
read_seqlock_excl(&mount_lock);
mp = lookup_mountpoint(dentry);
@@ -1540,8 +1540,13 @@ static int do_umount(struct mount *mnt, int flags)
namespace_lock();
lock_mount_hash();
- event++;
+ /* Recheck MNT_LOCKED with the locks held */
+ retval = -EINVAL;
+ if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ goto out;
+
+ event++;
if (flags & MNT_DETACH) {
if (!list_empty(&mnt->mnt_list))
umount_tree(mnt, UMOUNT_PROPAGATE);
@@ -1555,6 +1560,7 @@ static int do_umount(struct mount *mnt, int flags)
retval = 0;
}
}
+out:
unlock_mount_hash();
namespace_unlock();
return retval;
@@ -1645,7 +1651,7 @@ int ksys_umount(char __user *name, int flags)
goto dput_and_out;
if (!check_mnt(mnt))
goto dput_and_out;
- if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
goto dput_and_out;
retval = -EPERM;
if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
@@ -1728,8 +1734,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
for (s = r; s; s = next_mnt(s, r)) {
if (!(flag & CL_COPY_UNBINDABLE) &&
IS_MNT_UNBINDABLE(s)) {
- s = skip_mnt_tree(s);
- continue;
+ if (s->mnt.mnt_flags & MNT_LOCKED) {
+ /* Both unbindable and locked. */
+ q = ERR_PTR(-EPERM);
+ goto out;
+ } else {
+ s = skip_mnt_tree(s);
+ continue;
+ }
}
if (!(flag & CL_COPY_MNT_NS_FILE) &&
is_mnt_ns_file(s->mnt.mnt_root)) {
@@ -1782,7 +1794,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
{
namespace_lock();
lock_mount_hash();
- umount_tree(real_mount(mnt), UMOUNT_SYNC);
+ umount_tree(real_mount(mnt), 0);
unlock_mount_hash();
namespace_unlock();
}
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index fa515d5ea5ba..315967354954 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -66,7 +66,7 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
out_iput:
rcu_read_unlock();
trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
- iput(inode);
+ nfs_iput_and_deactive(inode);
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
return res->status;
@@ -108,7 +108,7 @@ __be32 nfs4_callback_recall(void *argp, void *resp,
}
trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
&args->stateid, -ntohl(res));
- iput(inode);
+ nfs_iput_and_deactive(inode);
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
return res;
@@ -686,20 +686,24 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
{
struct cb_offloadargs *args = data;
struct nfs_server *server;
- struct nfs4_copy_state *copy;
+ struct nfs4_copy_state *copy, *tmp_copy;
bool found = false;
+ copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
+ if (!copy)
+ return htonl(NFS4ERR_SERVERFAULT);
+
spin_lock(&cps->clp->cl_lock);
rcu_read_lock();
list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
client_link) {
- list_for_each_entry(copy, &server->ss_copies, copies) {
+ list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
if (memcmp(args->coa_stateid.other,
- copy->stateid.other,
+ tmp_copy->stateid.other,
sizeof(args->coa_stateid.other)))
continue;
- nfs4_copy_cb_args(copy, args);
- complete(&copy->completion);
+ nfs4_copy_cb_args(tmp_copy, args);
+ complete(&tmp_copy->completion);
found = true;
goto out;
}
@@ -707,15 +711,11 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
out:
rcu_read_unlock();
if (!found) {
- copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
- if (!copy) {
- spin_unlock(&cps->clp->cl_lock);
- return htonl(NFS4ERR_SERVERFAULT);
- }
memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
nfs4_copy_cb_args(copy, args);
list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
- }
+ } else
+ kfree(copy);
spin_unlock(&cps->clp->cl_lock);
return 0;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 07b839560576..6ec2f78c1e19 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -850,16 +850,23 @@ nfs_delegation_find_inode_server(struct nfs_server *server,
const struct nfs_fh *fhandle)
{
struct nfs_delegation *delegation;
- struct inode *res = NULL;
+ struct inode *freeme, *res = NULL;
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
spin_lock(&delegation->lock);
if (delegation->inode != NULL &&
nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
- res = igrab(delegation->inode);
+ freeme = igrab(delegation->inode);
+ if (freeme && nfs_sb_active(freeme->i_sb))
+ res = freeme;
spin_unlock(&delegation->lock);
if (res != NULL)
return res;
+ if (freeme) {
+ rcu_read_unlock();
+ iput(freeme);
+ rcu_read_lock();
+ }
return ERR_PTR(-EAGAIN);
}
spin_unlock(&delegation->lock);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 86bcba40ca61..74b36ed883ca 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1361,12 +1361,7 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
task))
return;
- if (ff_layout_read_prepare_common(task, hdr))
- return;
-
- if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
- hdr->args.lock_context, FMODE_READ) == -EIO)
- rpc_exit(task, -EIO); /* lost lock, terminate I/O */
+ ff_layout_read_prepare_common(task, hdr);
}
static void ff_layout_read_call_done(struct rpc_task *task, void *data)
@@ -1542,12 +1537,7 @@ static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
task))
return;
- if (ff_layout_write_prepare_common(task, hdr))
- return;
-
- if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
- hdr->args.lock_context, FMODE_WRITE) == -EIO)
- rpc_exit(task, -EIO); /* lost lock, terminate I/O */
+ ff_layout_write_prepare_common(task, hdr);
}
static void ff_layout_write_call_done(struct rpc_task *task, void *data)
@@ -1742,6 +1732,10 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
if (fh)
hdr->args.fh = fh;
+
+ if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+ goto out_failed;
+
/*
* Note that if we ever decide to split across DSes,
* then we may need to handle dense-like offsets.
@@ -1804,6 +1798,9 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
if (fh)
hdr->args.fh = fh;
+ if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+ goto out_failed;
+
/*
* Note that if we ever decide to split across DSes,
* then we may need to handle dense-like offsets.
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index 411798346e48..de50a342d5a5 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -215,6 +215,10 @@ unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
unsigned int maxnum);
struct nfs_fh *
nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx);
+int
+nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
+ u32 mirror_idx,
+ nfs4_stateid *stateid);
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 74d8d5352438..d23347389626 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -370,6 +370,25 @@ out:
return fh;
}
+int
+nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
+ u32 mirror_idx,
+ nfs4_stateid *stateid)
+{
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
+
+ if (!ff_layout_mirror_valid(lseg, mirror, false)) {
+ pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
+ __func__, mirror_idx);
+ goto out;
+ }
+
+ nfs4_stateid_copy(stateid, &mirror->stateid);
+ return 1;
+out:
+ return 0;
+}
+
/**
* nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
* @lseg: the layout segment we're operating on
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index ac5b784a1de0..fed06fd9998d 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -137,31 +137,32 @@ static int handle_async_copy(struct nfs42_copy_res *res,
struct file *dst,
nfs4_stateid *src_stateid)
{
- struct nfs4_copy_state *copy;
+ struct nfs4_copy_state *copy, *tmp_copy;
int status = NFS4_OK;
bool found_pending = false;
struct nfs_open_context *ctx = nfs_file_open_context(dst);
+ copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
+ if (!copy)
+ return -ENOMEM;
+
spin_lock(&server->nfs_client->cl_lock);
- list_for_each_entry(copy, &server->nfs_client->pending_cb_stateids,
+ list_for_each_entry(tmp_copy, &server->nfs_client->pending_cb_stateids,
copies) {
- if (memcmp(&res->write_res.stateid, &copy->stateid,
+ if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
NFS4_STATEID_SIZE))
continue;
found_pending = true;
- list_del(&copy->copies);
+ list_del(&tmp_copy->copies);
break;
}
if (found_pending) {
spin_unlock(&server->nfs_client->cl_lock);
+ kfree(copy);
+ copy = tmp_copy;
goto out;
}
- copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
- if (!copy) {
- spin_unlock(&server->nfs_client->cl_lock);
- return -ENOMEM;
- }
memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
init_completion(&copy->completion);
copy->parent_state = ctx->state;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 8d59c9655ec4..1b994b527518 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -41,6 +41,8 @@ enum nfs4_client_state {
NFS4CLNT_MOVED,
NFS4CLNT_LEASE_MOVED,
NFS4CLNT_DELEGATION_EXPIRED,
+ NFS4CLNT_RUN_MANAGER,
+ NFS4CLNT_DELEGRETURN_RUNNING,
};
#define NFS4_RENEW_TIMEOUT 0x01
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 62ae0fd345ad..d8decf2ec48f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1210,6 +1210,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
struct task_struct *task;
char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
+ set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
return;
__module_get(THIS_MODULE);
@@ -2503,6 +2504,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* Ensure exclusive access to NFSv4 state */
do {
+ clear_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
section = "purge state";
status = nfs4_purge_lease(clp);
@@ -2593,19 +2595,24 @@ static void nfs4_state_manager(struct nfs_client *clp)
}
nfs4_end_drain_session(clp);
- if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
- nfs_client_return_marked_delegations(clp);
- continue;
+ nfs4_clear_state_manager_bit(clp);
+
+ if (!test_and_set_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state)) {
+ if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
+ nfs_client_return_marked_delegations(clp);
+ set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
+ }
+ clear_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state);
}
- nfs4_clear_state_manager_bit(clp);
/* Did we race with an attempt to give us more work? */
- if (clp->cl_state == 0)
- break;
+ if (!test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
+ return;
if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
- break;
- } while (refcount_read(&clp->cl_count) > 1);
- return;
+ return;
+ } while (refcount_read(&clp->cl_count) > 1 && !signalled());
+ goto out_drain;
+
out_error:
if (strlen(section))
section_sep = ": ";
@@ -2613,6 +2620,7 @@ out_error:
" with error %d\n", section_sep, section,
clp->cl_hostname, -status);
ssleep(1);
+out_drain:
nfs4_end_drain_session(clp);
nfs4_clear_state_manager_bit(clp);
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index edff074d38c7..d505990dac7c 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1038,6 +1038,9 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
__be32 status;
+ if (!cstate->save_fh.fh_dentry)
+ return nfserr_nofilehandle;
+
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
src_stateid, RD_STATE, src, NULL);
if (status) {
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index de99db518571..f2129a5d9f23 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -266,9 +266,7 @@ void nilfs_btnode_abort_change_key(struct address_space *btnc,
return;
if (nbh == NULL) { /* blocksize == pagesize */
- xa_lock_irq(&btnc->i_pages);
- __xa_erase(&btnc->i_pages, newkey);
- xa_unlock_irq(&btnc->i_pages);
+ xa_erase_irq(&btnc->i_pages, newkey);
unlock_page(ctxt->bh->b_page);
} else
brelse(nbh);
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 5769cf3ff035..e08a6647267b 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -115,12 +115,12 @@ static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
continue;
mark = iter_info->marks[type];
/*
- * if the event is for a child and this inode doesn't care about
- * events on the child, don't send it!
+ * If the event is for a child and this mark doesn't care about
+ * events on a child, don't send it!
*/
- if (type == FSNOTIFY_OBJ_TYPE_INODE &&
- (event_mask & FS_EVENT_ON_CHILD) &&
- !(mark->mask & FS_EVENT_ON_CHILD))
+ if (event_mask & FS_EVENT_ON_CHILD &&
+ (type != FSNOTIFY_OBJ_TYPE_INODE ||
+ !(mark->mask & FS_EVENT_ON_CHILD)))
continue;
marks_mask |= mark->mask;
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 2172ba516c61..d2c34900ae05 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -167,9 +167,9 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask
parent = dget_parent(dentry);
p_inode = parent->d_inode;
- if (unlikely(!fsnotify_inode_watches_children(p_inode)))
+ if (unlikely(!fsnotify_inode_watches_children(p_inode))) {
__fsnotify_update_child_dentry_flags(p_inode);
- else if (p_inode->i_fsnotify_mask & mask) {
+ } else if (p_inode->i_fsnotify_mask & mask & ALL_FSNOTIFY_EVENTS) {
struct name_snapshot name;
/* we are notifying a parent so come up with the new mask which
@@ -339,6 +339,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
sb = mnt->mnt.mnt_sb;
mnt_or_sb_mask = mnt->mnt_fsnotify_mask | sb->s_fsnotify_mask;
}
+ /* An event "on child" is not intended for a mount/sb mark */
+ if (mask & FS_EVENT_ON_CHILD)
+ mnt_or_sb_mask = 0;
/*
* Optimization: srcu_read_lock() has a memory barrier which can
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index da578ad4c08f..eb1ce30412dc 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2411,8 +2411,16 @@ static int ocfs2_dio_end_io(struct kiocb *iocb,
/* this io's submitter should not have unlocked this before we could */
BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
- if (bytes > 0 && private)
- ret = ocfs2_dio_end_io_write(inode, private, offset, bytes);
+ if (bytes <= 0)
+ mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
+ (long long)bytes);
+ if (private) {
+ if (bytes > 0)
+ ret = ocfs2_dio_end_io_write(inode, private, offset,
+ bytes);
+ else
+ ocfs2_dio_free_write_ctx(inode, private);
+ }
ocfs2_iocb_clear_rw_locked(iocb);
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 308ea0eb35fd..a396096a5099 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -178,6 +178,15 @@ do { \
##__VA_ARGS__); \
} while (0)
+#define mlog_ratelimited(mask, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ mlog(mask, fmt, ##__VA_ARGS__); \
+} while (0)
+
#define mlog_errno(st) ({ \
int _st = (st); \
if (_st != -ERESTARTSYS && _st != -EINTR && \
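
mlog_ratelimited() above instantiates one static ratelimit state per call site and consults it before printing. As a rough userspace model of what DEFINE_RATELIMIT_STATE plus __ratelimit() provide (a simple fixed-window counter; the real kernel implementation differs in detail):

#include <stdio.h>
#include <time.h>

/* Model: at most 'burst' messages per 'interval' seconds, per call site. */
struct ratelimit_state {
	time_t begin;
	int interval;		/* seconds */
	int burst;
	int printed;
};

static int ratelimit(struct ratelimit_state *rs)
{
	time_t now = time(NULL);

	if (now - rs->begin >= rs->interval) {	/* window expired: reset */
		rs->begin = now;
		rs->printed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return 1;
	}
	return 0;		/* suppressed */
}

int main(void)
{
	struct ratelimit_state rs = { .interval = 5, .burst = 10 };

	for (int i = 0; i < 15; i++)
		if (ratelimit(&rs))
			printf("Direct IO failed, bytes = %d\n", -1);
	return 0;	/* only the first 10 iterations print */
}
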
diff --git a/fs/read_write.c b/fs/read_write.c
index bfcb4ced5664..4dae0399c75a 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -2094,17 +2094,18 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
off = same->src_offset;
len = same->src_length;
- ret = -EISDIR;
if (S_ISDIR(src->i_mode))
- goto out;
+ return -EISDIR;
- ret = -EINVAL;
if (!S_ISREG(src->i_mode))
- goto out;
+ return -EINVAL;
+
+ if (!file->f_op->remap_file_range)
+ return -EOPNOTSUPP;
ret = remap_verify_area(file, off, len, false);
if (ret < 0)
- goto out;
+ return ret;
ret = 0;
if (off + len > i_size_read(src))
@@ -2147,10 +2148,8 @@ next_fdput:
fdput(dst_fd);
next_loop:
if (fatal_signal_pending(current))
- goto out;
+ break;
}
-
-out:
return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range);
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 0a7252aecfa5..bb71db63c99c 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -334,7 +334,7 @@ int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
}
EXPORT_SYMBOL_GPL(sysfs_create_file_ns);
-int sysfs_create_files(struct kobject *kobj, const struct attribute **ptr)
+int sysfs_create_files(struct kobject *kobj, const struct attribute * const *ptr)
{
int err = 0;
int i;
@@ -493,7 +493,7 @@ bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr)
return ret;
}
-void sysfs_remove_files(struct kobject *kobj, const struct attribute **ptr)
+void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *ptr)
{
int i;
for (i = 0; ptr[i]; i++)
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 6fc5425b1474..2652d00842d6 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -243,7 +243,7 @@ xfs_attr3_leaf_verify(
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_attr_leafblock *leaf = bp->b_addr;
struct xfs_attr_leaf_entry *entries;
- uint16_t end;
+ uint32_t end; /* must be 32bit - see below */
int i;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
@@ -293,6 +293,11 @@ xfs_attr3_leaf_verify(
/*
* Quickly check the freemap information. Attribute data has to be
* aligned to 4-byte boundaries, and likewise for the free space.
+ *
+ * Note that for 64k block size filesystems, the freemap entries cannot
+ * overflow as they are only be16 fields. However, when checking the end
+ * pointer of the freemap, we have to be careful to detect overflows and
+ * so use uint32_t for those checks.
*/
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
@@ -303,7 +308,9 @@ xfs_attr3_leaf_verify(
return __this_address;
if (ichdr.freemap[i].size & 0x3)
return __this_address;
- end = ichdr.freemap[i].base + ichdr.freemap[i].size;
+
+ /* beware of 16 bit overflows here */
+ end = (uint32_t)ichdr.freemap[i].base + ichdr.freemap[i].size;
if (end < ichdr.freemap[i].base)
return __this_address;
if (end > mp->m_attr_geo->blksize)
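
The uint32_t widening above matters on 64k-block filesystems: a valid freemap entry can legitimately have base + size == 65536 (free space running to the end of the block), which wraps to 0 in a uint16_t and makes the end < base check flag healthy metadata as corrupt. A standalone demonstration of the wraparound:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t blksize = 65536;		/* 64k attr block */
	uint16_t base = 0x8000, size = 0x8000;	/* valid: runs to block end */

	uint16_t end16 = base + size;		/* wraps to 0 */
	uint32_t end32 = (uint32_t)base + size;	/* 65536, exact */

	/* 16-bit: false corruption report on a perfectly valid block. */
	printf("end16=%u  bogus 'end < base': %d\n", end16, end16 < base);
	/* 32-bit: both verifier checks pass, as they should. */
	printf("end32=%u  end < base: %d  end > blksize: %d\n",
	       end32, end32 < base, end32 > blksize);
	return 0;
}
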
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 74d7228e755b..19e921d1586f 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1694,10 +1694,13 @@ xfs_bmap_add_extent_delay_real(
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Filling in all of a previously delayed allocation extent.
- * The right neighbor is contiguous, the left is not.
+ * The right neighbor is contiguous, the left is not. Take care
+ * with delay -> unwritten extent allocation here because the
+ * delalloc record we are overwriting is always written.
*/
PREV.br_startblock = new->br_startblock;
PREV.br_blockcount += RIGHT.br_blockcount;
+ PREV.br_state = new->br_state;
xfs_iext_next(ifp, &bma->icur);
xfs_iext_remove(bma->ip, &bma->icur, state);
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 86c50208a143..7fbf8af0b159 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -538,15 +538,18 @@ xfs_inobt_rec_check_count(
static xfs_extlen_t
xfs_inobt_max_size(
- struct xfs_mount *mp)
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno)
{
+ xfs_agblock_t agblocks = xfs_ag_block_count(mp, agno);
+
/* Bail out if we're uninitialized, which can happen in mkfs. */
if (mp->m_inobt_mxr[0] == 0)
return 0;
return xfs_btree_calc_size(mp->m_inobt_mnr,
- (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
- XFS_INODES_PER_CHUNK);
+ (uint64_t)agblocks * mp->m_sb.sb_inopblock /
+ XFS_INODES_PER_CHUNK);
}
static int
@@ -594,7 +597,7 @@ xfs_finobt_calc_reserves(
if (error)
return error;
- *ask += xfs_inobt_max_size(mp);
+ *ask += xfs_inobt_max_size(mp, agno);
*used += tree_len;
return 0;
}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5d263dfdb3bc..404e581f1ea1 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1042,7 +1042,7 @@ out_trans_cancel:
goto out_unlock;
}
-static int
+int
xfs_flush_unmap_range(
struct xfs_inode *ip,
xfs_off_t offset,
@@ -1195,13 +1195,7 @@ xfs_prepare_shift(
* Writeback and invalidate cache for the remainder of the file as we're
* about to shift down every extent from offset to EOF.
*/
- error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
- if (error)
- return error;
- error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
- offset >> PAGE_SHIFT, -1);
- if (error)
- return error;
+ error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
/*
* Clean out anything hanging around in the cow fork now that
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 87363d136bb6..7a78229cf1a7 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -80,4 +80,7 @@ int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
int whichfork, xfs_extnum_t *nextents,
xfs_filblks_t *count);
+int xfs_flush_unmap_range(struct xfs_inode *ip, xfs_off_t offset,
+ xfs_off_t len);
+
#endif /* __XFS_BMAP_UTIL_H__ */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 12d8455bfbb2..010db5f8fb00 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -1233,9 +1233,23 @@ xfs_buf_iodone(
}
/*
- * Requeue a failed buffer for writeback
+ * Requeue a failed buffer for writeback.
*
- * Return true if the buffer has been re-queued properly, false otherwise
+ * We clear the log item failed state here as well, but we have to be careful
+ * about reference counts because the only active reference counts on the buffer
+ * may be the failed log items. Hence if we clear the log item failed state
+ * before queuing the buffer for IO we can release all active references to
+ * the buffer and free it, leading to use after free problems in
+ * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
+ * order we process them in - the buffer is locked, and we own the buffer list
+ * so nothing on them is going to change while we are performing this action.
+ *
+ * Hence we can safely queue the buffer for IO before we clear the failed log
+ * item state, therefore always having an active reference to the buffer and
+ * avoiding the transient zero-reference state that leads to use-after-free.
+ *
+ * Return true if the buffer was added to the buffer list, false if it was
+ * already on the buffer list.
*/
bool
xfs_buf_resubmit_failed_buffers(
@@ -1243,16 +1257,16 @@ xfs_buf_resubmit_failed_buffers(
struct list_head *buffer_list)
{
struct xfs_log_item *lip;
+ bool ret;
+
+ ret = xfs_buf_delwri_queue(bp, buffer_list);
/*
- * Clear XFS_LI_FAILED flag from all items before resubmit
- *
- * XFS_LI_FAILED set/clear is protected by ail_lock, caller this
+ * XFS_LI_FAILED set/clear is protected by ail_lock, caller of this
* function already has it acquired
*/
list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
xfs_clear_li_failed(lip);
- /* Add this buffer back to the delayed write list */
- return xfs_buf_delwri_queue(bp, buffer_list);
+ return ret;
}
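
The reordering in xfs_buf_resubmit_failed_buffers() above is about never letting the buffer's reference count transiently reach zero: queue the buffer (taking the delwri list's reference) before clearing the failed state on the log items (dropping theirs). A toy refcount model of the two orderings (illustrative only, not the xfs code):

#include <stdio.h>

/* Toy model: each failed log item holds one reference on the buffer;
 * queuing on the delwri list takes one more. */
static int refs;

static void clear_failed(int items) { refs -= items; }
static void queue_buffer(void)      { refs += 1; }

int main(void)
{
	/* Buggy order: clear first, refcount hits 0, buffer may be freed. */
	refs = 2;				/* two failed items */
	clear_failed(2);
	printf("after clear: refs=%d%s\n", refs,
	       refs == 0 ? " (use-after-free window!)" : "");
	queue_buffer();

	/* Fixed order: queue first, the count never drops to zero. */
	refs = 2;
	queue_buffer();
	clear_failed(2);
	printf("after fixed order: refs=%d\n", refs);	/* 1 */
	return 0;
}
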
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 53c9ab8fb777..e47425071e65 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -920,7 +920,7 @@ out_unlock:
}
-loff_t
+STATIC loff_t
xfs_file_remap_range(
struct file *file_in,
loff_t pos_in,
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 6e2c08f30f60..6ecdbb3af7de 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1608,7 +1608,7 @@ xfs_ioc_getbmap(
error = 0;
out_free_buf:
kmem_free(buf);
- return 0;
+ return error;
}
struct getfsmap_info {
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index 576c375ce12a..6b736ea58d35 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -107,5 +107,5 @@ assfail(char *expr, char *file, int line)
void
xfs_hex_dump(void *p, int length)
{
- print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1);
+ print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
}
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index ecdb086bc23e..322a852ce284 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -296,6 +296,7 @@ xfs_reflink_reserve_cow(
if (error)
return error;
+ xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
trace_xfs_reflink_cow_alloc(ip, &got);
return 0;
}
@@ -1351,10 +1352,19 @@ xfs_reflink_remap_prep(
if (ret)
goto out_unlock;
- /* Zap any page cache for the destination file's range. */
- truncate_inode_pages_range(&inode_out->i_data,
- round_down(pos_out, PAGE_SIZE),
- round_up(pos_out + *len, PAGE_SIZE) - 1);
+ /*
+ * If pos_out > EOF, we may have dirtied blocks between EOF and
+ * pos_out. In that case, we need to extend the flush and unmap to cover
+ * from EOF to the end of the copy length.
+ */
+ if (pos_out > XFS_ISIZE(dest)) {
+ loff_t flen = *len + (pos_out - XFS_ISIZE(dest));
+ ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
+ } else {
+ ret = xfs_flush_unmap_range(dest, pos_out, *len);
+ }
+ if (ret)
+ goto out_unlock;
return 1;
out_unlock:
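
The xfs_reflink_remap_prep() branch above extends the flush-and-unmap range down to EOF when pos_out lies beyond it, since dirty delalloc blocks may exist between i_size and pos_out. The computation reduces to a small helper (a sketch with example values; XFS_ISIZE is the in-core inode size):

#include <stdint.h>
#include <stdio.h>

static void flush_range(uint64_t isize, uint64_t pos_out, uint64_t len,
			uint64_t *start, uint64_t *flen)
{
	if (pos_out > isize) {	/* dirty blocks possible in [isize, pos_out) */
		*start = isize;
		*flen = len + (pos_out - isize);
	} else {
		*start = pos_out;
		*flen = len;
	}
}

int main(void)
{
	uint64_t start, flen;

	flush_range(4096, 16384, 8192, &start, &flen);
	printf("flush [%llu, +%llu)\n",
	       (unsigned long long)start, (unsigned long long)flen);
	/* prints: flush [4096, +20480) */
	return 0;
}
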
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 3043e5ed6495..8a6532aae779 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -280,7 +280,10 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
),
TP_fast_assign(
__entry->dev = bp->b_target->bt_dev;
- __entry->bno = bp->b_bn;
+ if (bp->b_bn == XFS_BUF_DADDR_NULL)
+ __entry->bno = bp->b_maps[0].bm_bn;
+ else
+ __entry->bno = bp->b_bn;
__entry->nblks = bp->b_length;
__entry->hold = atomic_read(&bp->b_hold);
__entry->pincount = atomic_read(&bp->b_pin_count);
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 89f3b03b1445..e3667c9a33a5 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -3,7 +3,7 @@
#define _4LEVEL_FIXUP_H
#define __ARCH_HAS_4LEVEL_HACK
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
#define PUD_SHIFT PGDIR_SHIFT
#define PUD_SIZE PGDIR_SIZE
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index 9c2e0708eb82..73474bb52344 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -3,7 +3,7 @@
#define _5LEVEL_FIXUP_H
#define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
#define P4D_SHIFT PGDIR_SHIFT
#define P4D_SIZE PGDIR_SIZE
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
index 0c34215263b8..1d6dd38c0e5e 100644
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -5,7 +5,7 @@
#ifndef __ASSEMBLY__
#include <asm-generic/5level-fixup.h>
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
/*
* Having the pud type consist of a pgd gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 1a29b2a0282b..04cb913797bc 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -4,7 +4,7 @@
#ifndef __ASSEMBLY__
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
typedef struct { pgd_t pgd; } p4d_t;
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index f35f6e8149e4..b85b8271a73d 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -8,7 +8,7 @@
struct mm_struct;
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
/*
* Having the pmd type consist of a pud gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index e950b9c50f34..9bef475db6fe 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -9,7 +9,7 @@
#else
#include <asm-generic/pgtable-nop4d.h>
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
/*
* Having the pud type consist of a p4d gets the size right, and allows
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 5657a20e0c59..359fb935ded6 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1127,4 +1127,20 @@ static inline bool arch_has_pfn_modify_check(void)
#endif
#endif
+/*
+ * On some architectures, whether the p4d/pud or pmd layer of the page
+ * table hierarchy is folded depends on the mm.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
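As a reading aid (not part of the patch): the new helpers take the mm so architectures such as s390 can report folding per process; everywhere else they collapse to the compile-time __PAGETABLE_*_FOLDED test. A minimal sketch, close to how the page-table accounting helpers in linux/mm.h consume them:

	/* Sketch: skip PMD accounting when this mm has the PMD level folded. */
	static inline void example_inc_nr_pmds(struct mm_struct *mm)
	{
		if (mm_pmd_folded(mm))
			return;	/* no real PMD level for this mm */
		atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
	}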
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index ccb5aa8468e0..9c56412bb2cf 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -133,6 +133,7 @@ struct dw_hdmi_plat_data {
const struct dw_hdmi_phy_ops *phy_ops;
const char *phy_name;
void *phy_data;
+ unsigned int phy_force_vendor;
/* Synopsys PHY support */
const struct dw_hdmi_mpll_config *mpll_cfg;
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index d9c6d549f971..48a671e782ca 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -19,6 +19,13 @@ struct dw_mipi_dsi_phy_ops {
unsigned int *lane_mbps);
};
+struct dw_mipi_dsi_host_ops {
+ int (*attach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+ int (*detach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+};
+
struct dw_mipi_dsi_plat_data {
void __iomem *base;
unsigned int max_data_lanes;
@@ -27,6 +34,7 @@ struct dw_mipi_dsi_plat_data {
const struct drm_display_mode *mode);
const struct dw_mipi_dsi_phy_ops *phy_ops;
+ const struct dw_mipi_dsi_host_ops *host_ops;
void *priv_data;
};
@@ -35,10 +43,8 @@ struct dw_mipi_dsi *dw_mipi_dsi_probe(struct platform_device *pdev,
const struct dw_mipi_dsi_plat_data
*plat_data);
void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi);
-struct dw_mipi_dsi *dw_mipi_dsi_bind(struct platform_device *pdev,
- struct drm_encoder *encoder,
- const struct dw_mipi_dsi_plat_data
- *plat_data);
+int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder);
void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi);
+void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave);
#endif /* __DW_MIPI_DSI__ */
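With the new split API, a glue driver probes the bridge once at platform probe time and binds it to an encoder during component bind; a minimal sketch, assuming a hypothetical example_glue wrapper struct:

	/* Sketch: probe/bind split introduced by this change; names are made up. */
	static int example_glue_bind(struct example_glue *glue,
				     struct drm_encoder *encoder)
	{
		glue->dsi = dw_mipi_dsi_probe(glue->pdev, &glue->plat_data);
		if (IS_ERR(glue->dsi))
			return PTR_ERR(glue->dsi);
		return dw_mipi_dsi_bind(glue->dsi, encoder);
	}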
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 05350424a4d3..bdb0d5548f39 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -68,7 +68,6 @@
#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_global.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_mm.h>
#include <drm/drm_os_linux.h>
@@ -110,4 +109,10 @@ static inline bool drm_can_sleep(void)
return true;
}
+#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
+#else
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
+#endif
+
#endif
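For illustration only, with a made-up helper name: wrapping an internal function in EXPORT_SYMBOL_FOR_TESTS_ONLY() exports it when the selftests are built as a module and compiles to nothing otherwise.

	/* Hypothetical internal helper; drm_example_internal is not a real symbol. */
	int drm_example_internal(struct drm_device *dev)
	{
		return drm_can_sleep() ? 0 : -EBUSY;
	}
	EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_example_internal);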
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 1e810e0b7664..f9b35834c45d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -265,7 +265,6 @@ struct __drm_private_objs_state {
* struct drm_atomic_state - the global state object for atomic updates
* @ref: count of all references to this state (will not be freed until zero)
* @dev: parent DRM device
- * @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
* @async_update: hint for asynchronous plane update
* @planes: pointer to array of structures with per-plane data
@@ -284,6 +283,15 @@ struct drm_atomic_state {
struct kref ref;
struct drm_device *dev;
+
+ /**
+ * @allow_modeset:
+ *
+ * Allow full modeset. This is used by the ATOMIC IOCTL handler to
+ * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should
+ * never consult this flag, instead looking at the output of
+ * drm_atomic_crtc_needs_modeset().
+ */
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
bool async_update : 1;
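A hedged sketch of what the kerneldoc above asks for: a driver atomic_check keyed off the per-CRTC state rather than allow_modeset.

	/* Sketch: consult drm_atomic_crtc_needs_modeset(), never allow_modeset. */
	static int example_crtc_atomic_check(struct drm_crtc *crtc,
					     struct drm_crtc_state *crtc_state)
	{
		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
			/* validate clocks/PLL settings for a full modeset */
		}
		return 0;
	}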
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 657af7b39379..58214be3bf3d 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -31,6 +31,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_util.h>
struct drm_atomic_state;
@@ -126,6 +127,9 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
void drm_atomic_helper_shutdown(struct drm_device *dev);
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx);
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev);
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx);
@@ -144,51 +148,10 @@ int drm_atomic_helper_page_flip_target(
uint32_t flags,
uint32_t target,
struct drm_modeset_acquire_ctx *ctx);
-struct drm_encoder *
-drm_atomic_helper_best_encoder(struct drm_connector *connector);
-
-/* default implementations for state handling */
-void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
-void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
-struct drm_crtc_state *
-drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
-void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
-void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
-
-void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
- struct drm_plane_state *state);
-void drm_atomic_helper_plane_reset(struct drm_plane *plane);
-void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
- struct drm_plane_state *state);
-struct drm_plane_state *
-drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
-void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
-void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state);
-
-void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
- struct drm_connector_state *conn_state);
-void drm_atomic_helper_connector_reset(struct drm_connector *connector);
-void
-__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
- struct drm_connector_state *state);
-struct drm_connector_state *
-drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
-struct drm_atomic_state *
-drm_atomic_helper_duplicate_state(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx);
-void
-__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
-void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
- struct drm_connector_state *state);
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
uint32_t size,
struct drm_modeset_acquire_ctx *ctx);
-void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
- struct drm_private_state *state);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
new file mode 100644
index 000000000000..66c92cbd8e16
--- /dev/null
+++ b/include/drm/drm_atomic_state_helper.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include <linux/types.h>
+
+struct drm_crtc;
+struct drm_crtc_state;
+struct drm_plane;
+struct drm_plane_state;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_private_obj;
+struct drm_private_state;
+struct drm_modeset_acquire_ctx;
+struct drm_device;
+
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
+void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
+ struct drm_connector_state *conn_state);
+void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+ struct drm_private_state *state);
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 9ccad6b062f2..9be2181b3ed7 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -508,6 +508,18 @@ struct drm_connector_state {
* drm_writeback_signal_completion()
*/
struct drm_writeback_job *writeback_job;
+
+ /**
+ * @max_requested_bpc: Connector property to limit the maximum bit
+ * depth of the pixels.
+ */
+ u8 max_requested_bpc;
+
+ /**
+ * @max_bpc: Connector max_bpc based on the requested max_bpc property
+ * and the connector bpc limitations obtained from the EDID.
+ */
+ u8 max_bpc;
};
/**
@@ -960,6 +972,17 @@ struct drm_connector {
struct drm_property *scaling_mode_property;
/**
+ * @vrr_capable_property: Optional property to help userspace
+ * query hardware support for variable refresh rate on a connector.
+ * Drivers can add the property to a connector by
+ * calling drm_connector_attach_vrr_capable_property().
+ *
+ * This should be updated only by calling
+ * drm_connector_set_vrr_capable_property().
+ */
+ struct drm_property *vrr_capable_property;
+
+ /**
* @content_protection_property: DRM ENUM property for content
* protection. See drm_connector_attach_content_protection_property().
*/
@@ -973,6 +996,12 @@ struct drm_connector {
*/
struct drm_property_blob *path_blob_ptr;
+ /**
+ * @max_bpc_property: Default connector property for the max bpc to be
+ * driven out of the connector.
+ */
+ struct drm_property *max_bpc_property;
+
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@ -1133,6 +1162,7 @@ int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type);
+void drm_connector_attach_edid_property(struct drm_connector *connector);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
int drm_connector_attach_encoder(struct drm_connector *connector,
@@ -1192,30 +1222,6 @@ static inline void drm_connector_put(struct drm_connector *connector)
}
/**
- * drm_connector_reference - acquire a connector reference
- * @connector: DRM connector
- *
- * This is a compatibility alias for drm_connector_get() and should not be
- * used by new code.
- */
-static inline void drm_connector_reference(struct drm_connector *connector)
-{
- drm_connector_get(connector);
-}
-
-/**
- * drm_connector_unreference - release a connector reference
- * @connector: DRM connector
- *
- * This is a compatibility alias for drm_connector_put() and should not be
- * used by new code.
- */
-static inline void drm_connector_unreference(struct drm_connector *connector)
-{
- drm_connector_put(connector);
-}
-
-/**
* drm_connector_is_unregistered - has the connector been unregistered from
* userspace?
* @connector: DRM connector
@@ -1250,6 +1256,8 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev);
int drm_connector_attach_content_type_property(struct drm_connector *dev);
int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
+int drm_connector_attach_vrr_capable_property(
+ struct drm_connector *connector);
int drm_connector_attach_content_protection_property(
struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
@@ -1266,8 +1274,12 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
void drm_connector_set_link_status_property(struct drm_connector *connector,
uint64_t link_status);
+void drm_connector_set_vrr_capable_property(
+ struct drm_connector *connector, bool capable);
int drm_connector_init_panel_orientation_property(
struct drm_connector *connector, int width, int height);
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+ int min, int max);
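A hedged sketch of how a driver might attach the new properties while setting up a connector (the 6..12 bpc range is purely illustrative):

	/* Sketch: wire up max_bpc and vrr_capable during connector setup. */
	static void example_connector_setup(struct drm_connector *connector)
	{
		drm_connector_attach_max_bpc_property(connector, 6, 12);
		drm_connector_attach_vrr_capable_property(connector);
		/* later, e.g. once the EDID has been parsed: */
		drm_connector_set_vrr_capable_property(connector, true);
	}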
/**
* struct drm_tile_group - Tile group metadata
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b21437bc95bf..39c3900aab3c 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -291,6 +291,15 @@ struct drm_crtc_state {
u32 pageflip_flags;
/**
+ * @vrr_enabled:
+ *
+ * Indicates if variable refresh rate should be enabled for the CRTC.
+ * Support for the requested vrr state will depend on driver and
+ * hardware capability - lacking support is not treated as failure.
+ */
+ bool vrr_enabled;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 6914633037a5..d65f034843ce 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -57,12 +57,6 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
void drm_helper_resume_force_mode(struct drm_device *dev);
-int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode, int x, int y,
- struct drm_framebuffer *old_fb);
-int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb);
-
/* drm_probe_helper.c */
int drm_helper_probe_single_connector_modes(struct drm_connector
*connector, uint32_t maxX,
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
new file mode 100644
index 000000000000..4487660b26b8
--- /dev/null
+++ b/include/drm/drm_damage_helper.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Deepak Rawat <drawat@vmware.com>
+ *
+ **************************************************************************/
+
+#ifndef DRM_DAMAGE_HELPER_H_
+#define DRM_DAMAGE_HELPER_H_
+
+#include <drm/drm_atomic_helper.h>
+
+/**
+ * drm_atomic_for_each_plane_damage - Iterator macro for plane damage.
+ * @iter: The iterator to advance.
+ * @rect: Returns a rectangle in fb coordinates clipped to the plane src.
+ *
+ * Note that if the first call to the iterator macro returns false then no
+ * plane update is needed. The iterator will return the full plane src when
+ * damage is not passed by user-space.
+ */
+#define drm_atomic_for_each_plane_damage(iter, rect) \
+ while (drm_atomic_helper_damage_iter_next(iter, rect))
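A minimal sketch of the intended use in a plane update path, under the assumption that the driver only needs to flush the damaged rectangles:

	/* Sketch: iterate damage clips; the iterator yields the full src if none. */
	static void example_plane_atomic_update(struct drm_plane *plane,
						struct drm_plane_state *old_state)
	{
		struct drm_atomic_helper_damage_iter iter;
		struct drm_rect clip;

		drm_atomic_helper_damage_iter_init(&iter, old_state, plane->state);
		drm_atomic_for_each_plane_damage(&iter, &clip) {
			/* flush/upload only the rectangle in 'clip' */
		}
	}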
+
+/**
+ * struct drm_atomic_helper_damage_iter - Closure structure for damage iterator.
+ *
+ * This structure tracks state needed to walk the list of plane damage clips.
+ */
+struct drm_atomic_helper_damage_iter {
+ /* private: Plane src in whole number. */
+ struct drm_rect plane_src;
+ /* private: Rectangles in plane damage blob. */
+ const struct drm_rect *clips;
+ /* private: Number of rectangles in plane damage blob. */
+ uint32_t num_clips;
+ /* private: Current clip iterator is advancing on. */
+ uint32_t curr_clip;
+ /* private: Whether a full plane update is needed. */
+ bool full_update;
+};
+
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane);
+void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state);
+int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips);
+void
+drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+ const struct drm_plane_state *old_state,
+ const struct drm_plane_state *new_state);
+bool
+drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
+ struct drm_rect *rect);
+
+/**
+ * drm_helper_get_plane_damage_clips - Returns damage clips in &drm_rect.
+ * @state: Plane state.
+ *
+ * Returns plane damage rectangles in internal &drm_rect. Currently &drm_rect
+ * can be obtained by simply typecasting &drm_mode_rect. This is because both
+ * are signed 32-bit and during drm_atomic_check_only() it is verified that damage
+ * clips are inside fb.
+ *
+ * Return: Clips in plane fb_damage_clips blob property.
+ */
+static inline struct drm_rect *
+drm_helper_get_plane_damage_clips(const struct drm_plane_state *state)
+{
+ return (struct drm_rect *)drm_plane_get_damage_clips(state);
+}
+
+#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2a3843f248cf..5736c942c85b 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -231,6 +231,8 @@
#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */
#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0)
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069
# define DP_DSC_RGB (1 << 0)
@@ -279,6 +281,8 @@
# define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4)
#define DP_DSC_MAX_SLICE_WIDTH 0x06C
+#define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560
+#define DP_DSC_SLICE_WIDTH_MULTIPLIER 320
#define DP_DSC_SLICE_CAP_2 0x06D
# define DP_DSC_16_PER_DP_DSC_SINK (1 << 0)
@@ -477,6 +481,7 @@
# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
+# define DP_DECOMPRESSION_EN (1 << 0)
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
@@ -685,6 +690,8 @@
# define DP_EDP_12 0x01
# define DP_EDP_13 0x02
# define DP_EDP_14 0x03
+# define DP_EDP_14a 0x04 /* eDP 1.4a */
+# define DP_EDP_14b 0x05 /* eDP 1.4b */
#define DP_EDP_GENERAL_CAP_1 0x701
# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0)
@@ -905,6 +912,57 @@
#define DP_AUX_HDCP_KSV_FIFO 0x6802C
#define DP_AUX_HDCP_AINFO 0x6803B
+/* DP HDCP2.2 parameter offsets in DPCD address space */
+#define DP_HDCP_2_2_REG_RTX_OFFSET 0x69000
+#define DP_HDCP_2_2_REG_TXCAPS_OFFSET 0x69008
+#define DP_HDCP_2_2_REG_CERT_RX_OFFSET 0x6900B
+#define DP_HDCP_2_2_REG_RRX_OFFSET 0x69215
+#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET 0x6921D
+#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET 0x69220
+#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET 0x692A0
+#define DP_HDCP_2_2_REG_M_OFFSET 0x692B0
+#define DP_HDCP_2_2_REG_HPRIME_OFFSET 0x692C0
+#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET 0x692E0
+#define DP_HDCP_2_2_REG_RN_OFFSET 0x692F0
+#define DP_HDCP_2_2_REG_LPRIME_OFFSET 0x692F8
+#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET 0x69318
+#define DP_HDCP_2_2_REG_RIV_OFFSET 0x69328
+#define DP_HDCP_2_2_REG_RXINFO_OFFSET 0x69330
+#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET 0x69332
+#define DP_HDCP_2_2_REG_VPRIME_OFFSET 0x69335
+#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET 0x69345
+#define DP_HDCP_2_2_REG_V_OFFSET 0x693E0
+#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET 0x693F0
+#define DP_HDCP_2_2_REG_K_OFFSET 0x693F3
+#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET 0x693F5
+#define DP_HDCP_2_2_REG_MPRIME_OFFSET 0x69473
+#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET 0x69493
+#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
+#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
+
+/* DP HDCP message start offsets in DPCD address space */
+#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET
+#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKPUB_KM_OFFSET
+#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET DP_HDCP_2_2_REG_HPRIME_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \
+ DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET
+#define DP_HDCP_2_2_LC_INIT_OFFSET DP_HDCP_2_2_REG_RN_OFFSET
+#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET DP_HDCP_2_2_REG_LPRIME_OFFSET
+#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET DP_HDCP_2_2_REG_EDKEY_KS_OFFSET
+#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET DP_HDCP_2_2_REG_RXINFO_OFFSET
+#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET DP_HDCP_2_2_REG_V_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET DP_HDCP_2_2_REG_MPRIME_OFFSET
+
+#define HDCP_2_2_DP_RXSTATUS_LEN 1
+#define HDCP_2_2_DP_RXSTATUS_READY(x) ((x) & BIT(0))
+#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x) ((x) & BIT(1))
+#define HDCP_2_2_DP_RXSTATUS_PAIRING(x) ((x) & BIT(2))
+#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
+#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x) ((x) & BIT(4))
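Since these are ordinary DPCD offsets, an HDCP 2.2 implementation reads them over the AUX channel; a hedged sketch of polling RxStatus:

	/* Sketch: poll HDCP 2.2 RxStatus and react to a reauth request. */
	static int example_hdcp2_check_rxstatus(struct drm_dp_aux *aux)
	{
		u8 rxstatus;
		ssize_t ret;

		ret = drm_dp_dpcd_read(aux, DP_HDCP_2_2_REG_RXSTATUS_OFFSET,
				       &rxstatus, HDCP_2_2_DP_RXSTATUS_LEN);
		if (ret != HDCP_2_2_DP_RXSTATUS_LEN)
			return ret >= 0 ? -EIO : ret;
		if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rxstatus))
			return -EAGAIN;	/* sink requests reauthentication */
		return 0;
	}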
+
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
@@ -963,6 +1021,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
+#define DP_DSC_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
@@ -993,6 +1052,7 @@ struct dp_sdp_header {
#define EDP_SDP_HEADER_REVISION_MASK 0x1F
#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
+#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F
struct edp_vsc_psr {
struct dp_sdp_header sdp_header;
@@ -1059,6 +1119,44 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
}
+/* DP/eDP DSC support */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ bool is_edp);
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3]);
+
+static inline bool
+drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_DECOMPRESSION_IS_SUPPORTED;
+}
+
+static inline u16
+drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+ (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK <<
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
+}
+
+static inline u32
+drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ /* Max slice width = number of pixels * 320 */
+ return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
+ DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+
+/* Forward Error Correction Support on DP 1.4 */
+static inline bool
+drm_dp_sink_supports_fec(const u8 fec_capable)
+{
+ return fec_capable & DP_FEC_CAPABLE;
+}
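Putting the helpers together, a source driver would read the DSC capability block once and then interrogate it; a minimal sketch, assuming the caps are read starting at DP_DSC_SUPPORT:

	/* Sketch: probe sink DSC support from the DPCD capability block. */
	static bool example_sink_can_dsc(struct drm_dp_aux *aux)
	{
		u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];

		if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
				     sizeof(dsc_dpcd)) != sizeof(dsc_dpcd))
			return false;
		return drm_dp_sink_supports_dsc(dsc_dpcd) &&
		       drm_dp_dsc_sink_max_slice_width(dsc_dpcd) > 0;
	}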
+
/*
* DisplayPort AUX channel
*/
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 7f78d26a0766..59f005b419cf 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -409,7 +409,6 @@ struct drm_dp_payload {
struct drm_dp_mst_topology_state {
struct drm_private_state base;
int avail_slots;
- struct drm_atomic_state *state;
struct drm_dp_mst_topology_mgr *mgr;
};
@@ -498,11 +497,6 @@ struct drm_dp_mst_topology_mgr {
int pbn_div;
/**
- * @state: State information for topology manager
- */
- struct drm_dp_mst_topology_state *state;
-
- /**
* @funcs: Atomic helper callbacks
*/
const struct drm_private_state_funcs *funcs;
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 3199ef70c007..35af23f5fa0d 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -471,6 +471,8 @@ struct drm_driver {
* @gem_prime_export:
*
* export GEM -> dmabuf
+ *
+ * This defaults to drm_gem_prime_export() if not set.
*/
struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
@@ -478,6 +480,8 @@ struct drm_driver {
* @gem_prime_import:
*
* import dmabuf -> GEM
+ *
+ * This defaults to drm_gem_prime_import() if not set.
*/
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
@@ -523,8 +527,10 @@ struct drm_driver {
* @dumb_map_offset:
*
* Allocate an offset in the drm device node's address space to be able to
- * memory map a dumb buffer. GEM-based drivers must use
- * drm_gem_create_mmap_offset() to implement this.
+ * memory map a dumb buffer.
+ *
+ * The default implementation is drm_gem_create_mmap_offset(). GEM-based
+ * drivers must not overwrite this.
*
* Called by the user via ioctl.
*
@@ -544,6 +550,9 @@ struct drm_driver {
*
* Called by the user via ioctl.
*
+ * The default implementation is drm_gem_dumb_destroy(). GEM-based drivers
+ * must not overwrite this.
+ *
* Returns:
*
* Zero on success, negative errno on failure.
@@ -621,7 +630,6 @@ void drm_dev_unregister(struct drm_device *dev);
void drm_dev_get(struct drm_device *dev);
void drm_dev_put(struct drm_device *dev);
-void drm_dev_unref(struct drm_device *dev);
void drm_put_dev(struct drm_device *dev);
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h
new file mode 100644
index 000000000000..d03f1b83421a
--- /dev/null
+++ b/include/drm/drm_dsc.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Authors:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#ifndef DRM_DSC_H_
+#define DRM_DSC_H_
+
+#include <drm/drm_dp_helper.h>
+
+/* VESA Display Stream Compression DSC 1.2 constants */
+#define DSC_NUM_BUF_RANGES 15
+#define DSC_MUX_WORD_SIZE_8_10_BPC 48
+#define DSC_MUX_WORD_SIZE_12_BPC 64
+#define DSC_RC_PIXELS_PER_GROUP 3
+#define DSC_SCALE_DECREMENT_INTERVAL_MAX 4095
+#define DSC_RANGE_BPG_OFFSET_MASK 0x3f
+
+/* DSC Rate Control Constants */
+#define DSC_RC_MODEL_SIZE_CONST 8192
+#define DSC_RC_EDGE_FACTOR_CONST 6
+#define DSC_RC_TGT_OFFSET_HI_CONST 3
+#define DSC_RC_TGT_OFFSET_LO_CONST 3
+
+/* DSC PPS constants and macros */
+#define DSC_PPS_VERSION_MAJOR_SHIFT 4
+#define DSC_PPS_BPC_SHIFT 4
+#define DSC_PPS_MSB_SHIFT 8
+#define DSC_PPS_LSB_MASK (0xFF << 0)
+#define DSC_PPS_BPP_HIGH_MASK (0x3 << 8)
+#define DSC_PPS_VBR_EN_SHIFT 2
+#define DSC_PPS_SIMPLE422_SHIFT 3
+#define DSC_PPS_CONVERT_RGB_SHIFT 4
+#define DSC_PPS_BLOCK_PRED_EN_SHIFT 5
+#define DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK (0x3 << 8)
+#define DSC_PPS_SCALE_DEC_INT_HIGH_MASK (0xF << 8)
+#define DSC_PPS_RC_TGT_OFFSET_HI_SHIFT 4
+#define DSC_PPS_RC_RANGE_MINQP_SHIFT 11
+#define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6
+#define DSC_PPS_NATIVE_420_SHIFT 1
+#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16
+#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0
+#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13
+
+/* Configuration for a single Rate Control model range */
+struct drm_dsc_rc_range_parameters {
+ /* Min Quantization Parameters allowed for this range */
+ u8 range_min_qp;
+ /* Max Quantization Parameters allowed for this range */
+ u8 range_max_qp;
+ /* Bits/group offset to apply to target for this group */
+ u8 range_bpg_offset;
+};
+
+struct drm_dsc_config {
+ /* Bits / component for previous reconstructed line buffer */
+ u8 line_buf_depth;
+ /* Bits per component to code (must be 8, 10, or 12) */
+ u8 bits_per_component;
+ /*
+ * Flag indicating to do RGB - YCoCg conversion
+ * and back (should be 1 for RGB input)
+ */
+ bool convert_rgb;
+ u8 slice_count;
+ /* Slice Width */
+ u16 slice_width;
+ /* Slice Height */
+ u16 slice_height;
+ /*
+ * 4:2:2 enable mode (from PPS, 4:2:2 conversion happens
+ * outside of DSC encode/decode algorithm)
+ */
+ bool enable422;
+ /* Picture Width */
+ u16 pic_width;
+ /* Picture Height */
+ u16 pic_height;
+ /* Offset to bits/group used by RC to determine QP adjustment */
+ u8 rc_tgt_offset_high;
+ /* Offset to bits/group used by RC to determine QP adjustment */
+ u8 rc_tgt_offset_low;
+ /* Bits/pixel target << 4 (i.e., 4 fractional bits) */
+ u16 bits_per_pixel;
+ /*
+ * Factor to determine if an edge is present based
+ * on the bits produced
+ */
+ u8 rc_edge_factor;
+ /* Slow down incrementing once the range reaches this value */
+ u8 rc_quant_incr_limit1;
+ /* Slow down incrementing once the range reaches this value */
+ u8 rc_quant_incr_limit0;
+ /* Number of pixels to delay the initial transmission */
+ u16 initial_xmit_delay;
+ /* Number of pixels to delay the VLD on the decoder, not including SSM */
+ u16 initial_dec_delay;
+ /* Block prediction enable */
+ bool block_pred_enable;
+ /* Bits/group offset to use for first line of the slice */
+ u8 first_line_bpg_offset;
+ /* Value to use for RC model offset at slice start */
+ u16 initial_offset;
+ /* Thresholds defining each of the buffer ranges */
+ u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+ /* Parameters for each of the RC ranges */
+ struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+ /* Total size of RC model */
+ u16 rc_model_size;
+ /* Minimum QP where flatness information is sent */
+ u8 flatness_min_qp;
+ /* Maximum QP where flatness information is sent */
+ u8 flatness_max_qp;
+ /* Initial value for scale factor */
+ u8 initial_scale_value;
+ /* Decrement scale factor every scale_decrement_interval groups */
+ u16 scale_decrement_interval;
+ /* Increment scale factor every scale_increment_interval groups */
+ u16 scale_increment_interval;
+ /* Non-first line BPG offset to use */
+ u16 nfl_bpg_offset;
+ /* BPG offset used to enforce slice bit */
+ u16 slice_bpg_offset;
+ /* Final RC linear transformation offset value */
+ u16 final_offset;
+ /* Enable on-off VBR (i.e., disable stuffing bits) */
+ bool vbr_enable;
+ /* Mux word size (in bits) for SSM mode */
+ u8 mux_word_size;
+ /*
+ * The (max) size in bytes of the "chunks" that are
+ * used in slice multiplexing
+ */
+ u16 slice_chunk_size;
+ /* Rate Control buffer size in bits */
+ u16 rc_bits;
+ /* DSC Minor Version */
+ u8 dsc_version_minor;
+ /* DSC Major version */
+ u8 dsc_version_major;
+ /* Native 4:2:2 support */
+ bool native_422;
+ /* Native 4:2:0 support */
+ bool native_420;
+ /* Additional bits/group for the second line of a slice for native 4:2:0 */
+ u8 second_line_bpg_offset;
+ /* Number of bits deallocated for each group not in the second line of a slice */
+ u16 nsl_bpg_offset;
+ /* Offset adjustment for the second line in native 4:2:0 mode */
+ u16 second_line_offset_adj;
+};
+
+/**
+ * struct picture_parameter_set - Represents 128 bytes of Picture Parameter Set
+ *
+ * The VESA DSC standard defines picture parameter set (PPS) which display
+ * stream compression encoders must communicate to decoders.
+ * The PPS is encapsulated in 128 bytes (PPS 0 through PPS 127). The fields in
+ * this structure are as per Table 4.1 in the VESA DSC specification v1.1/v1.2.
+ * The PPS fields that span over more than a byte should be stored in Big Endian
+ * format.
+ */
+struct drm_dsc_picture_parameter_set {
+ /**
+ * @dsc_version:
+ * PPS0[3:0] - dsc_version_minor: Contains Minor version of DSC
+ * PPS0[7:4] - dsc_version_major: Contains major version of DSC
+ */
+ u8 dsc_version;
+ /**
+ * @pps_identifier:
+ * PPS1[7:0] - Application specific identifier that can be
+ * used to differentiate between different PPS tables.
+ */
+ u8 pps_identifier;
+ /**
+ * @pps_reserved:
+ * PPS2[7:0]- RESERVED Byte
+ */
+ u8 pps_reserved;
+ /**
+ * @pps_3:
+ * PPS3[3:0] - linebuf_depth: Contains linebuffer bit depth used to
+ * generate the bitstream. (0x0 - 16 bits for DSC 1.2, 0x8 - 8 bits,
+ * 0xA - 10 bits, 0xB - 11 bits, 0xC - 12 bits, 0xD - 13 bits,
+ * 0xE - 14 bits for DSC 1.2, 0xF - 14 bits for DSC 1.2.
+ * PPS3[7:4] - bits_per_component: Bits per component for the original
+ * pixels of the encoded picture.
+ * 0x0 = 16bpc (allowed only when dsc_version_minor = 0x2)
+ * 0x8 = 8bpc, 0xA = 10bpc, 0xC = 12bpc, 0xE = 14bpc (also
+ * allowed only when dsc_minor_version = 0x2)
+ */
+ u8 pps_3;
+ /**
+ * @pps_4:
+ * PPS4[1:0] - These are the most significant 2 bits of
+ * compressed BPP bits_per_pixel[9:0] syntax element.
+ * PPS4[2] - vbr_enable: 0 = VBR disabled, 1 = VBR enabled
+ * PPS4[3] - simple_422: Indicates if decoder drops samples to
+ * reconstruct the 4:2:2 picture.
+ * PPS4[4] - Convert_rgb: Indicates if DSC color space conversion is
+ * active.
+ * PPS4[5] - block_pred_enable: Indicates if BP is used to code any
+ * groups in the picture
+ * PPS4[7:6] - Reserved bits
+ */
+ u8 pps_4;
+ /**
+ * @bits_per_pixel_low:
+ * PPS5[7:0] - This indicates the lower significant 8 bits of
+ * the compressed BPP bits_per_pixel[9:0] element.
+ */
+ u8 bits_per_pixel_low;
+ /**
+ * @pic_height:
+ * PPS6[7:0], PPS7[7:0] - pic_height: Specifies the number of pixel rows
+ * within the raster.
+ */
+ __be16 pic_height;
+ /**
+ * @pic_width:
+ * PPS8[7:0], PPS9[7:0] - pic_width: Number of pixel columns within
+ * the raster.
+ */
+ __be16 pic_width;
+ /**
+ * @slice_height:
+ * PPS10[7:0], PPS11[7:0] - Slice height in units of pixels.
+ */
+ __be16 slice_height;
+ /**
+ * @slice_width:
+ * PPS12[7:0], PPS13[7:0] - Slice width in terms of pixels.
+ */
+ __be16 slice_width;
+ /**
+ * @chunk_size:
+ * PPS14[7:0], PPS15[7:0] - Size in units of bytes of the chunks
+ * that are used for slice multiplexing.
+ */
+ __be16 chunk_size;
+ /**
+ * @initial_xmit_delay_high:
+ * PPS16[1:0] - Most Significant two bits of initial transmission delay.
+ * It specifies the number of pixel times that the encoder waits before
+ * transmitting data from its rate buffer.
+ * PPS16[7:2] - Reserved
+ */
+ u8 initial_xmit_delay_high;
+ /**
+ * @initial_xmit_delay_low:
+ * PPS17[7:0] - Least significant 8 bits of initial transmission delay.
+ */
+ u8 initial_xmit_delay_low;
+ /**
+ * @initial_dec_delay:
+ *
+ * PPS18[7:0], PPS19[7:0] - Initial decoding delay which is the number
+ * of pixel times that the decoder accumulates data in its rate buffer
+ * before starting to decode and output pixels.
+ */
+ __be16 initial_dec_delay;
+ /**
+ * @pps20_reserved:
+ *
+ * PPS20[7:0] - Reserved
+ */
+ u8 pps20_reserved;
+ /**
+ * @initial_scale_value:
+ * PPS21[5:0] - Initial rcXformScale factor used at beginning
+ * of a slice.
+ * PPS21[7:6] - Reserved
+ */
+ u8 initial_scale_value;
+ /**
+ * @scale_increment_interval:
+ * PPS22[7:0], PPS23[7:0] - Number of group times between incrementing
+ * the rcXformScale factor at end of a slice.
+ */
+ __be16 scale_increment_interval;
+ /**
+ * @scale_decrement_interval_high:
+ * PPS24[3:0] - Higher 4 bits indicating number of group times between
+ * decrementing the rcXformScale factor at beginning of a slice.
+ * PPS24[7:4] - Reserved
+ */
+ u8 scale_decrement_interval_high;
+ /**
+ * @scale_decrement_interval_low:
+ * PPS25[7:0] - Lower 8 bits of scale decrement interval
+ */
+ u8 scale_decrement_interval_low;
+ /**
+ * @pps26_reserved:
+ * PPS26[7:0]
+ */
+ u8 pps26_reserved;
+ /**
+ * @first_line_bpg_offset:
+ * PPS27[4:0] - Number of additional bits that are allocated
+ * for each group on first line of a slice.
+ * PPS27[7:5] - Reserved
+ */
+ u8 first_line_bpg_offset;
+ /**
+ * @nfl_bpg_offset:
+ * PPS28[7:0], PPS29[7:0] - Number of bits including frac bits
+ * deallocated for each group for groups after the first line of slice.
+ */
+ __be16 nfl_bpg_offset;
+ /**
+ * @slice_bpg_offset:
+ * PPS30, PPS31[7:0] - Number of bits that are deallocated for each
+ * group to enforce the slice constraint.
+ */
+ __be16 slice_bpg_offset;
+ /**
+ * @initial_offset:
+ * PPS32,33[7:0] - Initial value for rcXformOffset
+ */
+ __be16 initial_offset;
+ /**
+ * @final_offset:
+ * PPS34,35[7:0] - Maximum end-of-slice value for rcXformOffset
+ */
+ __be16 final_offset;
+ /**
+ * @flatness_min_qp:
+ * PPS36[4:0] - Minimum QP at which flatness is signaled and
+ * flatness QP adjustment is made.
+ * PPS36[7:5] - Reserved
+ */
+ u8 flatness_min_qp;
+ /**
+ * @flatness_max_qp:
+ * PPS37[4:0] - Max QP at which flatness is signaled and
+ * the flatness adjustment is made.
+ * PPS37[7:5] - Reserved
+ */
+ u8 flatness_max_qp;
+ /**
+ * @rc_model_size:
+ * PPS38,39[7:0] - Number of bits within RC Model.
+ */
+ __be16 rc_model_size;
+ /**
+ * @rc_edge_factor:
+ * PPS40[3:0] - Ratio of current activity vs. previous
+ * activity to determine presence of edge.
+ * PPS40[7:4] - Reserved
+ */
+ u8 rc_edge_factor;
+ /**
+ * @rc_quant_incr_limit0:
+ * PPS41[4:0] - QP threshold used in short term RC
+ * PPS41[7:5] - Reserved
+ */
+ u8 rc_quant_incr_limit0;
+ /**
+ * @rc_quant_incr_limit1:
+ * PPS42[4:0] - QP threshold used in short term RC
+ * PPS42[7:5] - Reserved
+ */
+ u8 rc_quant_incr_limit1;
+ /**
+ * @rc_tgt_offset:
+ * PPS43[3:0] - Lower end of the variability range around the target
+ * bits per group that is allowed by short term RC.
+ * PPS43[7:4] - Upper end of the variability range around the target
+ * bits per group that is allowed by short term RC.
+ */
+ u8 rc_tgt_offset;
+ /**
+ * @rc_buf_thresh:
+ * PPS44[7:0] - PPS57[7:0] - Specifies the thresholds in RC model for
+ * the 15 ranges defined by 14 thresholds.
+ */
+ u8 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+ /**
+ * @rc_range_parameters:
+ * PPS58[7:0] - PPS87[7:0]
+ * Parameters that correspond to each of the 15 ranges.
+ */
+ __be16 rc_range_parameters[DSC_NUM_BUF_RANGES];
+ /**
+ * @native_422_420:
+ * PPS88[0] - 0 = Native 4:2:2 not used
+ * 1 = Native 4:2:2 used
+ * PPS88[1] - 0 = Native 4:2:0 not used
+ * 1 = Native 4:2:0 used
+ * PPS88[7:2] - Reserved 6 bits
+ */
+ u8 native_422_420;
+ /**
+ * @second_line_bpg_offset:
+ * PPS89[4:0] - Additional bits/group budget for the
+ * second line of a slice in Native 4:2:0 mode.
+ * Set to 0 if DSC minor version is 1 or native420 is 0.
+ * PPS89[7:5] - Reserved
+ */
+ u8 second_line_bpg_offset;
+ /**
+ * @nsl_bpg_offset:
+ * PPS90[7:0], PPS91[7:0] - Number of bits that are deallocated
+ * for each group that is not in the second line of a slice.
+ */
+ __be16 nsl_bpg_offset;
+ /**
+ * @second_line_offset_adj:
+ * PPS92[7:0], PPS93[7:0] - Used as offset adjustment for the second
+ * line in Native 4:2:0 mode.
+ */
+ __be16 second_line_offset_adj;
+ /**
+ * @pps_long_94_reserved:
+ * PPS 94, 95, 96, 97 - Reserved
+ */
+ u32 pps_long_94_reserved;
+ /**
+ * @pps_long_98_reserved:
+ * PPS 98, 99, 100, 101 - Reserved
+ */
+ u32 pps_long_98_reserved;
+ /**
+ * @pps_long_102_reserved:
+ * PPS 102, 103, 104, 105 - Reserved
+ */
+ u32 pps_long_102_reserved;
+ /**
+ * @pps_long_106_reserved:
+ * PPS 106, 107, 108, 109 - reserved
+ */
+ u32 pps_long_106_reserved;
+ /**
+ * @pps_long_110_reserved:
+ * PPS 110, 111, 112, 113 - reserved
+ */
+ u32 pps_long_110_reserved;
+ /**
+ * @pps_long_114_reserved:
+ * PPS 114 - 117 - reserved
+ */
+ u32 pps_long_114_reserved;
+ /**
+ * @pps_long_118_reserved:
+ * PPS 118 - 121 - reserved
+ */
+ u32 pps_long_118_reserved;
+ /**
+ * @pps_long_122_reserved:
+ * PPS 122- 125 - reserved
+ */
+ u32 pps_long_122_reserved;
+ /**
+ * @pps_short_126_reserved:
+ * PPS 126, 127 - reserved
+ */
+ __be16 pps_short_126_reserved;
+} __packed;
+
+/**
+ * struct drm_dsc_pps_infoframe - DSC infoframe carrying the Picture Parameter
+ * Set Metadata
+ *
+ * This structure represents the DSC PPS infoframe required to send the Picture
+ * Parameter Set metadata required before enabling VESA Display Stream
+ * Compression. This is based on the DP Secondary Data Packet structure and
+ * comprises the SDP header as defined in drm_dp_helper.h and the PPS payload.
+ *
+ * @pps_header: Header for PPS as per DP SDP header format
+ * @pps_payload: PPS payload fields as per DSC specification Table 4-1
+ */
+struct drm_dsc_pps_infoframe {
+ struct dp_sdp_header pps_header;
+ struct drm_dsc_picture_parameter_set pps_payload;
+} __packed;
+
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp);
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg);
+
+#endif /* _DRM_DSC_H_ */
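A hedged sketch of the expected call sequence when enabling DSC, assuming dsc_cfg was filled in during earlier mode and capability negotiation:

	/* Sketch: build the PPS SDP from a negotiated DSC configuration. */
	static void example_send_pps(const struct drm_dsc_config *dsc_cfg)
	{
		struct drm_dsc_pps_infoframe pps_sdp;

		drm_dsc_dp_pps_header_init(&pps_sdp);
		drm_dsc_pps_infoframe_pack(&pps_sdp, dsc_cfg);
		/* hardware specific: transmit pps_sdp as a secondary data packet */
	}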
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 4a65f0d155b0..8dbbe1eece1b 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -26,8 +26,6 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
-void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
- bool state);
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 26485acc51d7..84ac79219e4c 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -164,14 +164,14 @@ struct drm_file {
* See also the :ref:`section on primary nodes and authentication
* <drm_primary_node>`.
*/
- unsigned authenticated :1;
+ bool authenticated;
/**
* @stereo_allowed:
*
* True when the client has asked us to expose stereo 3D mode flags.
*/
- unsigned stereo_allowed :1;
+ bool stereo_allowed;
/**
* @universal_planes:
@@ -179,10 +179,10 @@ struct drm_file {
* True if client understands CRTC primary planes and cursor planes
* in the plane list. Automatically set when @atomic is set.
*/
- unsigned universal_planes:1;
+ bool universal_planes;
/** @atomic: True if client understands atomic properties. */
- unsigned atomic:1;
+ bool atomic;
/**
* @aspect_ratio_allowed:
@@ -190,14 +190,14 @@ struct drm_file {
* True if the client can handle picture aspect ratios and has requested
* to pass this information along with the mode.
*/
- unsigned aspect_ratio_allowed:1;
+ bool aspect_ratio_allowed;
/**
* @writeback_connectors:
*
* True if client understands writeback connectors
*/
- unsigned writeback_connectors:1;
+ bool writeback_connectors;
/**
* @is_master:
@@ -208,7 +208,7 @@ struct drm_file {
* See also the :ref:`section on primary nodes and authentication
* <drm_primary_node>`.
*/
- unsigned is_master:1;
+ bool is_master;
/**
* @master:
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 865ef60c17af..bcb389f04618 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -52,25 +52,86 @@ struct drm_mode_fb_cmd2;
/**
* struct drm_format_info - information about a DRM format
- * @format: 4CC format identifier (DRM_FORMAT_*)
- * @depth: Color depth (number of bits per pixel excluding padding bits),
- * valid for a subset of RGB formats only. This is a legacy field, do not
- * use in new code and set to 0 for new formats.
- * @num_planes: Number of color planes (1 to 3)
- * @cpp: Number of bytes per pixel (per plane)
- * @hsub: Horizontal chroma subsampling factor
- * @vsub: Vertical chroma subsampling factor
- * @has_alpha: Does the format embeds an alpha component?
- * @is_yuv: Is it a YUV format?
*/
struct drm_format_info {
+ /** @format: 4CC format identifier (DRM_FORMAT_*) */
u32 format;
+
+ /**
+ * @depth:
+ *
+ * Color depth (number of bits per pixel excluding padding bits),
+ * valid for a subset of RGB formats only. This is a legacy field, do
+ * not use in new code and set to 0 for new formats.
+ */
u8 depth;
+
+ /** @num_planes: Number of color planes (1 to 3) */
u8 num_planes;
- u8 cpp[3];
+
+ union {
+ /**
+ * @cpp:
+ *
+ * Number of bytes per pixel (per plane), this is aliased with
+ * @char_per_block. It is deprecated in favour of using the
+ * triplet @char_per_block, @block_w, @block_h for better
+ * describing the pixel format.
+ */
+ u8 cpp[3];
+
+ /**
+ * @char_per_block:
+ *
+ * Number of bytes per block (per plane), where blocks are
+ * defined as a rectangle of pixels which are stored next to
+ * each other in a byte aligned memory region. Together with
+ * @block_w and @block_h this is used to properly describe tiles
+ * in tiled formats or to describe groups of pixels in packed
+ * formats for which the memory needed for a single pixel is not
+ * byte aligned.
+ *
+ * @cpp has been kept for historical reasons because there are
+ * a lot of places in drivers where it's used. In drm core for
+ * generic code paths the preferred way is to use
+ * @char_per_block, drm_format_info_block_width() and
+ * drm_format_info_block_height() which allows handling both
+ * block and non-block formats in the same way.
+ *
+ * For formats that are intended to be used only with non-linear
+ * modifiers both @cpp and @char_per_block must be 0 in the
+ * generic format table. Drivers could supply accurate
+ * information from their drm_mode_config.get_format_info hook
+ * if they want the core to validate the pitch.
+ */
+ u8 char_per_block[3];
+ };
+
+ /**
+ * @block_w:
+ *
+ * Block width in pixels, this is intended to be accessed through
+ * drm_format_info_block_width()
+ */
+ u8 block_w[3];
+
+ /**
+ * @block_h:
+ *
+ * Block height in pixels, this is intended to be accessed through
+ * drm_format_info_block_height()
+ */
+ u8 block_h[3];
+
+ /** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
+ /** @vsub: Vertical chroma subsampling factor */
u8 vsub;
+
+ /** @has_alpha: Does the format embed an alpha component? */
bool has_alpha;
+
+ /** @is_yuv: Is it a YUV format? */
bool is_yuv;
};
@@ -96,6 +157,12 @@ int drm_format_horz_chroma_subsampling(uint32_t format);
int drm_format_vert_chroma_subsampling(uint32_t format);
int drm_format_plane_width(int width, uint32_t format, int plane);
int drm_format_plane_height(int height, uint32_t format, int plane);
+unsigned int drm_format_info_block_width(const struct drm_format_info *info,
+ int plane);
+unsigned int drm_format_info_block_height(const struct drm_format_info *info,
+ int plane);
+uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
+ int plane, unsigned int buffer_width);
const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
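A minimal sketch of how generic code is expected to use the block-based description rather than @cpp directly; for linear formats the block width is 1 and the minimum pitch degenerates to cpp * width:

	/* Sketch: validate a userspace-supplied pitch against the format. */
	static int example_check_pitch(const struct drm_format_info *info,
				       unsigned int width, unsigned int pitch)
	{
		/* the helper rounds to whole blocks; 0 signals an invalid plane */
		u64 min_pitch = drm_format_info_min_pitch(info, 0, width);

		if (!min_pitch || pitch < min_pitch)
			return -EINVAL;
		return 0;
	}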
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index c50502c656e5..c94acedfb08e 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -241,30 +241,6 @@ static inline void drm_framebuffer_put(struct drm_framebuffer *fb)
}
/**
- * drm_framebuffer_reference - acquire a framebuffer reference
- * @fb: DRM framebuffer
- *
- * This is a compatibility alias for drm_framebuffer_get() and should not be
- * used by new code.
- */
-static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
-{
- drm_framebuffer_get(fb);
-}
-
-/**
- * drm_framebuffer_unreference - release a framebuffer reference
- * @fb: DRM framebuffer
- *
- * This is a compatibility alias for drm_framebuffer_put() and should not be
- * used by new code.
- */
-static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
- drm_framebuffer_put(fb);
-}
-
-/**
* drm_framebuffer_read_refcount - read the framebuffer reference count.
* @fb: framebuffer
*
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 3583b98a1718..c95727425284 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -38,6 +38,121 @@
#include <drm/drm_vma_manager.h>
+struct drm_gem_object;
+
+/**
+ * struct drm_gem_object_funcs - GEM object functions
+ */
+struct drm_gem_object_funcs {
+ /**
+ * @free:
+ *
+ * Destructor for drm_gem_objects.
+ *
+ * This callback is mandatory.
+ */
+ void (*free)(struct drm_gem_object *obj);
+
+ /**
+ * @open:
+ *
+ * Called upon GEM handle creation.
+ *
+ * This callback is optional.
+ */
+ int (*open)(struct drm_gem_object *obj, struct drm_file *file);
+
+ /**
+ * @close:
+ *
+ * Called upon GEM handle release.
+ *
+ * This callback is optional.
+ */
+ void (*close)(struct drm_gem_object *obj, struct drm_file *file);
+
+ /**
+ * @print_info:
+ *
+ * If the driver subclasses struct &drm_gem_object, it can implement this
+ * optional hook for printing additional driver-specific info.
+ *
+ * drm_printf_indent() should be used in the callback passing it the
+ * indent argument.
+ *
+ * This callback is called from drm_gem_print_info().
+ *
+ * This callback is optional.
+ */
+ void (*print_info)(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj);
+
+ /**
+ * @export:
+ *
+ * Export backing buffer as a &dma_buf.
+ * If this is not set drm_gem_prime_export() is used.
+ *
+ * This callback is optional.
+ */
+ struct dma_buf *(*export)(struct drm_gem_object *obj, int flags);
+
+ /**
+ * @pin:
+ *
+ * Pin backing buffer in memory.
+ *
+ * This callback is optional.
+ */
+ int (*pin)(struct drm_gem_object *obj);
+
+ /**
+ * @unpin:
+ *
+ * Unpin backing buffer.
+ *
+ * This callback is optional.
+ */
+ void (*unpin)(struct drm_gem_object *obj);
+
+ /**
+ * @get_sg_table:
+ *
+ * Returns a Scatter-Gather table representation of the buffer.
+ * Used when exporting a buffer.
+ *
+ * This callback is mandatory if buffer export is supported.
+ */
+ struct sg_table *(*get_sg_table)(struct drm_gem_object *obj);
+
+ /**
+ * @vmap:
+ *
+ * Returns a virtual address for the buffer.
+ *
+ * This callback is optional.
+ */
+ void *(*vmap)(struct drm_gem_object *obj);
+
+ /**
+ * @vunmap:
+ *
+ * Releases the address previously returned by @vmap.
+ *
+ * This callback is optional.
+ */
+ void (*vunmap)(struct drm_gem_object *obj, void *vaddr);
+
+ /**
+ * @vm_ops:
+ *
+ * Virtual memory operations used with mmap.
+ *
+ * This is optional but necessary for mmap support.
+ */
+ const struct vm_operations_struct *vm_ops;
+};
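A hedged sketch of a driver providing its own table and attaching it at object-creation time; all example_* names are hypothetical:

	/* Hypothetical per-object function table. */
	static const struct drm_gem_object_funcs example_gem_funcs = {
		.free = example_gem_free,
		.vmap = example_gem_vmap,
		.vunmap = example_gem_vunmap,
		.vm_ops = &example_gem_vm_ops,
	};

	static void example_gem_init(struct drm_gem_object *obj)
	{
		/* takes precedence over the matching &drm_driver callbacks */
		obj->funcs = &example_gem_funcs;
	}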
+
/**
* struct drm_gem_object - GEM buffer object
*
@@ -146,6 +261,17 @@ struct drm_gem_object {
* simply leave it as NULL.
*/
struct dma_buf_attachment *import_attach;
+
+ /**
+ * @funcs:
+ *
+ * Optional GEM object functions. If this is set, it will be used instead of the
+ * corresponding &drm_driver GEM callbacks.
+ *
+ * New drivers should use this.
+ */
+ const struct drm_gem_object_funcs *funcs;
};
/**
@@ -222,56 +348,6 @@ __drm_gem_object_put(struct drm_gem_object *obj)
void drm_gem_object_put_unlocked(struct drm_gem_object *obj);
void drm_gem_object_put(struct drm_gem_object *obj);
-/**
- * drm_gem_object_reference - acquire a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_get() and should not be
- * used by new code.
- */
-static inline void drm_gem_object_reference(struct drm_gem_object *obj)
-{
- drm_gem_object_get(obj);
-}
-
-/**
- * __drm_gem_object_unreference - raw function to release a GEM buffer object
- * reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for __drm_gem_object_put() and should not be
- * used by new code.
- */
-static inline void __drm_gem_object_unreference(struct drm_gem_object *obj)
-{
- __drm_gem_object_put(obj);
-}
-
-/**
- * drm_gem_object_unreference_unlocked - release a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_put_unlocked() and should
- * not be used by new code.
- */
-static inline void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
-{
- drm_gem_object_put_unlocked(obj);
-}
-
-/**
- * drm_gem_object_unreference - release a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_put() and should not be
- * used by new code.
- */
-static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
-{
- drm_gem_object_put(obj);
-}
-
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -293,4 +369,9 @@ int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle);
+int drm_gem_pin(struct drm_gem_object *obj);
+void drm_gem_unpin(struct drm_gem_object *obj);
+void *drm_gem_vmap(struct drm_gem_object *obj);
+void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr);
+
#endif /* __DRM_GEM_H__ */
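These wrappers dispatch through obj->funcs when set and fall back to the legacy &drm_driver callbacks otherwise; a minimal usage sketch:

	/* Sketch: temporarily map a GEM object into the kernel address space. */
	static int example_copy_from_gem(struct drm_gem_object *obj,
					 void *dst, size_t len)
	{
		void *vaddr = drm_gem_vmap(obj);

		if (IS_ERR_OR_NULL(vaddr))
			return vaddr ? PTR_ERR(vaddr) : -ENOMEM;
		memcpy(dst, vaddr, len);
		drm_gem_vunmap(obj, vaddr);
		return 0;
	}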
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 19777145cf8e..07c504940ba1 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -103,4 +103,28 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj);
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+struct drm_gem_object *
+drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size);
+
+/**
+ * DRM_GEM_CMA_VMAP_DRIVER_OPS - CMA GEM driver operations ensuring a virtual
+ * address on the buffer
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that also need the virtual address on
+ * imported buffers.
+ */
+#define DRM_GEM_CMA_VMAP_DRIVER_OPS \
+ .gem_create_object = drm_cma_gem_create_object_default_funcs, \
+ .dumb_create = drm_gem_cma_dumb_create, \
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \
+ .gem_prime_mmap = drm_gem_prime_mmap
+
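For illustration, a hypothetical CMA-based driver would pull all of these defaults in with a single line of its &drm_driver (the foo_* names are placeholders):

static struct drm_driver foo_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
                           DRIVER_ATOMIC,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .name = "foo",
        /* ... */
};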
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
#endif /* __DRM_GEM_CMA_HELPER_H__ */
diff --git a/include/drm/drm_global.h b/include/drm/drm_global.h
deleted file mode 100644
index 3a830602a2e4..000000000000
--- a/include/drm/drm_global.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#ifndef _DRM_GLOBAL_H_
-#define _DRM_GLOBAL_H_
-enum drm_global_types {
- DRM_GLOBAL_TTM_MEM = 0,
- DRM_GLOBAL_TTM_BO,
- DRM_GLOBAL_TTM_OBJECT,
- DRM_GLOBAL_NUM
-};
-
-struct drm_global_reference {
- enum drm_global_types global_type;
- size_t size;
- void *object;
- int (*init) (struct drm_global_reference *);
- void (*release) (struct drm_global_reference *);
-};
-
-void drm_global_init(void);
-void drm_global_release(void);
-int drm_global_item_ref(struct drm_global_reference *ref);
-void drm_global_item_unref(struct drm_global_reference *ref);
-
-#endif
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 98e63d870139..a6de09c5e47f 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -38,4 +38,216 @@
#define DRM_HDCP_DDC_BSTATUS 0x41
#define DRM_HDCP_DDC_KSV_FIFO 0x43
+#define DRM_HDCP_1_4_SRM_ID 0x8
+#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3
+#define DRM_HDCP_1_4_DCP_SIG_SIZE 40
+
+/* Protocol message definition for HDCP2.2 specification */
+/*
+ * Protected content streams are classified into 2 types:
+ * - Type0: Can be transmitted with HDCP 1.4+
+ * - Type1: Can be transmitted with HDCP 2.2+
+ */
+#define HDCP_STREAM_TYPE0 0x00
+#define HDCP_STREAM_TYPE1 0x01
+
+/* HDCP2.2 Msg IDs */
+#define HDCP_2_2_NULL_MSG 1
+#define HDCP_2_2_AKE_INIT 2
+#define HDCP_2_2_AKE_SEND_CERT 3
+#define HDCP_2_2_AKE_NO_STORED_KM 4
+#define HDCP_2_2_AKE_STORED_KM 5
+#define HDCP_2_2_AKE_SEND_HPRIME 7
+#define HDCP_2_2_AKE_SEND_PAIRING_INFO 8
+#define HDCP_2_2_LC_INIT 9
+#define HDCP_2_2_LC_SEND_LPRIME 10
+#define HDCP_2_2_SKE_SEND_EKS 11
+#define HDCP_2_2_REP_SEND_RECVID_LIST 12
+#define HDCP_2_2_REP_SEND_ACK 15
+#define HDCP_2_2_REP_STREAM_MANAGE 16
+#define HDCP_2_2_REP_STREAM_READY 17
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
+
+#define HDCP_2_2_RTX_LEN 8
+#define HDCP_2_2_RRX_LEN 8
+
+#define HDCP_2_2_K_PUB_RX_MOD_N_LEN 128
+#define HDCP_2_2_K_PUB_RX_EXP_E_LEN 3
+#define HDCP_2_2_K_PUB_RX_LEN (HDCP_2_2_K_PUB_RX_MOD_N_LEN + \
+ HDCP_2_2_K_PUB_RX_EXP_E_LEN)
+
+#define HDCP_2_2_DCP_LLC_SIG_LEN 384
+
+#define HDCP_2_2_E_KPUB_KM_LEN 128
+#define HDCP_2_2_E_KH_KM_M_LEN (16 + 16)
+#define HDCP_2_2_H_PRIME_LEN 32
+#define HDCP_2_2_E_KH_KM_LEN 16
+#define HDCP_2_2_RN_LEN 8
+#define HDCP_2_2_L_PRIME_LEN 32
+#define HDCP_2_2_E_DKEY_KS_LEN 16
+#define HDCP_2_2_RIV_LEN 8
+#define HDCP_2_2_SEQ_NUM_LEN 3
+#define HDCP_2_2_V_PRIME_HALF_LEN (HDCP_2_2_L_PRIME_LEN / 2)
+#define HDCP_2_2_RECEIVER_ID_LEN DRM_HDCP_KSV_LEN
+#define HDCP_2_2_MAX_DEVICE_COUNT 31
+#define HDCP_2_2_RECEIVER_IDS_MAX_LEN (HDCP_2_2_RECEIVER_ID_LEN * \
+ HDCP_2_2_MAX_DEVICE_COUNT)
+#define HDCP_2_2_MPRIME_LEN 32
+
+/* The following macros take a byte at a time and mask the relevant bit(s) */
+/*
+ * TODO: This has to be changed for DP MST, as multiple streams on the
+ * same port are possible.
+ * For HDCP2.2 on HDMI and DP SST this value is always 1.
+ */
+#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 1
+#define HDCP_2_2_TXCAP_MASK_LEN 2
+#define HDCP_2_2_RXCAPS_LEN 3
+#define HDCP_2_2_RX_REPEATER(x) ((x) & BIT(0))
+#define HDCP_2_2_DP_HDCP_CAPABLE(x) ((x) & BIT(1))
+#define HDCP_2_2_RXINFO_LEN 2
+
+/* HDCP1.x compliant device in downstream */
+#define HDCP_2_2_HDCP1_DEVICE_CONNECTED(x) ((x) & BIT(0))
+
+/* HDCP2.0 Compliant repeater in downstream */
+#define HDCP_2_2_HDCP_2_0_REP_CONNECTED(x) ((x) & BIT(1))
+#define HDCP_2_2_MAX_CASCADE_EXCEEDED(x) ((x) & BIT(2))
+#define HDCP_2_2_MAX_DEVS_EXCEEDED(x) ((x) & BIT(3))
+#define HDCP_2_2_DEV_COUNT_LO(x) (((x) & (0xF << 4)) >> 4)
+#define HDCP_2_2_DEV_COUNT_HI(x) ((x) & BIT(0))
+#define HDCP_2_2_DEPTH(x) (((x) & (0x7 << 1)) >> 1)
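To make the bit layout concrete, a sketch of how a reader might combine these masks into a downstream device count (byte order per the HDCP 2.2 spec; this mirrors typical driver usage but is not part of the patch):

static u32 foo_hdcp2_dev_count(const u8 rx_info[HDCP_2_2_RXINFO_LEN])
{
        /* rx_info[0] carries the high bit, rx_info[1] the low nibble */
        return (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
                HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
}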
+
+struct hdcp2_cert_rx {
+ u8 receiver_id[HDCP_2_2_RECEIVER_ID_LEN];
+ u8 kpub_rx[HDCP_2_2_K_PUB_RX_LEN];
+ u8 reserved[2];
+ u8 dcp_signature[HDCP_2_2_DCP_LLC_SIG_LEN];
+} __packed;
+
+struct hdcp2_streamid_type {
+ u8 stream_id;
+ u8 stream_type;
+} __packed;
+
+/*
+ * The TxCaps field as specified in the HDCP HDMI and DP specs.
+ * This field is big endian as specified in the errata.
+ */
+struct hdcp2_tx_caps {
+ /* Transmitter must set this to 0x2 */
+ u8 version;
+
+ /* Reserved by the HDCP and DP specs; reads as zero */
+ u8 tx_cap_mask[HDCP_2_2_TXCAP_MASK_LEN];
+} __packed;
+
+/* Main structures for HDCP2.2 protocol communication */
+struct hdcp2_ake_init {
+ u8 msg_id;
+ u8 r_tx[HDCP_2_2_RTX_LEN];
+ struct hdcp2_tx_caps tx_caps;
+} __packed;
+
+struct hdcp2_ake_send_cert {
+ u8 msg_id;
+ struct hdcp2_cert_rx cert_rx;
+ u8 r_rx[HDCP_2_2_RRX_LEN];
+ u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
+} __packed;
+
+struct hdcp2_ake_no_stored_km {
+ u8 msg_id;
+ u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
+} __packed;
+
+struct hdcp2_ake_stored_km {
+ u8 msg_id;
+ u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
+} __packed;
+
+struct hdcp2_ake_send_hprime {
+ u8 msg_id;
+ u8 h_prime[HDCP_2_2_H_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ake_send_pairing_info {
+ u8 msg_id;
+ u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN];
+} __packed;
+
+struct hdcp2_lc_init {
+ u8 msg_id;
+ u8 r_n[HDCP_2_2_RN_LEN];
+} __packed;
+
+struct hdcp2_lc_send_lprime {
+ u8 msg_id;
+ u8 l_prime[HDCP_2_2_L_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ske_send_eks {
+ u8 msg_id;
+ u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
+ u8 riv[HDCP_2_2_RIV_LEN];
+} __packed;
+
+struct hdcp2_rep_send_receiverid_list {
+ u8 msg_id;
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
+ u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
+ u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
+} __packed;
+
+struct hdcp2_rep_send_ack {
+ u8 msg_id;
+ u8 v[HDCP_2_2_V_PRIME_HALF_LEN];
+} __packed;
+
+struct hdcp2_rep_stream_manage {
+ u8 msg_id;
+ u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
+ __be16 k;
+ struct hdcp2_streamid_type streams[HDCP_2_2_MAX_CONTENT_STREAMS_CNT];
+} __packed;
+
+struct hdcp2_rep_stream_ready {
+ u8 msg_id;
+ u8 m_prime[HDCP_2_2_MPRIME_LEN];
+} __packed;
+
+struct hdcp2_dp_errata_stream_type {
+ u8 msg_id;
+ u8 stream_type;
+} __packed;
+
+/* HDCP2.2 timeouts in milliseconds */
+#define HDCP_2_2_CERT_TIMEOUT_MS 100
+#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000
+#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS 200
+#define HDCP_2_2_PAIRING_TIMEOUT_MS 200
+#define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS 20
+#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 7
+#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS 3000
+#define HDCP_2_2_STREAM_READY_TIMEOUT_MS 100
+
+/* HDMI HDCP2.2 Register Offsets */
+#define HDCP_2_2_HDMI_REG_VER_OFFSET 0x50
+#define HDCP_2_2_HDMI_REG_WR_MSG_OFFSET 0x60
+#define HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET 0x70
+#define HDCP_2_2_HDMI_REG_RD_MSG_OFFSET 0x80
+#define HDCP_2_2_HDMI_REG_DBG_OFFSET 0xC0
+
+#define HDCP_2_2_HDMI_SUPPORT_MASK BIT(2)
+#define HDCP_2_2_RX_CAPS_VERSION_VAL 0x02
+#define HDCP_2_2_SEQ_NUM_MAX 0xFFFFFF
+#define HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN 200
+
+/* The macros below take a byte at a time and mask the relevant bit(s) */
+#define HDCP_2_2_HDMI_RXSTATUS_LEN 2
+#define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x) ((x) & 0x3)
+#define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2))
+#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
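Likewise, a sketch of assembling the 10-bit pending-message size from the two RxStatus bytes read at HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET (illustrative only):

u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
unsigned int msg_sz;

/* ... DDC read of HDCP_2_2_HDMI_RXSTATUS_LEN bytes into rx_status ... */
msg_sz = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rx_status[1]) << 8 | rx_status[0];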
+
#endif
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 4fef19064b0f..491528f48cfb 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -168,6 +168,12 @@ struct mipi_dsi_device_info {
* @format: pixel format for video mode
* @lanes: number of active data lanes
* @mode_flags: DSI operation mode related flags
+ * @hs_rate: maximum lane frequency for high speed mode in hertz. This should
+ * be set to the real limit of the hardware; zero is only accepted for
+ * legacy drivers
+ * @lp_rate: maximum lane frequency for low power mode in hertz. This should
+ * be set to the real limit of the hardware; zero is only accepted for
+ * legacy drivers
*/
struct mipi_dsi_device {
struct mipi_dsi_host *host;
@@ -178,6 +184,8 @@ struct mipi_dsi_device {
unsigned int lanes;
enum mipi_dsi_pixel_format format;
unsigned long mode_flags;
+ unsigned long hs_rate;
+ unsigned long lp_rate;
};
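A sketch of how a hypothetical panel driver would advertise its limits through the new fields at probe time; the numeric rates are illustrative, not taken from the patch:

static int foo_panel_probe(struct mipi_dsi_device *dsi)
{
        dsi->lanes = 4;
        dsi->format = MIPI_DSI_FMT_RGB888;
        dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
        dsi->hs_rate = 500000000;       /* 500 MHz, real hardware limit */
        dsi->lp_rate = 10000000;        /* 10 MHz */

        return mipi_dsi_attach(dsi);
}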
#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 928e4172a0bb..572274ccbec7 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -52,6 +52,12 @@ struct drm_mode_config_funcs {
* requested metadata, but most of that is left to the driver. See
* &struct drm_mode_fb_cmd2 for details.
*
+ * To validate the pixel format and modifier drivers can use
+ * drm_any_plane_has_format() to make sure at least one plane supports
+ * the requested values. Note that the driver must first determine the
+ * actual modifier used if the request doesn't have it specified,
+ * i.e. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0.
+ *
* If the parameters are deemed valid and the backing storage objects in
* the underlying memory manager all exist, then the driver allocates
* a new &drm_framebuffer structure, subclassed to contain
@@ -628,6 +634,15 @@ struct drm_mode_config {
*/
struct drm_property *prop_crtc_id;
/**
+ * @prop_fb_damage_clips: Optional plane property to mark damaged
+ * regions on the plane in framebuffer coordinates of the framebuffer
+ * attached to the plane.
+ *
+ * The layout of blob data is simply an array of &drm_mode_rect. Unlike
+ * plane src coordinates, damage clips are not in 16.16 fixed point.
+ */
+ struct drm_property *prop_fb_damage_clips;
+ /**
* @prop_active: Default atomic CRTC property to control the active
* state, which is the simplified implementation for DPMS in atomic
* drivers.
@@ -639,6 +654,11 @@ struct drm_mode_config {
* connectors must be of and active must be set to disabled, too.
*/
struct drm_property *prop_mode_id;
+ /**
+ * @prop_vrr_enabled: Default atomic CRTC property to indicate
+ * whether variable refresh rate should be enabled on the CRTC.
+ */
+ struct drm_property *prop_vrr_enabled;
/**
* @dvi_i_subconnector_property: Optional DVI-I property to
@@ -809,6 +829,13 @@ struct drm_mode_config {
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
+
+ /**
+ * @quirk_addfb_prefer_xbgr_30bpp:
+ *
+ * Special hack for legacy ADDFB to keep nouveau userspace happy. Should
+ * only ever be set by the nouveau kernel driver.
+ */
bool quirk_addfb_prefer_xbgr_30bpp;
/**
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index a685d1bb21f2..a308f2d6496f 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -130,4 +130,63 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
+/**
+ * DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks
+ * @dev: drm device
+ * @ctx: local modeset acquire context, will be dereferenced
+ * @flags: DRM_MODESET_ACQUIRE_* flags to pass to drm_modeset_acquire_init()
+ * @ret: local ret/err/etc variable to track error status
+ *
+ * Use these macros to simplify grabbing all modeset locks using a local
+ * context. This has the advantage of reducing boilerplate while also
+ * properly checking return values where appropriate.
+ *
+ * Any code run between BEGIN and END will be holding the modeset locks.
+ *
+ * This must be paired with DRM_MODESET_LOCK_ALL_END(). We will jump back and
+ * forth between the labels on deadlock and error conditions.
+ *
+ * Drivers can acquire additional modeset locks. If any lock acquisition
+ * fails, the control flow needs to jump to DRM_MODESET_LOCK_ALL_END() with
+ * the @ret parameter containing the return value of drm_modeset_lock().
+ *
+ * Returns:
+ * The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN()
+ * is 0, so no error checking is necessary.
+ */
+#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \
+ drm_modeset_acquire_init(&ctx, flags); \
+modeset_lock_retry: \
+ ret = drm_modeset_lock_all_ctx(dev, &ctx); \
+ if (ret) \
+ goto modeset_lock_fail;
+
+/**
+ * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks
+ * @ctx: local modeset acquire context, will be dereferenced
+ * @ret: local ret/err/etc variable to track error status
+ *
+ * The other side of DRM_MODESET_LOCK_ALL_BEGIN(). It will bounce back to BEGIN
+ * if ret is -EDEADLK.
+ *
+ * It's important that you use the same ret variable for begin and end so
+ * deadlock conditions are properly handled.
+ *
+ * Returns:
+ * ret will be untouched unless it is -EDEADLK on entry. That means that if you
+ * successfully acquire the locks, ret will be whatever your code sets it to. If
+ * there is a deadlock or other failure with acquire or backoff, ret will be set
+ * to that failure. In both of these cases the code between BEGIN/END will not
+ * be run, so the failure will reflect the inability to grab the locks.
+ */
+#define DRM_MODESET_LOCK_ALL_END(ctx, ret) \
+modeset_lock_fail: \
+ if (ret == -EDEADLK) { \
+ ret = drm_modeset_backoff(&ctx); \
+ if (!ret) \
+ goto modeset_lock_retry; \
+ } \
+ drm_modeset_drop_locks(&ctx); \
+ drm_modeset_acquire_fini(&ctx);
+
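Put together, a typical caller looks like the following sketch; error paths inside the critical section just set ret and fall through to the END macro:

struct drm_modeset_acquire_ctx ctx;
int ret;

DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
/* ... code that needs the modeset locks; may set ret on failure ... */
DRM_MODESET_LOCK_ALL_END(ctx, ret);

return ret;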
#endif /* DRM_MODESET_LOCK_H_ */
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 0a0834bef8bd..6078c700d9ba 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -173,6 +173,16 @@ struct drm_plane_state {
*/
enum drm_color_range color_range;
+ /**
+ * @fb_damage_clips:
+ *
+ * Blob representing damage (area in plane framebuffer that changed
+ * since last plane update) as an array of &drm_mode_rect in framebuffer
+ * coodinates of the attached framebuffer. Note that unlike plane src,
+ * damage clips are not in 16.16 fixed point.
+ */
+ struct drm_property_blob *fb_damage_clips;
+
/** @src: clipped source coordinates of the plane (in 16.16) */
/** @dst: clipped destination coordinates of the plane */
struct drm_rect src, dst;
@@ -798,5 +808,39 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
#define drm_for_each_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+bool drm_any_plane_has_format(struct drm_device *dev,
+ u32 format, u64 modifier);
+/**
+ * drm_plane_get_damage_clips_count - Returns damage clips count.
+ * @state: Plane state.
+ *
+ * Simple helper to get the number of &drm_mode_rect clips set by user-space
+ * during plane update.
+ *
+ * Return: Number of clips in plane fb_damage_clips blob property.
+ */
+static inline unsigned int
+drm_plane_get_damage_clips_count(const struct drm_plane_state *state)
+{
+ return (state && state->fb_damage_clips) ?
+ state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0;
+}
+
+/**
+ * drm_plane_get_damage_clips - Returns damage clips.
+ * @state: Plane state.
+ *
+ * Note that this function returns uapi type &drm_mode_rect. Drivers might
+ * instead be interested in the internal &drm_rect, which can be obtained by calling
+ * drm_helper_get_plane_damage_clips().
+ *
+ * Return: Damage clips in plane fb_damage_clips blob property.
+ */
+static inline struct drm_mode_rect *
+drm_plane_get_damage_clips(const struct drm_plane_state *state)
+{
+ return (struct drm_mode_rect *)((state && state->fb_damage_clips) ?
+ state->fb_damage_clips->data : NULL);
+}
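For illustration, a plane update handler might walk the clips like this (foo_flush_rect() is a hypothetical damage-flush helper):

struct drm_mode_rect *clips = drm_plane_get_damage_clips(state);
unsigned int i, num_clips = drm_plane_get_damage_clips_count(state);

for (i = 0; i < num_clips; i++)
        foo_flush_rect(state->fb, clips[i].x1, clips[i].y1,
                       clips[i].x2, clips[i].y2);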
#endif
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 26cee2934781..331ebd60b3a3 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -38,42 +38,7 @@
*/
#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
-int drm_plane_helper_check_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_rect *src,
- struct drm_rect *dest,
- unsigned int rotation,
- int min_scale,
- int max_scale,
- bool can_position,
- bool can_update_disabled,
- bool *visible);
-int drm_primary_helper_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx);
-int drm_primary_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx);
void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
-int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx);
-int drm_plane_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx);
-
-/* For use by drm_crtc_helper.c */
-int drm_plane_helper_commit(struct drm_plane *plane,
- struct drm_plane_state *plane_state,
- struct drm_framebuffer *old_fb);
#endif
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index d716d653b096..b03731a3f079 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -70,6 +70,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
+int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
@@ -93,9 +94,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
-void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
- void *addr);
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 5b9efff35d6d..4a0a80d658c7 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -153,7 +153,8 @@ struct drm_property {
* userspace. The kernel is allowed to update the value of these
* properties. This is generally used to expose probe state to
* userspace, e.g. the EDID, or the connector path property on DP
- * MST sinks.
+ * MST sinks. The kernel can update the value of an immutable property
+ * by calling drm_object_property_set_value().
*/
uint32_t flags;
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 425432b85a87..b1fe921f8e8f 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -131,10 +131,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
u32 handle);
-void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, u64 point,
+void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence);
int drm_syncobj_find_fence(struct drm_file *file_private,
- u32 handle, u64 point,
+ u32 handle, u64 point, u64 flags,
struct dma_fence **fence);
void drm_syncobj_free(struct kref *kref);
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index d25a9603ab57..6ad9630d4f48 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -95,7 +95,7 @@ struct drm_vblank_crtc {
/**
* @queue: Wait queue for vblank waiters.
*/
- wait_queue_head_t queue; /**< VBLANK wait queue */
+ wait_queue_head_t queue;
/**
* @disable_timer: Disable timer for the delayed vblank disabling
* hysteresis logic. Vblank disabling is controlled through the
@@ -107,7 +107,7 @@ struct drm_vblank_crtc {
/**
* @seqlock: Protect vblank count and time.
*/
- seqlock_t seqlock; /* protects vblank count and time */
+ seqlock_t seqlock;
/**
* @count: Current software vblank counter.
@@ -123,7 +123,7 @@ struct drm_vblank_crtc {
* this refcount reaches 0 can the hardware interrupt be disabled using
* @disable_timer.
*/
- atomic_t refcount; /* number of users of vblank interruptsper crtc */
+ atomic_t refcount;
/**
* @last: Protected by &drm_device.vbl_lock, used for wraparound handling.
*/
@@ -136,7 +136,7 @@ struct drm_vblank_crtc {
* call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly
* save and restore the vblank count.
*/
- unsigned int inmodeset; /* Display driver is setting mode */
+ unsigned int inmodeset;
/**
* @pipe: drm_crtc_index() of the &drm_crtc corresponding to this
* structure.
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d87b268f1781..47e19796c450 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -264,6 +264,7 @@ struct drm_sched_backend_ops {
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
* @num_jobs: the number of jobs in queue in the scheduler
+ * @ready: marks whether the underlying hardware is ready to work
*
* One scheduler is implemented for each hardware ring.
*/
@@ -283,22 +284,26 @@ struct drm_gpu_scheduler {
spinlock_t job_list_lock;
int hang_limit;
atomic_t num_jobs;
+ bool ready;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
uint32_t hw_submission, unsigned hang_limit, long timeout,
const char *name);
+
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
struct drm_sched_job *job);
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
@@ -326,4 +331,8 @@ struct drm_sched_fence *drm_sched_fence_create(
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining);
+
#endif
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index fd965ffbb92e..192667144693 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -365,16 +365,20 @@
INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
/* AML/KBL Y GT2 */
-#define INTEL_AML_GT2_IDS(info) \
+#define INTEL_AML_KBL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
+/* AML/CFL Y GT2 */
+#define INTEL_AML_CFL_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x87CA, info)
+
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
INTEL_KBL_GT3_IDS(info), \
INTEL_KBL_GT4_IDS(info), \
- INTEL_AML_GT2_IDS(info)
+ INTEL_AML_KBL_GT2_IDS(info)
/* CFL S */
#define INTEL_CFL_S_GT1_IDS(info) \
@@ -407,17 +411,17 @@
/* WHL/CFL U GT1 */
#define INTEL_WHL_U_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA1, info)
+ INTEL_VGA_DEVICE(0x3EA1, info), \
+ INTEL_VGA_DEVICE(0x3EA4, info)
/* WHL/CFL U GT2 */
#define INTEL_WHL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA0, info)
+ INTEL_VGA_DEVICE(0x3EA0, info), \
+ INTEL_VGA_DEVICE(0x3EA3, info)
/* WHL/CFL U GT3 */
#define INTEL_WHL_U_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA2, info), \
- INTEL_VGA_DEVICE(0x3EA3, info), \
- INTEL_VGA_DEVICE(0x3EA4, info)
+ INTEL_VGA_DEVICE(0x3EA2, info)
#define INTEL_CFL_IDS(info) \
INTEL_CFL_S_GT1_IDS(info), \
@@ -427,7 +431,8 @@
INTEL_CFL_U_GT3_IDS(info), \
INTEL_WHL_U_GT1_IDS(info), \
INTEL_WHL_U_GT2_IDS(info), \
- INTEL_WHL_U_GT3_IDS(info)
+ INTEL_WHL_U_GT3_IDS(info), \
+ INTEL_AML_CFL_GT2_IDS(info)
/* CNL */
#define INTEL_CNL_IDS(info) \
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index fe9827d0ca8a..448aa5ea4722 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -10,10 +10,15 @@
#ifndef __LINUX_TINYDRM_H
#define __LINUX_TINYDRM_H
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <linux/mutex.h>
#include <drm/drm_simple_kms_helper.h>
+struct drm_clip_rect;
+struct drm_driver;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_framebuffer_funcs;
+
/**
* struct tinydrm_device - tinydrm device
*/
@@ -54,27 +59,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
}
/**
- * TINYDRM_GEM_DRIVER_OPS - default tinydrm gem operations
- *
- * This macro provides a shortcut for setting the tinydrm GEM operations in
- * the &drm_driver structure.
- */
-#define TINYDRM_GEM_DRIVER_OPS \
- .gem_free_object_unlocked = tinydrm_gem_cma_free_object, \
- .gem_print_info = drm_gem_cma_print_info, \
- .gem_vm_ops = &drm_gem_cma_vm_ops, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import = drm_gem_prime_import, \
- .gem_prime_export = drm_gem_prime_export, \
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, \
- .gem_prime_import_sg_table = tinydrm_gem_cma_prime_import_sg_table, \
- .gem_prime_vmap = drm_gem_cma_prime_vmap, \
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap, \
- .gem_prime_mmap = drm_gem_cma_prime_mmap, \
- .dumb_create = drm_gem_cma_dumb_create
-
-/**
* TINYDRM_MODE - tinydrm display mode
* @hd: Horizontal resolution, width
* @vd: Vertical resolution, height
@@ -97,11 +81,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
.type = DRM_MODE_TYPE_DRIVER, \
.clock = 1 /* pass validation */
-void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj);
-struct drm_gem_object *
-tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
const struct drm_framebuffer_funcs *fb_funcs,
struct drm_driver *driver);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e4fee8e02559..1021106438b2 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -31,7 +31,6 @@
#define _TTM_BO_DRIVER_H_
#include <drm/drm_mm.h>
-#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
@@ -385,15 +384,6 @@ struct ttm_bo_driver {
};
/**
- * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
- */
-
-struct ttm_bo_global_ref {
- struct drm_global_reference ref;
- struct ttm_mem_global *mem_glob;
-};
-
-/**
* struct ttm_bo_global - Buffer object driver global data.
*
* @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
@@ -407,7 +397,7 @@ struct ttm_bo_global_ref {
* @swap_lru: Lru list of buffer objects used for swapping.
*/
-struct ttm_bo_global {
+extern struct ttm_bo_global {
/**
* Constant after init.
@@ -416,12 +406,12 @@ struct ttm_bo_global {
struct kobject kobj;
struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
- struct mutex device_list_mutex;
spinlock_t lru_lock;
/**
- * Protected by device_list_mutex.
+ * Protected by ttm_global_mutex.
*/
+ unsigned int use_count;
struct list_head device_list;
/**
@@ -433,7 +423,7 @@ struct ttm_bo_global {
* Internal protection.
*/
atomic_t bo_count;
-};
+} ttm_bo_glob;
#define TTM_NUM_MEM_TYPES 8
@@ -578,9 +568,6 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-void ttm_bo_global_release(struct drm_global_reference *ref);
-int ttm_bo_global_init(struct drm_global_reference *ref);
-
int ttm_bo_device_release(struct ttm_bo_device *bdev);
/**
@@ -598,7 +585,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
* Returns:
* !0: Failure.
*/
-int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset, bool need_dma32);
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index b0fdd1980034..621615fa7728 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -40,13 +40,13 @@
*
* @head: list head for thread-private list.
* @bo: refcounted buffer object pointer.
- * @shared: should the fence be added shared?
+ * @num_shared: How many shared fences we want to add.
*/
struct ttm_validate_buffer {
struct list_head head;
struct ttm_buffer_object *bo;
- bool shared;
+ unsigned int num_shared;
};
/**
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 737b5fed8003..3ff48a0a2d7b 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -63,7 +63,7 @@
#define TTM_MEM_MAX_ZONES 2
struct ttm_mem_zone;
-struct ttm_mem_global {
+extern struct ttm_mem_global {
struct kobject kobj;
struct ttm_bo_global *bo_glob;
struct workqueue_struct *swap_queue;
@@ -78,7 +78,7 @@ struct ttm_mem_global {
#else
struct ttm_mem_zone *zone_dma32;
#endif
-};
+} ttm_mem_glob;
extern int ttm_mem_global_init(struct ttm_mem_global *glob);
extern void ttm_mem_global_release(struct ttm_mem_global *glob);
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index a83e1f632eb7..f01623aef2f7 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index cb31683bbe15..8268811a697e 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp);
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp);
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+ struct sk_buff *skb);
void can_rx_offload_reset(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 6b92b3395fa9..65a38c4a02a1 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -213,12 +213,6 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
CEPH_FEATURE_CEPHX_V2)
-#define CEPH_FEATURES_REQUIRED_DEFAULT \
- (CEPH_FEATURE_NOSRCADDR | \
- CEPH_FEATURE_SUBSCRIBE2 | \
- CEPH_FEATURE_RECONNECT_SEQ | \
- CEPH_FEATURE_PGID64 | \
- CEPH_FEATURE_PGPOOL3 | \
- CEPH_FEATURE_OSDENC)
+#define CEPH_FEATURES_REQUIRED_DEFAULT 0
#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index c0f5db3a9621..2010493e1040 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -143,18 +143,6 @@
#define KASAN_ABI_VERSION 3
#endif
-/*
- * Because __no_sanitize_address conflicts with inlining:
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * we do one or the other.
- */
-#ifdef CONFIG_KASAN
-#define __no_sanitize_address_or_inline \
- __no_sanitize_address __maybe_unused notrace
-#else
-#define __no_sanitize_address_or_inline inline
-#endif
-
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 18c80cfa4fc4..06396c1cf127 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -189,7 +189,7 @@ void __read_once_size(const volatile void *p, void *res, int size)
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
-# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 6b28c1b7310c..f8c400ba1929 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -4,22 +4,26 @@
/*
* The attributes in this file are unconditionally defined and they directly
- * map to compiler attribute(s) -- except those that are optional.
+ * map to compiler attribute(s), unless one of the compilers does not support
+ * the attribute. In that case, __has_attribute is used to check for support
+ * and the reason is stated in its comment ("Optional: ...").
*
* Any other "attributes" (i.e. those that depend on a configuration option,
* on a compiler, on an architecture, on plugins, on other attributes...)
* should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
+ * The intention is to keep this file as simple as possible, as well as
+ * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
*
* This file is meant to be sorted (by actual attribute name,
* not by #define identifier). Use the __attribute__((__name__)) syntax
* (i.e. with underscores) to avoid future collisions with other macros.
- * If an attribute is optional, state the reason in the comment.
+ * Provide links to the documentation of each supported compiler, if it exists.
*/
/*
- * To check for optional attributes, we use __has_attribute, which is supported
- * on gcc >= 5, clang >= 2.9 and icc >= 17. In the meantime, to support
- * 4.6 <= gcc < 5, we implement __has_attribute by hand.
+ * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
+ * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
+ * by hand.
*
* sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
* depending on the compiler used to build it; however, these attributes have
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 3439d7d0249a..4a3f9c09c92d 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -130,6 +130,10 @@ struct ftrace_likely_data {
# define randomized_struct_fields_end
#endif
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index bd73e7a91410..9e66bfe369aa 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -5,7 +5,7 @@
#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>
-#define DIRECT_MAPPING_ERROR 0
+#define DIRECT_MAPPING_ERROR (~(dma_addr_t)0)
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 02dba8cd033d..999e4b104410 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -541,6 +541,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
+struct dma_fence *dma_fence_get_stub(void);
u64 dma_fence_context_alloc(unsigned num);
#define DMA_FENCE_TRACE(f, fmt, args...) \
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 845174e113ce..100ce4a4aff6 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1167,6 +1167,8 @@ static inline bool efi_enabled(int feature)
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
extern bool efi_is_table_address(unsigned long phys_addr);
+
+extern int efi_apply_persistent_mem_reservations(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -1185,6 +1187,11 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
{
return false;
}
+
+static inline int efi_apply_persistent_mem_reservations(void)
+{
+ return 0;
+}
#endif
extern int efi_status_to_err(efi_status_t status);
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 4f3febc0f971..d2bacf502429 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -163,6 +163,9 @@ struct hdmi_avi_infoframe {
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size);
+ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame);
enum hdmi_spd_sdi {
HDMI_SPD_SDI_UNKNOWN,
@@ -194,6 +197,9 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
const char *vendor, const char *product);
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
size_t size);
+ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame);
enum hdmi_audio_coding_type {
HDMI_AUDIO_CODING_TYPE_STREAM,
@@ -272,6 +278,9 @@ struct hdmi_audio_infoframe {
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
+ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame);
enum hdmi_3d_structure {
HDMI_3D_STRUCTURE_INVALID = -1,
@@ -299,6 +308,9 @@ struct hdmi_vendor_infoframe {
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size);
+ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame);
union hdmi_vendor_any_infoframe {
struct {
@@ -330,10 +342,14 @@ union hdmi_infoframe {
struct hdmi_audio_infoframe audio;
};
-ssize_t
-hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
-int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
+ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_infoframe_check(union hdmi_infoframe *frame);
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
+ const void *buffer, size_t size);
void hdmi_infoframe_log(const char *level, struct device *dev,
- union hdmi_infoframe *frame);
+ const union hdmi_infoframe *frame);
#endif /* _DRM_HDMI_H */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 2827b87590d8..a355d61940f2 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -722,8 +722,8 @@ struct hid_usage_id {
* input will not be passed to raw_event unless hid_device_io_start is
* called.
*
- * raw_event and event should return 0 on no action performed, 1 when no
- * further processing should be done and negative on error
+ * raw_event and event should return a negative value on error; any other
+ * value will pass the event on to .event(). Typically, return 0 on success.
*
* input_mapping shall return a negative value to completely ignore this usage
* (e.g. doubled or invalid usage), zero to continue with parsing of this
@@ -1139,34 +1139,6 @@ static inline u32 hid_report_len(struct hid_report *report)
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt);
-
-/**
- * struct hid_scroll_counter - Utility class for processing high-resolution
- * scroll events.
- * @dev: the input device for which events should be reported.
- * @microns_per_hi_res_unit: the amount moved by the user's finger for each
- * high-resolution unit reported by the mouse, in
- * microns.
- * @resolution_multiplier: the wheel's resolution in high-resolution mode as a
- * multiple of its lower resolution. For example, if
- * moving the wheel by one "notch" would result in a
- * value of 1 in low-resolution mode but 8 in
- * high-resolution, the multiplier is 8.
- * @remainder: counts the number of high-resolution units moved since the last
- * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
- * only be used by class methods.
- */
-struct hid_scroll_counter {
- struct input_dev *dev;
- int microns_per_hi_res_unit;
- int resolution_multiplier;
-
- int remainder;
-};
-
-void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
- int hi_res_value);
-
/* HID quirks API */
unsigned long hid_lookup_quirk(const struct hid_device *hdev);
int hid_quirks_init(char **quirks_param, __u16 bus, int count);
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index e6bb36a97519..8336b2f6f834 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,6 +21,7 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
+extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fcf9cc9d535f..5411de93a363 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1744,11 +1744,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
+ if (mm_pud_folded(mm))
+ return;
atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
+ if (mm_pud_folded(mm))
+ return;
atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif
@@ -1768,11 +1772,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
+ if (mm_pmd_folded(mm))
+ return;
atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
+ if (mm_pmd_folded(mm))
+ return;
atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index abe975c87b90..7f53ece2c039 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
*/
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
- return (u64)nand->memorg.luns_per_target *
- nand->memorg.eraseblocks_per_lun *
- nand->memorg.pages_per_eraseblock;
+ return nand->memorg.ntargets * nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun;
}
/**
@@ -569,7 +568,7 @@ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
}
/**
- * nanddev_pos_next_eraseblock() - Move a position to the next page
+ * nanddev_pos_next_page() - Move a position to the next page
* @nand: NAND device
* @pos: the position to update
*
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
index c79e859408e6..fd458389f7d1 100644
--- a/include/linux/net_dim.h
+++ b/include/linux/net_dim.h
@@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim,
}
/* fall through */
case NET_DIM_START_MEASURE:
+ net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr,
+ &dim->start_sample);
dim->state = NET_DIM_MEASURE_IN_PROGRESS;
break;
case NET_DIM_APPLY_NEW_PROFILE:
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index dc1d9ed33b31..857f8abf7b91 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3190,6 +3190,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
#endif
}
+/* Variant of netdev_tx_sent_queue() for drivers that are aware
+ * that they should not test BQL status themselves.
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
+ * skb of a batch.
+ * Returns true if the doorbell must be used to kick the NIC.
+ */
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ if (xmit_more) {
+#ifdef CONFIG_BQL
+ dql_queued(&dev_queue->dql, bytes);
+#endif
+ return netif_tx_queue_stopped(dev_queue);
+ }
+ netdev_tx_sent_queue(dev_queue, bytes);
+ return true;
+}
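A sketch of the intended call pattern in a driver's xmit path; foo_ring_doorbell() is hypothetical, and in this kernel the batching hint comes from skb->xmit_more:

bool kick = __netdev_tx_sent_queue(txq, skb->len, skb->xmit_more);

if (kick)
        foo_ring_doorbell(ring);        /* write the NIC doorbell once per batch */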
+
/**
* netdev_sent_queue - report the number of bytes queued to hardware
* @dev: network device
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 34fc80f3eb90..1d100efe74ec 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -314,7 +314,7 @@ enum {
extern ip_set_id_t ip_set_get_byname(struct net *net,
const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
-extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 8e2bab1e8e90..70877f8de7e9 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
rcu_assign_pointer(comment->c, c);
}
-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
+/* Used only when dumping a set, protected by rcu_read_lock() */
static inline int
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
- struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+ struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
if (!c)
return 0;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 08f9247e9827..9003e29cde46 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -119,6 +119,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }
void watchdog_nmi_stop(void);
void watchdog_nmi_start(void);
int watchdog_nmi_probe(void);
+int watchdog_nmi_enable(unsigned int cpu);
+void watchdog_nmi_disable(unsigned int cpu);
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 02166e815afb..2f0ffca35780 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -68,7 +68,6 @@ struct reservation_object_list {
* @seq: sequence count for managing RCU read-side synchronization
* @fence_excl: the exclusive fence, if there is one currently
* @fence: list of current shared fences
- * @staged: staged copy of shared fences for RCU updates
*/
struct reservation_object {
struct ww_mutex lock;
@@ -76,7 +75,6 @@ struct reservation_object {
struct dma_fence __rcu *fence_excl;
struct reservation_object_list __rcu *fence;
- struct reservation_object_list *staged;
};
#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
@@ -95,7 +93,6 @@ reservation_object_init(struct reservation_object *obj)
__seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
RCU_INIT_POINTER(obj->fence, NULL);
RCU_INIT_POINTER(obj->fence_excl, NULL);
- obj->staged = NULL;
}
/**
@@ -124,7 +121,6 @@ reservation_object_fini(struct reservation_object *obj)
kfree(fobj);
}
- kfree(obj->staged);
ww_mutex_destroy(&obj->lock);
}
@@ -218,6 +214,11 @@ reservation_object_trylock(struct reservation_object *obj)
static inline void
reservation_object_unlock(struct reservation_object *obj)
{
+#ifdef CONFIG_DEBUG_MUTEXES
+ /* Test shared fence slot reservation */
+ if (obj->fence)
+ obj->fence->shared_max = obj->fence->shared_count;
+#endif
ww_mutex_unlock(&obj->lock);
}
@@ -265,7 +266,8 @@ reservation_object_get_excl_rcu(struct reservation_object *obj)
return fence;
}
-int reservation_object_reserve_shared(struct reservation_object *obj);
+int reservation_object_reserve_shared(struct reservation_object *obj,
+ unsigned int num_fences);
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct dma_fence *fence);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0ba687454267..0d1b2c3f127b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1326,6 +1326,22 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
}
}
+static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
+{
+ skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
+ skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+}
+
+static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
+{
+ return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
+}
+
+static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
+{
+ return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
+}
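A sketch of the intended use: stash a pointer-sized cookie (whose low bit must be clear) instead of a full ubuf_info, then recover it later; msg_ctl is a hypothetical caller-owned pointer:

skb_zcopy_set_nouarg(skb, msg_ctl);     /* tag skb, no ubuf_info allocated */

/* ... later, e.g. on completion ... */
if (skb_zcopy_is_nouarg(skb))
        msg_ctl = skb_zcopy_get_nouarg(skb);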
+
/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
@@ -1335,7 +1351,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
if (uarg->callback == sock_zerocopy_callback) {
uarg->zerocopy = uarg->zerocopy && zerocopy;
sock_zerocopy_put(uarg);
- } else {
+ } else if (!skb_zcopy_is_nouarg(skb)) {
uarg->callback(uarg, zerocopy);
}
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d8a07a4f171d..a8f6d5d89524 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -18,6 +18,8 @@ struct notifier_block;
struct bio;
+struct pagevec;
+
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT 0
@@ -369,7 +371,7 @@ static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
#endif
extern int page_evictable(struct page *page);
-extern void check_move_unevictable_pages(struct page **, int nr_pages);
+extern void check_move_unevictable_pages(struct pagevec *pvec);
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 987cefa337de..786816cf4aa5 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -234,7 +234,7 @@ int __must_check sysfs_create_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns);
int __must_check sysfs_create_files(struct kobject *kobj,
- const struct attribute **attr);
+ const struct attribute * const *attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
@@ -243,7 +243,7 @@ void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
-void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
+void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr);
int __must_check sysfs_create_bin_file(struct kobject *kobj,
const struct bin_attribute *attr);
@@ -342,7 +342,7 @@ static inline int sysfs_create_file_ns(struct kobject *kobj,
}
static inline int sysfs_create_files(struct kobject *kobj,
- const struct attribute **attr)
+ const struct attribute * const *attr)
{
return 0;
}
@@ -377,7 +377,7 @@ static inline bool sysfs_remove_file_self(struct kobject *kobj,
}
static inline void sysfs_remove_files(struct kobject *kobj,
- const struct attribute **attr)
+ const struct attribute * const *attr)
{
}
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 8ed77bb4ed86..a9b0280687d5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -196,6 +196,7 @@ struct tcp_sock {
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
+ u32 compressed_ack_rcv_nxt;
u32 tsoffset; /* timestamp offset */
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index b7a99ce56bc9..a1be64c9940f 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -66,4 +66,7 @@
/* Device needs a pause after every control message. */
#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
+/* Hub needs extra delay after resetting its port. */
+#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index d9514928ddac..564892e19f8c 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -289,9 +289,7 @@ struct xarray {
void xa_init_flags(struct xarray *, gfp_t flags);
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
-void *xa_cmpxchg(struct xarray *, unsigned long index,
- void *old, void *entry, gfp_t);
-int xa_reserve(struct xarray *, unsigned long index, gfp_t);
+void *xa_erase(struct xarray *, unsigned long index);
void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -344,65 +342,6 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
}
/**
- * xa_erase() - Erase this entry from the XArray.
- * @xa: XArray.
- * @index: Index of entry.
- *
- * This function is the equivalent of calling xa_store() with %NULL as
- * the third argument. The XArray does not need to allocate memory, so
- * the user does not need to provide GFP flags.
- *
- * Context: Process context. Takes and releases the xa_lock.
- * Return: The entry which used to be at this index.
- */
-static inline void *xa_erase(struct xarray *xa, unsigned long index)
-{
- return xa_store(xa, index, NULL, 0);
-}
-
-/**
- * xa_insert() - Store this entry in the XArray unless another entry is
- * already present.
- * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
- *
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
- *
- * Context: Process context. Takes and releases the xa_lock.
- * May sleep if the @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
- * -ENOMEM if memory could not be allocated.
- */
-static inline int xa_insert(struct xarray *xa, unsigned long index,
- void *entry, gfp_t gfp)
-{
- void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
- if (!curr)
- return 0;
- if (xa_is_err(curr))
- return xa_err(curr);
- return -EEXIST;
-}
-
-/**
- * xa_release() - Release a reserved entry.
- * @xa: XArray.
- * @index: Index of entry.
- *
- * After calling xa_reserve(), you can call this function to release the
- * reservation. If the entry at @index has been stored to, this function
- * will do nothing.
- */
-static inline void xa_release(struct xarray *xa, unsigned long index)
-{
- xa_cmpxchg(xa, index, NULL, NULL, 0);
-}
-
-/**
* xa_for_each() - Iterate over a portion of an XArray.
* @xa: XArray.
* @entry: Entry retrieved from array.
@@ -455,6 +394,7 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
void *entry, gfp_t);
int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
+int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -487,6 +427,58 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
}
/**
+ * xa_store_bh() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_store() except it disables softirqs
+ * while holding the array lock.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_bh(xa);
+ curr = __xa_store(xa, index, entry, gfp);
+ xa_unlock_bh(xa);
+
+ return curr;
+}
+
+/**
+ * xa_store_irq() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_store() except it disables interrupts
+ * while holding the array lock.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_irq(xa);
+ curr = __xa_store(xa, index, entry, gfp);
+ xa_unlock_irq(xa);
+
+ return curr;
+}
+
+/**
* xa_erase_bh() - Erase this entry from the XArray.
* @xa: XArray.
* @index: Index of entry.
@@ -495,7 +487,7 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
* the third argument. The XArray does not need to allocate memory, so
* the user does not need to provide GFP flags.
*
- * Context: Process context. Takes and releases the xa_lock while
+ * Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
* Return: The entry which used to be at this index.
*/
@@ -535,6 +527,61 @@ static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
}
/**
+ * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
+ * Context: Any context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock(xa);
+ curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+ xa_unlock(xa);
+
+ return curr;
+}
+
+/**
+ * xa_insert() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * If you would rather see the existing entry in the array, use xa_cmpxchg().
+ * This function is for users who don't care what the entry is, only that
+ * one is present.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
+ if (!curr)
+ return 0;
+ if (xa_is_err(curr))
+ return xa_err(curr);
+ return -EEXIST;
+}
+
+/**
* xa_alloc() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
@@ -575,7 +622,7 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
* Updates the @id pointer with the index, then stores the entry at that
* index. A concurrent lookup will not see an uninitialised @id.
*
- * Context: Process context. Takes and releases the xa_lock while
+ * Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
* there is no more space in the XArray.
@@ -621,6 +668,98 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
return err;
}
+/**
+ * xa_reserve() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * Ensures there is somewhere to store an entry at @index in the array.
+ * If there is already something stored at @index, this function does
+ * nothing. If there was nothing there, the entry is marked as reserved.
+ * Loading from a reserved entry returns a %NULL pointer.
+ *
+ * If you do not use the entry that you have reserved, call xa_release()
+ * or xa_erase() to free any unnecessary memory.
+ *
+ * Context: Any context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ int ret;
+
+ xa_lock(xa);
+ ret = __xa_reserve(xa, index, gfp);
+ xa_unlock(xa);
+
+ return ret;
+}
+
+/**
+ * xa_reserve_bh() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * A softirq-disabling version of xa_reserve().
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ int ret;
+
+ xa_lock_bh(xa);
+ ret = __xa_reserve(xa, index, gfp);
+ xa_unlock_bh(xa);
+
+ return ret;
+}
+
+/**
+ * xa_reserve_irq() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * An interrupt-disabling version of xa_reserve().
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ int ret;
+
+ xa_lock_irq(xa);
+ ret = __xa_reserve(xa, index, gfp);
+ xa_unlock_irq(xa);
+
+ return ret;
+}
+
+/**
+ * xa_release() - Release a reserved entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * After calling xa_reserve(), you can call this function to release the
+ * reservation. If the entry at @index has been stored to, this function
+ * will do nothing.
+ */
+static inline void xa_release(struct xarray *xa, unsigned long index)
+{
+ xa_cmpxchg(xa, index, NULL, NULL, 0);
+}
+
/* Everything below here is the Advanced API. Proceed with caution. */
/*
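
Review note: taken together, the xarray.h changes make xa_erase(), xa_cmpxchg(), xa_insert() and xa_release() locking wrappers around the __xa_* primitives, and add _bh/_irq variants of xa_store() and xa_reserve(). A minimal sketch of the normal-context API under these definitions (hypothetical cache, error paths trimmed):

	static DEFINE_XARRAY(cache);

	static int cache_example(void)
	{
		void *old;
		int err;

		/* Reserve index 7 so a later store cannot fail on allocation. */
		err = xa_reserve(&cache, 7, GFP_KERNEL);
		if (err)
			return err;

		/* xa_insert() fails with -EEXIST rather than overwriting. */
		err = xa_insert(&cache, 7, xa_mk_value(1), GFP_KERNEL);
		if (err)
			return err;

		/* Replace the entry only if it is still what we stored. */
		old = xa_cmpxchg(&cache, 7, xa_mk_value(1), xa_mk_value(2),
				 GFP_KERNEL);
		if (xa_is_err(old))
			return xa_err(old);

		xa_erase(&cache, 7);	/* no GFP flags needed */
		xa_release(&cache, 7);	/* harmless: nothing reserved any more */
		return 0;
	}
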
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 58c1ecf3d648..5467264771ec 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -624,7 +624,7 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
/* v4l2 request helper */
-void vb2_m2m_request_queue(struct media_request *req);
+void v4l2_m2m_request_queue(struct media_request *req);
/* v4l2 ioctl helpers */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 14b789a123e7..1656c5978498 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -317,6 +317,8 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
+int ipv6_anycast_init(void);
+void ipv6_anycast_cleanup(void);
/* Device notifier */
int register_inet6addr_notifier(struct notifier_block *nb);
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index de587948042a..1adefe42c0a6 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -77,7 +77,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *, struct key *);
int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
enum rxrpc_call_completion *, u32 *);
-u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *);
+u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
ktime_t *);
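
Review note: the split here is that rxrpc_kernel_check_life() becomes a const, side-effect-free query while the new rxrpc_kernel_probe_life() actively pings the peer. A hedged sketch of a caller polling call liveness (the wrapper and *last_life bookkeeping are hypothetical):

	/* Returns true if the call showed signs of life since the last check. */
	static bool my_call_alive(struct socket *sock, struct rxrpc_call *call,
				  u32 *last_life)
	{
		u32 life = rxrpc_kernel_check_life(sock, call);

		if (life == *last_life) {
			/* Quiet since last time: ask the peer for an ACK. */
			rxrpc_kernel_probe_life(sock, call);
			return false;
		}
		*last_life = life;
		return true;
	}
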
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index d7578cf49c3a..c9c78c15bce0 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -146,10 +146,12 @@ struct ifacaddr6 {
struct in6_addr aca_addr;
struct fib6_info *aca_rt;
struct ifacaddr6 *aca_next;
+ struct hlist_node aca_addr_lst;
int aca_users;
refcount_t aca_refcnt;
unsigned long aca_cstamp;
unsigned long aca_tstamp;
+ struct rcu_head rcu;
};
#define IFA_HOST IPV6_ADDR_LOOPBACK
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index eed04af9b75e..ae7b86f587f2 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -153,4 +153,43 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const char *fmt, ...) { }
#endif /* CONFIG_SYSCTL */
+static inline struct nf_generic_net *nf_generic_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.generic;
+}
+
+static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.tcp;
+}
+
+static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.udp;
+}
+
+static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmp;
+}
+
+static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmpv6;
+}
+
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.dccp;
+}
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.sctp;
+}
+#endif
+
#endif /*_NF_CONNTRACK_PROTOCOL_H*/
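
Review note: these accessors centralise the pointer chasing through net->ct.nf_ct_proto so call sites stop open-coding the struct layout. A sketch of the intended conversion (the timeout lookup is illustrative):

	static unsigned int my_tcp_timeout(struct net *net)
	{
		/* Instead of open-coding &net->ct.nf_ct_proto.tcp everywhere. */
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		return tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
	}
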
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 8c2caa370e0f..ab9242e51d9e 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -608,4 +608,16 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
SCTP_DEFAULT_MINSEGMENT));
}
+static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+{
+ __u32 pmtu = sctp_dst_mtu(t->dst);
+
+ if (t->pathmtu == pmtu)
+ return true;
+
+ t->pathmtu = pmtu;
+
+ return false;
+}
+
#endif /* __net_sctp_h__ */
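
Review note: sctp_transport_pmtu_check() both tests and refreshes the cached path MTU, so a false return doubles as a change notification. A minimal sketch of the expected pattern:

	static void my_check_pmtu(struct sctp_transport *t)
	{
		/*
		 * true: cached t->pathmtu already matched the dst.
		 * false: the helper has just updated t->pathmtu.
		 */
		if (!sctp_transport_pmtu_check(t))
			pr_debug("path MTU changed to %u\n", t->pathmtu);
	}
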
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
index a9834c37ac40..c0e7d24ca256 100644
--- a/include/trace/events/kyber.h
+++ b/include/trace/events/kyber.h
@@ -31,8 +31,8 @@ TRACE_EVENT(kyber_latency,
TP_fast_assign(
__entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
- strlcpy(__entry->domain, domain, DOMAIN_LEN);
- strlcpy(__entry->type, type, DOMAIN_LEN);
+ strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ strlcpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile;
__entry->numerator = numerator;
__entry->denominator = denominator;
@@ -60,7 +60,7 @@ TRACE_EVENT(kyber_adjust,
TP_fast_assign(
__entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
- strlcpy(__entry->domain, domain, DOMAIN_LEN);
+ strlcpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth;
),
@@ -82,7 +82,7 @@ TRACE_EVENT(kyber_throttled,
TP_fast_assign(
__entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
- strlcpy(__entry->domain, domain, DOMAIN_LEN);
+ strlcpy(__entry->domain, domain, sizeof(__entry->domain));
),
TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
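
Review note: bounding each copy by sizeof() of its own destination removes the hidden assumption that DOMAIN_LEN matches every field it is copied into. The general pattern, as a standalone illustration (struct and sizes hypothetical):

	struct entry {
		char domain[16];
		char type[8];	/* deliberately not the same size */
	};

	static void fill(struct entry *e, const char *domain, const char *type)
	{
		/* Stays correct even when the two fields diverge in size. */
		strlcpy(e->domain, domain, sizeof(e->domain));
		strlcpy(e->type, type, sizeof(e->type));
	}
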
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 573d5b901fb1..5b50fe4906d2 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -181,6 +181,7 @@ enum rxrpc_timer_trace {
enum rxrpc_propose_ack_trace {
rxrpc_propose_ack_client_tx_end,
rxrpc_propose_ack_input_data,
+ rxrpc_propose_ack_ping_for_check_life,
rxrpc_propose_ack_ping_for_keepalive,
rxrpc_propose_ack_ping_for_lost_ack,
rxrpc_propose_ack_ping_for_lost_reply,
@@ -380,6 +381,7 @@ enum rxrpc_tx_point {
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
+ EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 370e9a5536ef..be84e43c1e19 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -326,6 +326,12 @@ struct drm_amdgpu_gem_userptr {
/* GFX9 and later: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
+#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
+#define AMDGPU_TILING_DCC_OFFSET_256B_MASK 0xFFFFFF
+#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT 29
+#define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 0cd40ebfa1b1..0b44260a5ee9 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -151,6 +151,21 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * Packed YCbCr420 2x2 tiled formats.
+ * The first 64 bits contain the Y, Cb and Cr components for a 2x2 tile.
+ */
+/* [63:0] A3:A2:Y3:0:Cr0:0:Y2:0:A1:A0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_Y0L0 fourcc_code('Y', '0', 'L', '0')
+/* [63:0] X3:X2:Y3:0:Cr0:0:Y2:0:X1:X0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_X0L0 fourcc_code('X', '0', 'L', '0')
+
+/* [63:0] A3:A2:Y3:Cr0:Y2:A1:A0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_Y0L2 fourcc_code('Y', '0', 'L', '2')
+/* [63:0] X3:X2:Y3:Cr0:Y2:X1:X0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_X0L2 fourcc_code('X', '0', 'L', '2')
/*
* 2 plane RGB + A
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index d3e0fe31efc5..a439c2e67896 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -888,6 +888,25 @@ struct drm_mode_revoke_lease {
__u32 lessee_id;
};
+/**
+ * struct drm_mode_rect - Two dimensional rectangle.
+ * @x1: Horizontal starting coordinate (inclusive).
+ * @y1: Vertical starting coordinate (inclusive).
+ * @x2: Horizontal ending coordinate (exclusive).
+ * @y2: Vertical ending coordinate (exclusive).
+ *
+ * With the drm subsystem using struct drm_rect to manage rectangular areas,
+ * this structure is exported to user-space.
+ *
+ * Currently used by drm_mode_atomic blob property FB_DAMAGE_CLIPS.
+ */
+struct drm_mode_rect {
+ __s32 x1;
+ __s32 y1;
+ __s32 x2;
+ __s32 y2;
+};
+
#if defined(__cplusplus)
}
#endif
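
Review note: user-space passes an array of these rectangles as a property blob attached to a plane's FB_DAMAGE_CLIPS property. A hedged libdrm-style sketch (fd, req, plane_id and the looked-up fb_damage_clips_prop id are assumed to exist):

	/* Mark two damaged regions for this atomic commit. */
	struct drm_mode_rect clips[2] = {
		{ .x1 = 0,   .y1 = 0,  .x2 = 64,  .y2 = 64 },	/* x2/y2 exclusive */
		{ .x1 = 128, .y1 = 32, .x2 = 256, .y2 = 96 },
	};
	uint32_t blob_id;

	drmModeCreatePropertyBlob(fd, clips, sizeof(clips), &blob_id);
	drmModeAtomicAddProperty(req, plane_id, fb_damage_clips_prop, blob_id);
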
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index a4446f452040..298b2e197744 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -412,6 +412,14 @@ typedef struct drm_i915_irq_wait {
int irq_seq;
} drm_i915_irq_wait_t;
+/*
+ * Different modes of per-process Graphics Translation Table,
+ * see I915_PARAM_HAS_ALIASING_PPGTT
+ */
+#define I915_GEM_PPGTT_NONE 0
+#define I915_GEM_PPGTT_ALIASING 1
+#define I915_GEM_PPGTT_FULL 2
+
/* Ioctl to query kernel params:
*/
#define I915_PARAM_IRQ_ACTIVE 1
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 7b6627783608..35c7d813c66e 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -36,6 +36,7 @@ extern "C" {
#define DRM_V3D_MMAP_BO 0x03
#define DRM_V3D_GET_PARAM 0x04
#define DRM_V3D_GET_BO_OFFSET 0x05
+#define DRM_V3D_SUBMIT_TFU 0x06
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -43,6 +44,7 @@ extern "C" {
#define DRM_IOCTL_V3D_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
#define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
+#define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -58,10 +60,15 @@ struct drm_v3d_submit_cl {
* coordinate shader to determine where primitives land on the screen,
* then writes out the state updates and draw calls necessary per tile
* to the tile allocation BO.
+ *
+ * This BCL will block on any previous BCL submitted on the
+ * same FD, but not on any RCL or BCLs submitted by other
+ * clients -- that is left up to the submitter to control
+ * using in_sync_bcl if necessary.
*/
__u32 bcl_start;
- /** End address of the BCL (first byte after the BCL) */
+ /** End address of the BCL (first byte after the BCL) */
__u32 bcl_end;
/* Offset of the render command list.
@@ -69,10 +76,15 @@ struct drm_v3d_submit_cl {
* This is the second set of commands executed, which will either
* execute the tiles that have been set up by the BCL, or a fixed set
* of tiles (in the case of RCL-only blits).
+ *
+ * This RCL will block on this submit's BCL, and any previous
+ * RCL submitted on the same FD, but not on any RCL or BCLs
+ * submitted by other clients -- that is left up to the
+ * submitter to control using in_sync_rcl if necessary.
*/
__u32 rcl_start;
- /** End address of the RCL (first byte after the RCL) */
+ /** End address of the RCL (first byte after the RCL) */
__u32 rcl_end;
/** An optional sync object to wait on before starting the BCL. */
@@ -169,6 +181,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_V3D_CORE0_IDENT0,
DRM_V3D_PARAM_V3D_CORE0_IDENT1,
DRM_V3D_PARAM_V3D_CORE0_IDENT2,
+ DRM_V3D_PARAM_SUPPORTS_TFU,
};
struct drm_v3d_get_param {
@@ -187,6 +200,28 @@ struct drm_v3d_get_bo_offset {
__u32 offset;
};
+struct drm_v3d_submit_tfu {
+ __u32 icfg;
+ __u32 iia;
+ __u32 iis;
+ __u32 ica;
+ __u32 iua;
+ __u32 ioa;
+ __u32 ios;
+ __u32 coef[4];
+ /* First handle is the output BO, following are other inputs.
+ * 0 for unused.
+ */
+ __u32 bo_handles[4];
+ /* Sync object to block on before running the TFU job. Each TFU
+ * job will execute in the order submitted to its FD. Synchronization
+ * against rendering jobs requires using sync objects.
+ */
+ __u32 in_sync;
+ /* Sync object to signal when the TFU job is done. */
+ __u32 out_sync;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 9a781f0611df..f06a789f34cd 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -47,6 +47,13 @@ extern "C" {
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
+#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_FLAGS (\
+ VIRTGPU_EXECBUF_FENCE_FD_IN |\
+ VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ 0)
+
struct drm_virtgpu_map {
__u64 offset; /* use for mmap system call */
__u32 handle;
@@ -54,12 +61,12 @@ struct drm_virtgpu_map {
};
struct drm_virtgpu_execbuffer {
- __u32 flags; /* for future use */
+ __u32 flags;
__u32 size;
__u64 command; /* void* */
__u64 bo_handles;
__u32 num_bo_handles;
- __u32 pad;
+ __s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@@ -137,7 +144,7 @@ struct drm_virtgpu_get_caps {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
- DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
struct drm_virtgpu_execbuffer)
#define DRM_IOCTL_VIRTGPU_GETPARAM \
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 6d180cc60a5d..3eb5a4c3d60a 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -716,7 +716,6 @@
* the situation described above.
*/
#define REL_RESERVED 0x0a
-#define REL_WHEEL_HI_RES 0x0b
#define REL_MAX 0x0f
#define REL_CNT (REL_MAX+1)
@@ -753,15 +752,6 @@
#define ABS_MISC 0x28
-/*
- * 0x2e is reserved and should not be used in input drivers.
- * It was used by HID as ABS_MISC+6 and userspace needs to detect if
- * the next ABS_* event is correct or is just ABS_MISC + n.
- * We define here ABS_RESERVED so userspace can rely on it and detect
- * the situation described above.
- */
-#define ABS_RESERVED 0x2e
-
#define ABS_MT_SLOT 0x2f /* MT slot being modified */
#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index f5ff8a76e208..e622fd1fbd46 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -83,11 +83,11 @@ struct kfd_ioctl_set_cu_mask_args {
};
struct kfd_ioctl_get_queue_wave_state_args {
- uint64_t ctl_stack_address; /* to KFD */
- uint32_t ctl_stack_used_size; /* from KFD */
- uint32_t save_area_used_size; /* from KFD */
- uint32_t queue_id; /* to KFD */
- uint32_t pad;
+ __u64 ctl_stack_address; /* to KFD */
+ __u32 ctl_stack_used_size; /* from KFD */
+ __u32 save_area_used_size; /* from KFD */
+ __u32 queue_id; /* to KFD */
+ __u32 pad;
};
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -255,10 +255,10 @@ struct kfd_hsa_memory_exception_data {
/* hw exception data */
struct kfd_hsa_hw_exception_data {
- uint32_t reset_type;
- uint32_t reset_cause;
- uint32_t memory_lost;
- uint32_t gpu_id;
+ __u32 reset_type;
+ __u32 reset_cause;
+ __u32 memory_lost;
+ __u32 gpu_id;
};
/* Event data */
@@ -398,6 +398,24 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
__u32 n_success; /* to/from KFD */
};
+struct kfd_ioctl_get_dmabuf_info_args {
+ __u64 size; /* from KFD */
+ __u64 metadata_ptr; /* to KFD */
+ __u32 metadata_size; /* to KFD (space allocated by user)
+ * from KFD (actual metadata size)
+ */
+ __u32 gpu_id; /* from KFD */
+ __u32 flags; /* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
+ __u32 dmabuf_fd; /* to KFD */
+};
+
+struct kfd_ioctl_import_dmabuf_args {
+ __u64 va_addr; /* to KFD */
+ __u64 handle; /* from KFD */
+ __u32 gpu_id; /* to KFD */
+ __u32 dmabuf_fd; /* to KFD */
+};
+
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -486,7 +504,13 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \
AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
+#define AMDKFD_IOC_GET_DMABUF_INFO \
+ AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
+
+#define AMDKFD_IOC_IMPORT_DMABUF \
+ AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x1C
+#define AMDKFD_COMMAND_END 0x1E
#endif
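
Review note: the two new ioctls pair up: GET_DMABUF_INFO inspects a dma-buf fd (including which GPU it lives on), and IMPORT_DMABUF maps it at a caller-chosen VA. A hedged user-space sketch (kfd_fd, dmabuf_fd, meta and va are assumed; error handling omitted):

	struct kfd_ioctl_get_dmabuf_info_args info = {
		.dmabuf_fd = dmabuf_fd,				/* to KFD */
		.metadata_ptr = (__u64)(uintptr_t)meta,		/* optional */
		.metadata_size = sizeof(meta),
	};
	ioctl(kfd_fd, AMDKFD_IOC_GET_DMABUF_INFO, &info);

	struct kfd_ioctl_import_dmabuf_args import = {
		.va_addr = va,			/* to KFD */
		.gpu_id = info.gpu_id,		/* reported by the info call */
		.dmabuf_fd = dmabuf_fd,
	};
	ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &import);
	/* import.handle now identifies the imported buffer. */
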
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 579974b0bf0d..7de4f1bdaf06 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1635,8 +1635,8 @@ enum nft_ng_attributes {
NFTA_NG_MODULUS,
NFTA_NG_TYPE,
NFTA_NG_OFFSET,
- NFTA_NG_SET_NAME,
- NFTA_NG_SET_ID,
+ NFTA_NG_SET_NAME, /* deprecated */
+ NFTA_NG_SET_ID, /* deprecated */
__NFTA_NG_MAX
};
#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h
index 156ccd089df1..1610fdbab98d 100644
--- a/include/uapi/linux/netfilter_bridge.h
+++ b/include/uapi/linux/netfilter_bridge.h
@@ -11,6 +11,10 @@
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
+#ifndef __KERNEL__
+#include <limits.h> /* for INT_MIN, INT_MAX */
+#endif
+
/* Bridge Hooks */
/* After promisc drops, checksum checks. */
#define NF_BR_PRE_ROUTING 0
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 34dd3d497f2c..c81feb373d3e 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -568,6 +568,8 @@ struct sctp_assoc_reset_event {
#define SCTP_ASSOC_CHANGE_DENIED 0x0004
#define SCTP_ASSOC_CHANGE_FAILED 0x0008
+#define SCTP_STREAM_CHANGE_DENIED SCTP_ASSOC_CHANGE_DENIED
+#define SCTP_STREAM_CHANGE_FAILED SCTP_ASSOC_CHANGE_FAILED
struct sctp_stream_change_event {
__u16 strchange_type;
__u16 strchange_flags;
@@ -1151,6 +1153,7 @@ struct sctp_add_streams {
/* SCTP Stream schedulers */
enum sctp_sched_type {
SCTP_SS_FCFS,
+ SCTP_SS_DEFAULT = SCTP_SS_FCFS,
SCTP_SS_PRIO,
SCTP_SS_RR,
SCTP_SS_MAX = SCTP_SS_RR
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 51b095898f4b..998983a6e6b7 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -50,6 +50,8 @@
#ifndef __LINUX_V4L2_CONTROLS_H
#define __LINUX_V4L2_CONTROLS_H
+#include <linux/types.h>
+
/* Control classes */
#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */
#define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */
@@ -1110,6 +1112,7 @@ struct v4l2_mpeg2_sequence {
__u8 profile_and_level_indication;
__u8 progressive_sequence;
__u8 chroma_format;
+ __u8 pad;
};
struct v4l2_mpeg2_picture {
@@ -1128,6 +1131,7 @@ struct v4l2_mpeg2_picture {
__u8 alternate_scan;
__u8 repeat_first_field;
__u8 progressive_frame;
+ __u8 pad;
};
struct v4l2_ctrl_mpeg2_slice_params {
@@ -1142,6 +1146,7 @@ struct v4l2_ctrl_mpeg2_slice_params {
__u8 backward_ref_index;
__u8 forward_ref_index;
+ __u8 pad;
};
struct v4l2_ctrl_mpeg2_quantization {
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index f43c3c6171ff..8e88eba1fa7a 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -41,6 +41,7 @@
#include <linux/types.h>
#define VIRTIO_GPU_F_VIRGL 0
+#define VIRTIO_GPU_F_EDID 1
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -56,6 +57,7 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
VIRTIO_GPU_CMD_GET_CAPSET_INFO,
VIRTIO_GPU_CMD_GET_CAPSET,
+ VIRTIO_GPU_CMD_GET_EDID,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
@@ -76,6 +78,7 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
VIRTIO_GPU_RESP_OK_CAPSET_INFO,
VIRTIO_GPU_RESP_OK_CAPSET,
+ VIRTIO_GPU_RESP_OK_EDID,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -291,6 +294,21 @@ struct virtio_gpu_resp_capset {
__u8 capset_data[];
};
+/* VIRTIO_GPU_CMD_GET_EDID */
+struct virtio_gpu_cmd_get_edid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 scanout;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_EDID */
+struct virtio_gpu_resp_edid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 size;
+ __le32 padding;
+ __u8 edid[1024];
+};
+
#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
struct virtio_gpu_config {
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index abbad94e14a1..e582e8e7527a 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -246,6 +246,9 @@ struct ipu_image {
struct v4l2_rect rect;
dma_addr_t phys0;
dma_addr_t phys1;
+ /* chroma plane offset overrides */
+ u32 u_offset;
+ u32 v_offset;
};
void ipu_cpmem_zero(struct ipuv3_channel *ch);
@@ -387,6 +390,12 @@ int ipu_ic_task_init(struct ipu_ic *ic,
int out_width, int out_height,
enum ipu_color_space in_cs,
enum ipu_color_space out_cs);
+int ipu_ic_task_init_rsc(struct ipu_ic *ic,
+ int in_width, int in_height,
+ int out_width, int out_height,
+ enum ipu_color_space in_cs,
+ enum ipu_color_space out_cs,
+ u32 rsc);
int ipu_ic_task_graphics_init(struct ipu_ic *ic,
enum ipu_color_space in_g_cs,
bool galpha_en, u32 galpha,
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 18803ff76e27..4969817124a8 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -42,16 +42,12 @@ int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-
-int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
- xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
- unsigned int domid, bool no_translate, struct page **pages);
#else
static inline int xen_create_contiguous_region(phys_addr_t pstart,
unsigned int order,
@@ -63,7 +59,13 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart,
static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
unsigned int order) { }
+#endif
+#if defined(CONFIG_XEN_PV)
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+ unsigned int domid, bool no_translate, struct page **pages);
+#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr,
pgprot_t prot, unsigned int domid,
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 6377225b2082..1a796e0799ec 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -553,7 +553,6 @@ bool is_bpf_text_address(unsigned long addr)
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym)
{
- unsigned long symbol_start, symbol_end;
struct bpf_prog_aux *aux;
unsigned int it = 0;
int ret = -ERANGE;
@@ -566,10 +565,9 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
if (it++ != symnum)
continue;
- bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
bpf_get_prog_name(aux->prog, sym);
- *value = symbol_start;
+ *value = (unsigned long)aux->prog->bpf_func;
*type = BPF_SYM_ELF_TYPE;
ret = 0;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ccb93277aae2..cf5040fd5434 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2078,6 +2078,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
info.jited_prog_len = 0;
info.xlated_prog_len = 0;
info.nr_jited_ksyms = 0;
+ info.nr_jited_func_lens = 0;
goto done;
}
@@ -2158,11 +2159,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
}
ulen = info.nr_jited_ksyms;
- info.nr_jited_ksyms = prog->aux->func_cnt;
+ info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
if (info.nr_jited_ksyms && ulen) {
if (bpf_dump_raw_ok()) {
+ unsigned long ksym_addr;
u64 __user *user_ksyms;
- ulong ksym_addr;
u32 i;
/* copy the address of the kernel symbol
@@ -2170,10 +2171,17 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
*/
ulen = min_t(u32, info.nr_jited_ksyms, ulen);
user_ksyms = u64_to_user_ptr(info.jited_ksyms);
- for (i = 0; i < ulen; i++) {
- ksym_addr = (ulong) prog->aux->func[i]->bpf_func;
- ksym_addr &= PAGE_MASK;
- if (put_user((u64) ksym_addr, &user_ksyms[i]))
+ if (prog->aux->func_cnt) {
+ for (i = 0; i < ulen; i++) {
+ ksym_addr = (unsigned long)
+ prog->aux->func[i]->bpf_func;
+ if (put_user((u64) ksym_addr,
+ &user_ksyms[i]))
+ return -EFAULT;
+ }
+ } else {
+ ksym_addr = (unsigned long) prog->bpf_func;
+ if (put_user((u64) ksym_addr, &user_ksyms[0]))
return -EFAULT;
}
} else {
@@ -2182,7 +2190,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
}
ulen = info.nr_jited_func_lens;
- info.nr_jited_func_lens = prog->aux->func_cnt;
+ info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
if (info.nr_jited_func_lens && ulen) {
if (bpf_dump_raw_ok()) {
u32 __user *user_lens;
@@ -2191,9 +2199,16 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
/* copy the JITed image lengths for each function */
ulen = min_t(u32, info.nr_jited_func_lens, ulen);
user_lens = u64_to_user_ptr(info.jited_func_lens);
- for (i = 0; i < ulen; i++) {
- func_len = prog->aux->func[i]->jited_len;
- if (put_user(func_len, &user_lens[i]))
+ if (prog->aux->func_cnt) {
+ for (i = 0; i < ulen; i++) {
+ func_len =
+ prog->aux->func[i]->jited_len;
+ if (put_user(func_len, &user_lens[i]))
+ return -EFAULT;
+ }
+ } else {
+ func_len = prog->jited_len;
+ if (put_user(func_len, &user_lens[0]))
return -EFAULT;
}
} else {
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
index 6ad4a9fcbd6f..7921ae4fca8d 100644
--- a/kernel/debug/kdb/kdb_bt.c
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -179,14 +179,14 @@ kdb_bt(int argc, const char **argv)
kdb_printf("no process for cpu %ld\n", cpu);
return 0;
}
- sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+ sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
kdb_parse(buf);
return 0;
}
kdb_printf("btc: cpu status: ");
kdb_parse("cpu\n");
for_each_online_cpu(cpu) {
- sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+ sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
kdb_parse(buf);
touch_nmi_watchdog();
}
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index ed5d34925ad0..6a4b41484afe 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -216,7 +216,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
int count;
int i;
int diag, dtab_count;
- int key;
+ int key, buf_size, ret;
diag = kdbgetintenv("DTABCOUNT", &dtab_count);
@@ -336,9 +336,8 @@ poll_again:
else
p_tmp = tmpbuffer;
len = strlen(p_tmp);
- count = kallsyms_symbol_complete(p_tmp,
- sizeof(tmpbuffer) -
- (p_tmp - tmpbuffer));
+ buf_size = sizeof(tmpbuffer) - (p_tmp - tmpbuffer);
+ count = kallsyms_symbol_complete(p_tmp, buf_size);
if (tab == 2 && count > 0) {
kdb_printf("\n%d symbols are found.", count);
if (count > dtab_count) {
@@ -350,9 +349,13 @@ poll_again:
}
kdb_printf("\n");
for (i = 0; i < count; i++) {
- if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
+ ret = kallsyms_symbol_next(p_tmp, i, buf_size);
+ if (WARN_ON(!ret))
break;
- kdb_printf("%s ", p_tmp);
+ if (ret != -E2BIG)
+ kdb_printf("%s ", p_tmp);
+ else
+ kdb_printf("%s... ", p_tmp);
*(p_tmp + len) = '\0';
}
if (i >= dtab_count)
diff --git a/kernel/debug/kdb/kdb_keyboard.c b/kernel/debug/kdb/kdb_keyboard.c
index 118527aa60ea..750497b0003a 100644
--- a/kernel/debug/kdb/kdb_keyboard.c
+++ b/kernel/debug/kdb/kdb_keyboard.c
@@ -173,11 +173,11 @@ int kdb_get_kbd_char(void)
case KT_LATIN:
if (isprint(keychar))
break; /* printable characters */
- /* drop through */
+ /* fall through */
case KT_SPEC:
if (keychar == K_ENTER)
break;
- /* drop through */
+ /* fall through */
default:
return -1; /* ignore unprintables */
}
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index bb4fe4e1a601..d72b32c66f7d 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1192,7 +1192,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
if (reason == KDB_REASON_DEBUG) {
/* special case below */
} else {
- kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
+ kdb_printf("\nEntering kdb (current=0x%px, pid %d) ",
kdb_current, kdb_current ? kdb_current->pid : 0);
#if defined(CONFIG_SMP)
kdb_printf("on processor %d ", raw_smp_processor_id());
@@ -1208,7 +1208,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
*/
switch (db_result) {
case KDB_DB_BPT:
- kdb_printf("\nEntering kdb (0x%p, pid %d) ",
+ kdb_printf("\nEntering kdb (0x%px, pid %d) ",
kdb_current, kdb_current->pid);
#if defined(CONFIG_SMP)
kdb_printf("on processor %d ", raw_smp_processor_id());
@@ -1493,6 +1493,7 @@ static void kdb_md_line(const char *fmtstr, unsigned long addr,
char cbuf[32];
char *c = cbuf;
int i;
+ int j;
unsigned long word;
memset(cbuf, '\0', sizeof(cbuf));
@@ -1538,25 +1539,9 @@ static void kdb_md_line(const char *fmtstr, unsigned long addr,
wc.word = word;
#define printable_char(c) \
({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
- switch (bytesperword) {
- case 8:
+ for (j = 0; j < bytesperword; j++)
*c++ = printable_char(*cp++);
- *c++ = printable_char(*cp++);
- *c++ = printable_char(*cp++);
- *c++ = printable_char(*cp++);
- addr += 4;
- case 4:
- *c++ = printable_char(*cp++);
- *c++ = printable_char(*cp++);
- addr += 2;
- case 2:
- *c++ = printable_char(*cp++);
- addr++;
- case 1:
- *c++ = printable_char(*cp++);
- addr++;
- break;
- }
+ addr += bytesperword;
#undef printable_char
}
}
@@ -2048,7 +2033,7 @@ static int kdb_lsmod(int argc, const char **argv)
if (mod->state == MODULE_STATE_UNFORMED)
continue;
- kdb_printf("%-20s%8u 0x%p ", mod->name,
+ kdb_printf("%-20s%8u 0x%px ", mod->name,
mod->core_layout.size, (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD
kdb_printf("%4d ", module_refcount(mod));
@@ -2059,7 +2044,7 @@ static int kdb_lsmod(int argc, const char **argv)
kdb_printf(" (Loading)");
else
kdb_printf(" (Live)");
- kdb_printf(" 0x%p", mod->core_layout.base);
+ kdb_printf(" 0x%px", mod->core_layout.base);
#ifdef CONFIG_MODULE_UNLOAD
{
@@ -2341,7 +2326,7 @@ void kdb_ps1(const struct task_struct *p)
return;
cpu = kdb_process_cpu(p);
- kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n",
+ kdb_printf("0x%px %8d %8d %d %4d %c 0x%px %c%s\n",
(void *)p, p->pid, p->parent->pid,
kdb_task_has_cpu(p), kdb_process_cpu(p),
kdb_task_state_char(p),
@@ -2354,7 +2339,7 @@ void kdb_ps1(const struct task_struct *p)
} else {
if (KDB_TSK(cpu) != p)
kdb_printf(" Error: does not match running "
- "process table (0x%p)\n", KDB_TSK(cpu));
+ "process table (0x%px)\n", KDB_TSK(cpu));
}
}
}
@@ -2687,7 +2672,7 @@ int kdb_register_flags(char *cmd,
for_each_kdbcmd(kp, i) {
if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
kdb_printf("Duplicate kdb command registered: "
- "%s, func %p help %s\n", cmd, func, help);
+ "%s, func %px help %s\n", cmd, func, help);
return 1;
}
}
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 1e5a502ba4a7..2118d8258b7c 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -83,7 +83,7 @@ typedef struct __ksymtab {
unsigned long sym_start;
unsigned long sym_end;
} kdb_symtab_t;
-extern int kallsyms_symbol_next(char *prefix_name, int flag);
+extern int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size);
extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
/* Exported Symbols for kernel loadable modules to use. */
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 990b3cc526c8..50bf9b119bad 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -40,7 +40,7 @@
int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
{
if (KDB_DEBUG(AR))
- kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname,
+ kdb_printf("kdbgetsymval: symname=%s, symtab=%px\n", symname,
symtab);
memset(symtab, 0, sizeof(*symtab));
symtab->sym_start = kallsyms_lookup_name(symname);
@@ -88,7 +88,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
char *knt1 = NULL;
if (KDB_DEBUG(AR))
- kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab);
+ kdb_printf("kdbnearsym: addr=0x%lx, symtab=%px\n", addr, symtab);
memset(symtab, 0, sizeof(*symtab));
if (addr < 4096)
@@ -149,7 +149,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
symtab->mod_name = "kernel";
if (KDB_DEBUG(AR))
kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
- "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret,
+ "symtab->mod_name=%px, symtab->sym_name=%px (%s)\n", ret,
symtab->sym_start, symtab->mod_name, symtab->sym_name,
symtab->sym_name);
@@ -221,11 +221,13 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len)
* Parameters:
* prefix_name prefix of a symbol name to lookup
* flag 0 means search from the head, 1 means continue search.
+ * buf_size maximum length that can be written to prefix_name
+ * buffer
* Returns:
* 1 if a symbol matches the given prefix.
* 0 if no string found
*/
-int kallsyms_symbol_next(char *prefix_name, int flag)
+int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size)
{
int prefix_len = strlen(prefix_name);
static loff_t pos;
@@ -235,10 +237,8 @@ int kallsyms_symbol_next(char *prefix_name, int flag)
pos = 0;
while ((name = kdb_walk_kallsyms(&pos))) {
- if (strncmp(name, prefix_name, prefix_len) == 0) {
- strncpy(prefix_name, name, strlen(name)+1);
- return 1;
- }
+ if (!strncmp(name, prefix_name, prefix_len))
+ return strscpy(prefix_name, name, buf_size);
}
return 0;
}
@@ -432,7 +432,7 @@ int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
*word = w8;
break;
}
- /* drop through */
+ /* fall through */
default:
diag = KDB_BADWIDTH;
kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
@@ -481,7 +481,7 @@ int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
*word = w8;
break;
}
- /* drop through */
+ /* fall through */
default:
diag = KDB_BADWIDTH;
kdb_printf("kdb_getword: bad width %ld\n", (long) size);
@@ -525,7 +525,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size)
diag = kdb_putarea(addr, w8);
break;
}
- /* drop through */
+ /* fall through */
default:
diag = KDB_BADWIDTH;
kdb_printf("kdb_putword: bad width %ld\n", (long) size);
@@ -887,13 +887,13 @@ void debug_kusage(void)
__func__, dah_first);
if (dah_first) {
h_used = (struct debug_alloc_header *)debug_alloc_pool;
- kdb_printf("%s: h_used %p size %d\n", __func__, h_used,
+ kdb_printf("%s: h_used %px size %d\n", __func__, h_used,
h_used->size);
}
do {
h_used = (struct debug_alloc_header *)
((char *)h_free + dah_overhead + h_free->size);
- kdb_printf("%s: h_used %p size %d caller %p\n",
+ kdb_printf("%s: h_used %px size %d caller %px\n",
__func__, h_used, h_used->size, h_used->caller);
h_free = (struct debug_alloc_header *)
(debug_alloc_pool + h_free->next);
@@ -902,7 +902,7 @@ void debug_kusage(void)
((char *)h_free + dah_overhead + h_free->size);
if ((char *)h_used - debug_alloc_pool !=
sizeof(debug_alloc_pool_aligned))
- kdb_printf("%s: h_used %p size %d caller %p\n",
+ kdb_printf("%s: h_used %px size %d caller %px\n",
__func__, h_used, h_used->size, h_used->caller);
out:
spin_unlock(&dap_lock);
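
Review note: the strncpy → strscpy switch is what enables the -E2BIG handling added in kdb_io.c above: strscpy() respects the destination size, always NUL-terminates, and reports truncation instead of silently overrunning. A compact illustration:

	char buf[8];
	ssize_t ret;

	ret = strscpy(buf, "short", sizeof(buf));
	/* ret == 5, buf contains "short", NUL-terminated */

	ret = strscpy(buf, "much_too_long", sizeof(buf));
	/* ret == -E2BIG, buf contains "much_to" -- truncated but terminated */
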
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 5731daa09a32..045930e32c0e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -679,7 +679,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
}
if (!dev_is_dma_coherent(dev) &&
- (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0 &&
+ dev_addr != DIRECT_MAPPING_ERROR)
arch_sync_dma_for_device(dev, phys, size, dir);
return dev_addr;
diff --git a/kernel/resource.c b/kernel/resource.c
index b3a3a1fc499e..b0fbf685c77a 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -319,16 +319,23 @@ int release_resource(struct resource *old)
EXPORT_SYMBOL(release_resource);
/**
- * Finds the lowest iomem resource that covers part of [start..end]. The
- * caller must specify start, end, flags, and desc (which may be
+ * Finds the lowest iomem resource that covers part of [@start..@end]. The
+ * caller must specify @start, @end, @flags, and @desc (which may be
* IORES_DESC_NONE).
*
- * If a resource is found, returns 0 and *res is overwritten with the part
- * of the resource that's within [start..end]; if none is found, returns
- * -1.
+ * If a resource is found, returns 0 and @*res is overwritten with the part
+ * of the resource that's within [@start..@end]; if none is found, returns
+ * -1. Returns -EINVAL for invalid parameters.
*
* This function walks the whole tree and not just first level children
* unless @first_lvl is true.
+ *
+ * @start: start address of the resource searched for
+ * @end: end address of same resource
+ * @flags: flags which the resource must have
+ * @desc: descriptor the resource must have
+ * @first_lvl: walk only the first level children, if set
+ * @res: return ptr, if resource found
*/
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
unsigned long flags, unsigned long desc,
@@ -399,6 +406,8 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
* @flags: I/O resource flags
* @start: start addr
* @end: end addr
+ * @arg: function argument for the callback @func
+ * @func: callback function that is called for each qualifying resource area
*
* NOTE: For a new descriptor search, define a new IORES_DESC in
* <linux/ioport.h> and set it in 'desc' of a target resource entry.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f12225f26b70..091e089063be 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5851,11 +5851,14 @@ void __init sched_init_smp(void)
/*
* There's no userspace yet to cause hotplug operations; hence all the
* CPU masks are stable and all blatant races in the below code cannot
- * happen.
+ * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
+ * but there won't be any contention on it.
*/
+ cpus_read_lock();
mutex_lock(&sched_domains_mutex);
sched_init_domains(cpu_active_mask);
mutex_unlock(&sched_domains_mutex);
+ cpus_read_unlock();
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee271bb661cc..ac855b2f4774 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2400,8 +2400,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
local = 1;
/*
- * Retry task to preferred node migration periodically, in case it
- * case it previously failed, or the scheduler moved us.
+ * Retry to migrate task to preferred node periodically, in case it
+ * previously failed, or the scheduler moved us.
*/
if (time_after(jiffies, p->numa_migrate_retry)) {
task_numa_placement(p);
@@ -5674,11 +5674,11 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
return target;
}
-static unsigned long cpu_util_wake(int cpu, struct task_struct *p);
+static unsigned long cpu_util_without(int cpu, struct task_struct *p);
-static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
+static unsigned long capacity_spare_without(int cpu, struct task_struct *p)
{
- return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0);
+ return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
}
/*
@@ -5738,7 +5738,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
- spare_cap = capacity_spare_wake(i, p);
+ spare_cap = capacity_spare_without(i, p);
if (spare_cap > max_spare_cap)
max_spare_cap = spare_cap;
@@ -5889,8 +5889,8 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
return prev_cpu;
/*
- * We need task's util for capacity_spare_wake, sync it up to prev_cpu's
- * last_update_time.
+ * We need task's util for capacity_spare_without, sync it up to
+ * prev_cpu's last_update_time.
*/
if (!(sd_flag & SD_BALANCE_FORK))
sync_entity_load_avg(&p->se);
@@ -6216,10 +6216,19 @@ static inline unsigned long cpu_util(int cpu)
}
/*
- * cpu_util_wake: Compute CPU utilization with any contributions from
- * the waking task p removed.
+ * cpu_util_without: compute cpu utilization without any contributions from *p
+ * @cpu: the CPU which utilization is requested
+ * @p: the task which utilization should be discounted
+ *
+ * The utilization of a CPU is defined by the utilization of tasks currently
+ * enqueued on that CPU as well as tasks which are currently sleeping after an
+ * execution on that CPU.
+ *
+ * This method returns the utilization of the specified CPU by discounting the
+ * utilization of the specified task, whenever the task is currently
+ * contributing to the CPU utilization.
*/
-static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
+static unsigned long cpu_util_without(int cpu, struct task_struct *p)
{
struct cfs_rq *cfs_rq;
unsigned int util;
@@ -6231,7 +6240,7 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
cfs_rq = &cpu_rq(cpu)->cfs;
util = READ_ONCE(cfs_rq->avg.util_avg);
- /* Discount task's blocked util from CPU's util */
+ /* Discount task's util from CPU's util */
util -= min_t(unsigned int, util, task_util(p));
/*
@@ -6240,14 +6249,14 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
* a) if *p is the only task sleeping on this CPU, then:
* cpu_util (== task_util) > util_est (== 0)
* and thus we return:
- * cpu_util_wake = (cpu_util - task_util) = 0
+ * cpu_util_without = (cpu_util - task_util) = 0
*
* b) if other tasks are SLEEPING on this CPU, which is now exiting
* IDLE, then:
* cpu_util >= task_util
* cpu_util > util_est (== 0)
* and thus we discount *p's blocked utilization to return:
- * cpu_util_wake = (cpu_util - task_util) >= 0
+ * cpu_util_without = (cpu_util - task_util) >= 0
*
* c) if other tasks are RUNNABLE on that CPU and
* util_est > cpu_util
@@ -6260,8 +6269,33 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
* covered by the following code when estimated utilization is
* enabled.
*/
- if (sched_feat(UTIL_EST))
- util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+ if (sched_feat(UTIL_EST)) {
+ unsigned int estimated =
+ READ_ONCE(cfs_rq->avg.util_est.enqueued);
+
+ /*
+ * Despite the following checks we still have a small window
+ * for a possible race, when an execl's select_task_rq_fair()
+ * races with LB's detach_task():
+ *
+ * detach_task()
+ * p->on_rq = TASK_ON_RQ_MIGRATING;
+ * ---------------------------------- A
+ * deactivate_task() \
+ * dequeue_task() + RaceTime
+ * util_est_dequeue() /
+ * ---------------------------------- B
+ *
+ * The additional check on "current == p" is required to properly
+ * fix the execl regression, and it helps to further reduce the
+ * chances of the above race.
+ */
+ if (unlikely(task_on_rq_queued(p) || current == p)) {
+ estimated -= min_t(unsigned int, estimated,
+ (_task_util_est(p) | UTIL_AVG_UNCHANGED));
+ }
+ util = max(util, estimated);
+ }
/*
* Utilization (estimated) can exceed the CPU capacity, thus let's
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 7cdecfc010af..3d7355d7c3e3 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -633,38 +633,39 @@ void psi_cgroup_free(struct cgroup *cgroup)
*/
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
- bool move_psi = !psi_disabled;
unsigned int task_flags = 0;
struct rq_flags rf;
struct rq *rq;
- if (move_psi) {
- rq = task_rq_lock(task, &rf);
+ if (psi_disabled) {
+ /*
+ * Lame to do this here, but the scheduler cannot be locked
+ * from the outside, so we move cgroups from inside sched/.
+ */
+ rcu_assign_pointer(task->cgroups, to);
+ return;
+ }
- if (task_on_rq_queued(task))
- task_flags = TSK_RUNNING;
- else if (task->in_iowait)
- task_flags = TSK_IOWAIT;
+ rq = task_rq_lock(task, &rf);
- if (task->flags & PF_MEMSTALL)
- task_flags |= TSK_MEMSTALL;
+ if (task_on_rq_queued(task))
+ task_flags = TSK_RUNNING;
+ else if (task->in_iowait)
+ task_flags = TSK_IOWAIT;
- if (task_flags)
- psi_task_change(task, task_flags, 0);
- }
+ if (task->flags & PF_MEMSTALL)
+ task_flags |= TSK_MEMSTALL;
- /*
- * Lame to do this here, but the scheduler cannot be locked
- * from the outside, so we move cgroups from inside sched/.
- */
+ if (task_flags)
+ psi_task_change(task, task_flags, 0);
+
+ /* See comment above */
rcu_assign_pointer(task->cgroups, to);
- if (move_psi) {
- if (task_flags)
- psi_task_change(task, 0, task_flags);
+ if (task_flags)
+ psi_task_change(task, 0, task_flags);
- task_rq_unlock(rq, task, &rf);
- }
+ task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index ce32cf741b25..8f0644af40be 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -917,9 +917,6 @@ static void check_process_timers(struct task_struct *tsk,
struct task_cputime cputime;
unsigned long soft;
- if (dl_task(tsk))
- check_dl_overrun(tsk);
-
/*
* If cputimer is not running, then there are no active
* process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 3ef15a6683c0..bd30e9398d2a 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -535,7 +535,7 @@ int traceprobe_update_arg(struct probe_arg *arg)
if (code[1].op != FETCH_OP_IMM)
return -EINVAL;
- tmp = strpbrk("+-", code->data);
+ tmp = strpbrk(code->data, "+-");
if (tmp)
c = *tmp;
ret = traceprobe_split_symbol_offset(code->data,
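
Review note: strpbrk(cs, ct) scans @cs for the first character that also appears in @ct; the old call had the arguments transposed, so it scanned the literal string "+-" rather than the probe data. Illustration:

	const char *data = "my_symbol+0x10";

	char *tmp = strpbrk(data, "+-");	/* correct: points at "+0x10" */
	/*
	 * The broken form, strpbrk("+-", data), scans the two-character
	 * string "+-" instead and can only ever return a pointer into it
	 * (or NULL) -- never the offset marker inside @data.
	 */
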
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index e5222b5fb4fe..923414a246e9 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -974,10 +974,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
goto out;
- ret = sort_idmaps(&new_map);
- if (ret < 0)
- goto out;
-
ret = -EPERM;
/* Map the lower ids from the parent user namespace to the
* kernel global id space.
@@ -1004,6 +1000,14 @@ static ssize_t map_write(struct file *file, const char __user *buf,
e->lower_first = lower_first;
}
+ /*
+ * If we want to use binary search for lookup, this clones the extent
+ * array and sorts both copies.
+ */
+ ret = sort_idmaps(&new_map);
+ if (ret < 0)
+ goto out;
+
/* Install the map */
if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
memcpy(map->extent, new_map.extent,
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 5d73f5cb4d8a..79777645cac9 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -27,7 +27,7 @@ ifeq ($(ARCH),arm)
CFLAGS += -I../../../arch/arm/include -mfpu=neon
HAS_NEON = yes
endif
-ifeq ($(ARCH),arm64)
+ifeq ($(ARCH),aarch64)
CFLAGS += -I../../../arch/arm64/include
HAS_NEON = yes
endif
@@ -41,7 +41,7 @@ ifeq ($(IS_X86),yes)
gcc -c -x assembler - >&/dev/null && \
rm ./-.o && echo -DCONFIG_AS_AVX512=1)
else ifeq ($(HAS_NEON),yes)
- OBJS += neon.o neon1.o neon2.o neon4.o neon8.o
+ OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
else
HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index b984806d7d7b..7cab9a9869ac 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -837,6 +837,7 @@ static ssize_t read_firmware_show(struct device *dev,
if (req->fw->size > PAGE_SIZE) {
pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
rc = -EINVAL;
+ goto out;
}
memcpy(buf, req->fw->data, req->fw->size);
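
The test_firmware fix adds the missing jump: without it, the oversized-firmware branch set rc = -EINVAL and then fell straight into the memcpy(), writing past the PAGE_SIZE sysfs buffer anyway. The fixed shape, as a standalone sketch:

#include <string.h>
#include <sys/types.h>

#define BUF_MAX 4096	/* stand-in for PAGE_SIZE */

static ssize_t read_blob(char *buf, const void *data, size_t size)
{
	ssize_t rc = -22;	/* -EINVAL */

	if (size > BUF_MAX)
		goto out;	/* the missing jump: skip the copy */

	memcpy(buf, data, size);
	rc = (ssize_t)size;
out:
	return rc;
}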
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index aa47754150ce..0598e86af8fc 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -208,15 +208,19 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
/* We should see two elements in the array */
+ rcu_read_lock();
xas_for_each(&xas, entry, ULONG_MAX)
seen++;
+ rcu_read_unlock();
XA_BUG_ON(xa, seen != 2);
/* One of which is marked */
xas_set(&xas, 0);
seen = 0;
+ rcu_read_lock();
xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
seen++;
+ rcu_read_unlock();
XA_BUG_ON(xa, seen != 1);
}
XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
@@ -373,6 +377,12 @@ static noinline void check_reserve(struct xarray *xa)
xa_erase_index(xa, 12345678);
XA_BUG_ON(xa, !xa_empty(xa));
+ /* And so does xa_insert */
+ xa_reserve(xa, 12345678, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
+ xa_erase_index(xa, 12345678);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
/* Can iterate through a reserved entry */
xa_store_index(xa, 5, GFP_KERNEL);
xa_reserve(xa, 6, GFP_KERNEL);
@@ -436,7 +446,9 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
XA_BUG_ON(xa, xa_load(xa, max) != NULL);
XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
+ xas_lock(&xas);
XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
+ xas_unlock(&xas);
XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
XA_BUG_ON(xa, xa_load(xa, max) != NULL);
@@ -452,9 +464,11 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
XA_STATE(xas, xa, index);
xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
+ xas_lock(&xas);
XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
XA_BUG_ON(xa, xas.xa_index != index);
XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
+ xas_unlock(&xas);
XA_BUG_ON(xa, !xa_empty(xa));
}
#endif
@@ -498,7 +512,7 @@ static noinline void check_multi_store(struct xarray *xa)
rcu_read_unlock();
/* We can erase multiple values with a single store */
- xa_store_order(xa, 0, 63, NULL, GFP_KERNEL);
+ xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
XA_BUG_ON(xa, !xa_empty(xa));
/* Even when the first slot is empty but the others aren't */
@@ -702,7 +716,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
}
}
-static noinline void check_find(struct xarray *xa)
+static noinline void check_find_1(struct xarray *xa)
{
unsigned long i, j, k;
@@ -748,6 +762,34 @@ static noinline void check_find(struct xarray *xa)
XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
}
XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_find_2(struct xarray *xa)
+{
+ void *entry;
+ unsigned long i, j, index = 0;
+
+ xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ XA_BUG_ON(xa, true);
+ }
+
+ for (i = 0; i < 1024; i++) {
+ xa_store_index(xa, index, GFP_KERNEL);
+ j = 0;
+ index = 0;
+ xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ XA_BUG_ON(xa, xa_mk_value(index) != entry);
+ XA_BUG_ON(xa, index != j++);
+ }
+ }
+
+ xa_destroy(xa);
+}
+
+static noinline void check_find(struct xarray *xa)
+{
+ check_find_1(xa);
+ check_find_2(xa);
check_multi_find(xa);
check_multi_find_2(xa);
}
@@ -1067,7 +1109,7 @@ static noinline void check_store_range(struct xarray *xa)
__check_store_range(xa, 4095 + i, 4095 + j);
__check_store_range(xa, 4096 + i, 4096 + j);
__check_store_range(xa, 123456 + i, 123456 + j);
- __check_store_range(xa, UINT_MAX + i, UINT_MAX + j);
+ __check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
}
}
}
@@ -1146,10 +1188,12 @@ static noinline void check_account(struct xarray *xa)
XA_STATE(xas, xa, 1 << order);
xa_store_order(xa, 0, order, xa, GFP_KERNEL);
+ rcu_read_lock();
xas_load(&xas);
XA_BUG_ON(xa, xas.xa_node->count == 0);
XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
+ rcu_read_unlock();
xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
GFP_KERNEL);
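
The test_xarray changes all follow one rule of the XArray API: the advanced xas_* interface is unlocked by design, so walkers like xas_for_each() and xas_load() must run under rcu_read_lock(), while xas_store() must run under the xa_lock via xas_lock()/xas_unlock(). A kernel-context sketch of both rules (assumes a populated struct xarray; not buildable outside the tree):

static unsigned long count_then_clear_first(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long seen = 0;
	void *entry;

	rcu_read_lock();			/* walkers need RCU */
	xas_for_each(&xas, entry, ULONG_MAX)
		seen++;
	rcu_read_unlock();

	xas_set(&xas, 0);
	xas_lock(&xas);				/* stores need the xa_lock */
	xas_store(&xas, NULL);
	xas_unlock(&xas);
	return seen;
}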
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 59fee96c29a0..e4162f59a81c 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -427,8 +427,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
-void __noreturn
-__ubsan_handle_builtin_unreachable(struct unreachable_data *data)
+void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
{
unsigned long flags;
diff --git a/lib/xarray.c b/lib/xarray.c
index 8b176f009c08..bbacca576593 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -610,8 +610,8 @@ static int xas_expand(struct xa_state *xas, void *head)
* (see the xa_cmpxchg() implementation for an example).
*
* Return: If the slot already existed, returns the contents of this slot.
- * If the slot was newly created, returns NULL. If it failed to create the
- * slot, returns NULL and indicates the error in @xas.
+ * If the slot was newly created, returns %NULL. If it failed to create the
+ * slot, returns %NULL and indicates the error in @xas.
*/
static void *xas_create(struct xa_state *xas)
{
@@ -1334,44 +1334,31 @@ void *__xa_erase(struct xarray *xa, unsigned long index)
XA_STATE(xas, xa, index);
return xas_result(&xas, xas_store(&xas, NULL));
}
-EXPORT_SYMBOL_GPL(__xa_erase);
+EXPORT_SYMBOL(__xa_erase);
/**
- * xa_store() - Store this entry in the XArray.
+ * xa_erase() - Erase this entry from the XArray.
* @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
+ * @index: Index of entry.
*
- * After this function returns, loads from this index will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
- * The marks associated with @index are unaffected unless @entry is %NULL.
+ * This function is the equivalent of calling xa_store() with %NULL as
+ * the third argument. The XArray does not need to allocate memory, so
+ * the user does not need to provide GFP flags.
*
- * Context: Process context. Takes and releases the xa_lock. May sleep
- * if the @gfp flags permit.
- * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
- * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
- * failed.
+ * Context: Any context. Takes and releases the xa_lock.
+ * Return: The entry which used to be at this index.
*/
-void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+void *xa_erase(struct xarray *xa, unsigned long index)
{
- XA_STATE(xas, xa, index);
- void *curr;
-
- if (WARN_ON_ONCE(xa_is_internal(entry)))
- return XA_ERROR(-EINVAL);
+ void *entry;
- do {
- xas_lock(&xas);
- curr = xas_store(&xas, entry);
- if (xa_track_free(xa) && entry)
- xas_clear_mark(&xas, XA_FREE_MARK);
- xas_unlock(&xas);
- } while (xas_nomem(&xas, gfp));
+ xa_lock(xa);
+ entry = __xa_erase(xa, index);
+ xa_unlock(xa);
- return xas_result(&xas, curr);
+ return entry;
}
-EXPORT_SYMBOL(xa_store);
+EXPORT_SYMBOL(xa_erase);
/**
* __xa_store() - Store this entry in the XArray.
@@ -1395,10 +1382,12 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
if (WARN_ON_ONCE(xa_is_internal(entry)))
return XA_ERROR(-EINVAL);
+ if (xa_track_free(xa) && !entry)
+ entry = XA_ZERO_ENTRY;
do {
curr = xas_store(&xas, entry);
- if (xa_track_free(xa) && entry)
+ if (xa_track_free(xa))
xas_clear_mark(&xas, XA_FREE_MARK);
} while (__xas_nomem(&xas, gfp));
@@ -1407,45 +1396,33 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
EXPORT_SYMBOL(__xa_store);
/**
- * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * xa_store() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
- * @old: Old value to test against.
- * @entry: New value to place in array.
+ * @entry: New entry.
* @gfp: Memory allocation flags.
*
- * If the entry at @index is the same as @old, replace it with @entry.
- * If the return value is equal to @old, then the exchange was successful.
+ * After this function returns, loads from this index will return @entry.
+ * Storing into an existing multislot entry updates the entry of every index.
+ * The marks associated with @index are unaffected unless @entry is %NULL.
*
- * Context: Process context. Takes and releases the xa_lock. May sleep
- * if the @gfp flags permit.
- * Return: The old value at this index or xa_err() if an error happened.
+ * Context: Any context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
+ * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
+ * failed.
*/
-void *xa_cmpxchg(struct xarray *xa, unsigned long index,
- void *old, void *entry, gfp_t gfp)
+void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
- XA_STATE(xas, xa, index);
void *curr;
- if (WARN_ON_ONCE(xa_is_internal(entry)))
- return XA_ERROR(-EINVAL);
-
- do {
- xas_lock(&xas);
- curr = xas_load(&xas);
- if (curr == XA_ZERO_ENTRY)
- curr = NULL;
- if (curr == old) {
- xas_store(&xas, entry);
- if (xa_track_free(xa) && entry)
- xas_clear_mark(&xas, XA_FREE_MARK);
- }
- xas_unlock(&xas);
- } while (xas_nomem(&xas, gfp));
+ xa_lock(xa);
+ curr = __xa_store(xa, index, entry, gfp);
+ xa_unlock(xa);
- return xas_result(&xas, curr);
+ return curr;
}
-EXPORT_SYMBOL(xa_cmpxchg);
+EXPORT_SYMBOL(xa_store);
/**
* __xa_cmpxchg() - Store this entry in the XArray.
@@ -1471,6 +1448,8 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
if (WARN_ON_ONCE(xa_is_internal(entry)))
return XA_ERROR(-EINVAL);
+ if (xa_track_free(xa) && !entry)
+ entry = XA_ZERO_ENTRY;
do {
curr = xas_load(&xas);
@@ -1478,7 +1457,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
curr = NULL;
if (curr == old) {
xas_store(&xas, entry);
- if (xa_track_free(xa) && entry)
+ if (xa_track_free(xa))
xas_clear_mark(&xas, XA_FREE_MARK);
}
} while (__xas_nomem(&xas, gfp));
@@ -1488,7 +1467,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
EXPORT_SYMBOL(__xa_cmpxchg);
/**
- * xa_reserve() - Reserve this index in the XArray.
+ * __xa_reserve() - Reserve this index in the XArray.
* @xa: XArray.
* @index: Index into array.
* @gfp: Memory allocation flags.
@@ -1496,33 +1475,32 @@ EXPORT_SYMBOL(__xa_cmpxchg);
* Ensures there is somewhere to store an entry at @index in the array.
* If there is already something stored at @index, this function does
* nothing. If there was nothing there, the entry is marked as reserved.
- * Loads from @index will continue to see a %NULL pointer until a
- * subsequent store to @index.
+ * Loading from a reserved entry returns a %NULL pointer.
*
* If you do not use the entry that you have reserved, call xa_release()
* or xa_erase() to free any unnecessary memory.
*
- * Context: Process context. Takes and releases the xa_lock, IRQ or BH safe
- * if specified in XArray flags. May sleep if the @gfp flags permit.
+ * Context: Any context. Expects the xa_lock to be held on entry. May
+ * release the lock, sleep and reacquire the lock if the @gfp flags permit.
* Return: 0 if the reservation succeeded or -ENOMEM if it failed.
*/
-int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
XA_STATE(xas, xa, index);
- unsigned int lock_type = xa_lock_type(xa);
void *curr;
do {
- xas_lock_type(&xas, lock_type);
curr = xas_load(&xas);
- if (!curr)
+ if (!curr) {
xas_store(&xas, XA_ZERO_ENTRY);
- xas_unlock_type(&xas, lock_type);
- } while (xas_nomem(&xas, gfp));
+ if (xa_track_free(xa))
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ }
+ } while (__xas_nomem(&xas, gfp));
return xas_error(&xas);
}
-EXPORT_SYMBOL(xa_reserve);
+EXPORT_SYMBOL(__xa_reserve);
#ifdef CONFIG_XARRAY_MULTI
static void xas_set_range(struct xa_state *xas, unsigned long first,
@@ -1587,8 +1565,9 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
do {
xas_lock(&xas);
if (entry) {
- unsigned int order = (last == ~0UL) ? 64 :
- ilog2(last + 1);
+ unsigned int order = BITS_PER_LONG;
+ if (last + 1)
+ order = __ffs(last + 1);
xas_set_order(&xas, last, order);
xas_create(&xas);
if (xas_error(&xas))
@@ -1662,7 +1641,7 @@ EXPORT_SYMBOL(__xa_alloc);
* @index: Index of entry.
* @mark: Mark number.
*
- * Attempting to set a mark on a NULL entry does not succeed.
+ * Attempting to set a mark on a %NULL entry does not succeed.
*
* Context: Any context. Expects xa_lock to be held on entry.
*/
@@ -1674,7 +1653,7 @@ void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
if (entry)
xas_set_mark(&xas, mark);
}
-EXPORT_SYMBOL_GPL(__xa_set_mark);
+EXPORT_SYMBOL(__xa_set_mark);
/**
* __xa_clear_mark() - Clear this mark on this entry while locked.
@@ -1692,7 +1671,7 @@ void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
if (entry)
xas_clear_mark(&xas, mark);
}
-EXPORT_SYMBOL_GPL(__xa_clear_mark);
+EXPORT_SYMBOL(__xa_clear_mark);
/**
* xa_get_mark() - Inquire whether this mark is set on this entry.
@@ -1732,7 +1711,7 @@ EXPORT_SYMBOL(xa_get_mark);
* @index: Index of entry.
* @mark: Mark number.
*
- * Attempting to set a mark on a NULL entry does not succeed.
+ * Attempting to set a mark on a %NULL entry does not succeed.
*
* Context: Process context. Takes and releases the xa_lock.
*/
@@ -1829,6 +1808,8 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
entry = xas_find_marked(&xas, max, filter);
else
entry = xas_find(&xas, max);
+ if (xas.xa_node == XAS_BOUNDS)
+ break;
if (xas.xa_shift) {
if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
continue;
@@ -1899,7 +1880,7 @@ static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
*
* The @filter may be an XArray mark value, in which case entries which are
* marked with that mark will be copied. It may also be %XA_PRESENT, in
- * which case all entries which are not NULL will be copied.
+ * which case all entries which are not %NULL will be copied.
*
* The entries returned may not represent a snapshot of the XArray at a
* moment in time. For example, if another thread stores to index 5, then
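
After the reshuffle above, xa_erase() and xa_store() are thin wrappers that take the xa_lock around __xa_erase()/__xa_store(), which is why xa_erase() no longer needs GFP flags or a process context. A kernel-context usage sketch, with errors extracted via the real xa_err():

static int store_then_erase(struct xarray *xa)
{
	void *old;
	int err = xa_err(xa_store(xa, 1, xa_mk_value(42), GFP_KERNEL));

	if (err)
		return err;		/* -EINVAL or -ENOMEM */

	old = xa_erase(xa, 1);		/* no GFP flags: never allocates */
	return old == xa_mk_value(42) ? 0 : -EIO;
}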
diff --git a/mm/gup.c b/mm/gup.c
index f76e77a2d34b..aa43620a3270 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -385,11 +385,17 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
* @vma: vm_area_struct mapping @address
* @address: virtual address to look up
* @flags: flags modifying lookup behaviour
- * @page_mask: on output, *page_mask is set according to the size of the page
+ * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
+ * pointer to output page_mask
*
* @flags can have FOLL_ flags set, defined in <linux/mm.h>
*
- * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
+ * the device's dev_pagemap metadata to avoid repeating expensive lookups.
+ *
+ * On output, the @ctx->page_mask is set according to the size of the page.
+ *
+ * Return: the mapped (struct page *), %NULL if no mapping exists, or
* an error pointer if there is a mapping to something not represented
* by a page descriptor (see also vm_normal_page()).
*/
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c007fb5fb8d5..7f2a28ab46d5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3233,7 +3233,7 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
- pte_t *src_pte, *dst_pte, entry;
+ pte_t *src_pte, *dst_pte, entry, dst_entry;
struct page *ptepage;
unsigned long addr;
int cow;
@@ -3261,15 +3261,30 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
break;
}
- /* If the pagetables are shared don't copy or take references */
- if (dst_pte == src_pte)
+ /*
+ * If the pagetables are shared don't copy or take references.
+ * dst_pte == src_pte is the common case of src/dest sharing.
+ *
+ * However, src could have 'unshared' and dst shares with
+ * another vma. If dst_pte !none, this implies sharing.
+ * Check here before taking page table lock, and once again
+ * after taking the lock below.
+ */
+ dst_entry = huge_ptep_get(dst_pte);
+ if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
continue;
dst_ptl = huge_pte_lock(h, dst, dst_pte);
src_ptl = huge_pte_lockptr(h, src, src_pte);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
entry = huge_ptep_get(src_pte);
- if (huge_pte_none(entry)) { /* skip none entry */
+ dst_entry = huge_ptep_get(dst_pte);
+ if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
+ /*
+ * Skip if src entry none. Also, skip in the
+ * unlikely case dst entry !none as this implies
+ * sharing with another vma.
+ */
;
} else if (unlikely(is_hugetlb_entry_migration(entry) ||
is_hugetlb_entry_hwpoisoned(entry))) {
diff --git a/mm/memblock.c b/mm/memblock.c
index 7df468c8ebc8..9a2d5ae81ae1 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1179,7 +1179,7 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
- * Common iterator interface used to define for_each_mem_range().
+ * Common iterator interface used to define for_each_mem_pfn_range().
*/
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
unsigned long *out_start_pfn,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a919ba5cb3c8..6847177dc4a1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4061,17 +4061,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
int reserve_flags;
/*
- * In the slowpath, we sanity check order to avoid ever trying to
- * reclaim >= MAX_ORDER areas which will never succeed. Callers may
- * be using allocators in order of preference for an area that is
- * too large.
- */
- if (order >= MAX_ORDER) {
- WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
- return NULL;
- }
-
- /*
* We also sanity check to catch abuse of atomic reserves being used by
* callers that are not in atomic context.
*/
@@ -4364,6 +4353,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
struct alloc_context ac = { };
+ /*
+ * There are several places where we assume that the order value is sane
+ * so bail out early if the request is out of bound.
+ */
+ if (unlikely(order >= MAX_ORDER)) {
+ WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+ return NULL;
+ }
+
gfp_mask &= gfp_allowed_mask;
alloc_mask = gfp_mask;
if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
@@ -7789,6 +7787,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
goto unmovable;
/*
+ * If the zone is movable and we have ruled out all reserved
+ * pages then it should be reasonably safe to assume the rest
+ * is movable.
+ */
+ if (zone_idx(zone) == ZONE_MOVABLE)
+ continue;
+
+ /*
* Hugepages are not in LRU lists, but they're movable.
* We need not scan over tail pages because we don't
* handle each tail page individually in migration.
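
Two independent page_alloc changes ride in this hunk pair: the order >= MAX_ORDER sanity check moves from the slowpath to the top of __alloc_pages_nodemask() so even requests that would have been served straight from the freelists are bounded, and has_unmovable_pages() now trusts ZONE_MOVABLE once reserved pages are ruled out. The hoisted check, as a userspace sketch:

#include <stdio.h>

#define MAX_ORDER	11
#define GFP_NOWARN	0x1u

/* userspace analogue of the hoisted bail-out: reject out-of-range
 * orders before touching any allocator state */
static void *alloc_pages_sketch(unsigned int gfp_mask, unsigned int order)
{
	if (order >= MAX_ORDER) {
		if (!(gfp_mask & GFP_NOWARN))
			fprintf(stderr, "order %u out of range\n", order);
		return NULL;
	}
	/* ... fast path, then slow path ... */
	return NULL;
}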
diff --git a/mm/shmem.c b/mm/shmem.c
index ea26d7a0342d..0e10b06fc7d6 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -756,7 +756,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
break;
index = indices[pvec.nr - 1] + 1;
pagevec_remove_exceptionals(&pvec);
- check_move_unevictable_pages(pvec.pages, pvec.nr);
+ check_move_unevictable_pages(&pvec);
pagevec_release(&pvec);
cond_resched();
}
@@ -2563,9 +2563,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
inode_lock(inode);
/* We're holding i_mutex so we can access i_size directly */
- if (offset < 0)
- offset = -EINVAL;
- else if (offset >= inode->i_size)
+ if (offset < 0 || offset >= inode->i_size)
offset = -ENXIO;
else {
start = offset >> PAGE_SHIFT;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 644f746e167a..8688ae65ef58 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2813,7 +2813,7 @@ static struct swap_info_struct *alloc_swap_info(void)
unsigned int type;
int i;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = kvzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -2824,7 +2824,7 @@ static struct swap_info_struct *alloc_swap_info(void)
}
if (type >= MAX_SWAPFILES) {
spin_unlock(&swap_lock);
- kfree(p);
+ kvfree(p);
return ERR_PTR(-EPERM);
}
if (type >= nr_swapfiles) {
@@ -2838,7 +2838,7 @@ static struct swap_info_struct *alloc_swap_info(void)
smp_wmb();
nr_swapfiles++;
} else {
- kfree(p);
+ kvfree(p);
p = swap_info[type];
/*
* Do not memset this entry: a racing procfs swap_next()
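
The swapfile hunks are a matched set: once the allocation switches to kvzalloc(), which may be vmalloc-backed when kmalloc cannot satisfy it, every error path has to release it with kvfree() rather than kfree(). A kernel-context sketch of the pairing (the validity check is hypothetical):

static void *alloc_table(size_t sz)
{
	void *p = kvzalloc(sz, GFP_KERNEL);

	if (!p)
		return NULL;

	if (!table_is_usable(p)) {	/* hypothetical check */
		kvfree(p);	/* kfree() would be wrong if vmalloc-backed */
		return NULL;
	}
	return p;
}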
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 62ac0c488624..24ab1f7394ab 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -46,6 +46,7 @@
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
+#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
@@ -4182,17 +4183,16 @@ int page_evictable(struct page *page)
return ret;
}
-#ifdef CONFIG_SHMEM
/**
- * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
- * @pages: array of pages to check
- * @nr_pages: number of pages to check
+ * check_move_unevictable_pages - check pages for evictability and move to
+ * appropriate zone lru list
+ * @pvec: pagevec with lru pages to check
*
- * Checks pages for evictability and moves them to the appropriate lru list.
- *
- * This function is only used for SysV IPC SHM_UNLOCK.
+ * Checks pages for evictability; if an evictable page is in the unevictable
+ * lru list, moves it to the appropriate evictable lru list. This function
+ * should only be used for lru pages.
*/
-void check_move_unevictable_pages(struct page **pages, int nr_pages)
+void check_move_unevictable_pages(struct pagevec *pvec)
{
struct lruvec *lruvec;
struct pglist_data *pgdat = NULL;
@@ -4200,8 +4200,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
int pgrescued = 0;
int i;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pages[i];
+ for (i = 0; i < pvec->nr; i++) {
+ struct page *page = pvec->pages[i];
struct pglist_data *pagepgdat = page_pgdat(page);
pgscanned++;
@@ -4233,4 +4233,4 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
spin_unlock_irq(&pgdat->lru_lock);
}
}
-#endif /* CONFIG_SHMEM */
+EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6038ce593ce3..9c624595e904 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1827,12 +1827,13 @@ static bool need_update(int cpu)
/*
* The fast way of checking if there are any vmstat diffs.
- * This works because the diffs are byte sized items.
*/
- if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
+ if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
+ sizeof(p->vm_stat_diff[0])))
return true;
#ifdef CONFIG_NUMA
- if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS))
+ if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
+ sizeof(p->vm_numa_stat_diff[0])))
return true;
#endif
}
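
The vmstat fix is about units: memchr_inv(p, c, n) scans n bytes, and once the per-cpu diff arrays stopped being arrays of bytes, the old calls silently checked only the first NR_*_ITEMS bytes. Scaling by sizeof(element) restores full coverage; a standalone demonstration:

#include <stdio.h>
#include <string.h>

/* userspace stand-in for memchr_inv(): returns a pointer to the first
 * byte that differs from c, or NULL if all n bytes equal c */
static const void *memchr_inv_sketch(const void *p, int c, size_t n)
{
	const unsigned char *s = p;
	size_t i;

	for (i = 0; i < n; i++)
		if (s[i] != (unsigned char)c)
			return s + i;
	return NULL;
}

int main(void)
{
	short diffs[8] = { 0 };

	diffs[6] = 1;	/* a pending diff beyond the first 8 bytes */

	/* scanning NR_ITEMS bytes misses it ... */
	printf("%p\n", memchr_inv_sketch(diffs, 0, 8));
	/* ... scanning NR_ITEMS * sizeof(element) bytes finds it */
	printf("%p\n", memchr_inv_sketch(diffs, 0, 8 * sizeof(diffs[0])));
	return 0;
}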
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 4b366d181f35..aee9b0b8d907 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -99,6 +99,7 @@ struct z3fold_header {
#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
#define BUDDY_MASK (0x3)
+#define BUDDY_SHIFT 2
/**
* struct z3fold_pool - stores metadata for each z3fold pool
@@ -145,7 +146,7 @@ enum z3fold_page_flags {
MIDDLE_CHUNK_MAPPED,
NEEDS_COMPACTING,
PAGE_STALE,
- UNDER_RECLAIM
+ PAGE_CLAIMED, /* by either reclaim or free */
};
/*****************
@@ -174,7 +175,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
clear_bit(NEEDS_COMPACTING, &page->private);
clear_bit(PAGE_STALE, &page->private);
- clear_bit(UNDER_RECLAIM, &page->private);
+ clear_bit(PAGE_CLAIMED, &page->private);
spin_lock_init(&zhdr->page_lock);
kref_init(&zhdr->refcount);
@@ -223,8 +224,11 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
unsigned long handle;
handle = (unsigned long)zhdr;
- if (bud != HEADLESS)
- handle += (bud + zhdr->first_num) & BUDDY_MASK;
+ if (bud != HEADLESS) {
+ handle |= (bud + zhdr->first_num) & BUDDY_MASK;
+ if (bud == LAST)
+ handle |= (zhdr->last_chunks << BUDDY_SHIFT);
+ }
return handle;
}
@@ -234,6 +238,12 @@ static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
return (struct z3fold_header *)(handle & PAGE_MASK);
}
+/* only for LAST bud, returns zero otherwise */
+static unsigned short handle_to_chunks(unsigned long handle)
+{
+ return (handle & ~PAGE_MASK) >> BUDDY_SHIFT;
+}
+
/*
* (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
* but that doesn't matter, because the masking will result in the
@@ -720,37 +730,39 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
page = virt_to_page(zhdr);
if (test_bit(PAGE_HEADLESS, &page->private)) {
- /* HEADLESS page stored */
- bud = HEADLESS;
- } else {
- z3fold_page_lock(zhdr);
- bud = handle_to_buddy(handle);
-
- switch (bud) {
- case FIRST:
- zhdr->first_chunks = 0;
- break;
- case MIDDLE:
- zhdr->middle_chunks = 0;
- zhdr->start_middle = 0;
- break;
- case LAST:
- zhdr->last_chunks = 0;
- break;
- default:
- pr_err("%s: unknown bud %d\n", __func__, bud);
- WARN_ON(1);
- z3fold_page_unlock(zhdr);
- return;
+ /* if a headless page is under reclaim, just leave.
+ * NB: we use test_and_set_bit for a reason: if the bit
+ * has not been set before, we release this page
+ * immediately so we don't care about its value any more.
+ */
+ if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
+ spin_lock(&pool->lock);
+ list_del(&page->lru);
+ spin_unlock(&pool->lock);
+ free_z3fold_page(page);
+ atomic64_dec(&pool->pages_nr);
}
+ return;
}
- if (bud == HEADLESS) {
- spin_lock(&pool->lock);
- list_del(&page->lru);
- spin_unlock(&pool->lock);
- free_z3fold_page(page);
- atomic64_dec(&pool->pages_nr);
+ /* Non-headless case */
+ z3fold_page_lock(zhdr);
+ bud = handle_to_buddy(handle);
+
+ switch (bud) {
+ case FIRST:
+ zhdr->first_chunks = 0;
+ break;
+ case MIDDLE:
+ zhdr->middle_chunks = 0;
+ break;
+ case LAST:
+ zhdr->last_chunks = 0;
+ break;
+ default:
+ pr_err("%s: unknown bud %d\n", __func__, bud);
+ WARN_ON(1);
+ z3fold_page_unlock(zhdr);
return;
}
@@ -758,7 +770,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
atomic64_dec(&pool->pages_nr);
return;
}
- if (test_bit(UNDER_RECLAIM, &page->private)) {
+ if (test_bit(PAGE_CLAIMED, &page->private)) {
z3fold_page_unlock(zhdr);
return;
}
@@ -836,20 +848,30 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
}
list_for_each_prev(pos, &pool->lru) {
page = list_entry(pos, struct page, lru);
+
+ /* this bit could have been set by free, in which case
+ * we pass over to the next page in the pool.
+ */
+ if (test_and_set_bit(PAGE_CLAIMED, &page->private))
+ continue;
+
+ zhdr = page_address(page);
if (test_bit(PAGE_HEADLESS, &page->private))
- /* candidate found */
break;
- zhdr = page_address(page);
- if (!z3fold_page_trylock(zhdr))
+ if (!z3fold_page_trylock(zhdr)) {
+ zhdr = NULL;
continue; /* can't evict at this point */
+ }
kref_get(&zhdr->refcount);
list_del_init(&zhdr->buddy);
zhdr->cpu = -1;
- set_bit(UNDER_RECLAIM, &page->private);
break;
}
+ if (!zhdr)
+ break;
+
list_del_init(&page->lru);
spin_unlock(&pool->lock);
@@ -898,6 +920,7 @@ next:
if (test_bit(PAGE_HEADLESS, &page->private)) {
if (ret == 0) {
free_z3fold_page(page);
+ atomic64_dec(&pool->pages_nr);
return 0;
}
spin_lock(&pool->lock);
@@ -905,7 +928,7 @@ next:
spin_unlock(&pool->lock);
} else {
z3fold_page_lock(zhdr);
- clear_bit(UNDER_RECLAIM, &page->private);
+ clear_bit(PAGE_CLAIMED, &page->private);
if (kref_put(&zhdr->refcount,
release_z3fold_page_locked)) {
atomic64_dec(&pool->pages_nr);
@@ -964,7 +987,7 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
break;
case LAST:
- addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
+ addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
break;
default:
pr_err("unknown buddy id %d\n", buddy);
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 9f481cfdf77d..e8090f099eb8 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -352,19 +352,21 @@ out:
*/
int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
{
+ static const size_t tvlv_padding = sizeof(__be32);
struct batadv_elp_packet *elp_packet;
unsigned char *elp_buff;
u32 random_seqno;
size_t size;
int res = -ENOMEM;
- size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN;
+ size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
if (!hard_iface->bat_v.elp_skb)
goto out;
skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
- elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
+ elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb,
+ BATADV_ELP_HLEN + tvlv_padding);
elp_packet = (struct batadv_elp_packet *)elp_buff;
elp_packet->packet_type = BATADV_ELP;
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 0fddc17106bd..5b71a289d04f 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -275,7 +275,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
kfree(entry);
packet = (struct batadv_frag_packet *)skb_out->data;
- size = ntohs(packet->total_size);
+ size = ntohs(packet->total_size) + hdr_size;
/* Make room for the rest of the fragments. */
if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2920e06a5403..04c19a37e500 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -102,12 +102,18 @@ struct br_tunnel_info {
struct metadata_dst *tunnel_dst;
};
+/* private vlan flags */
+enum {
+ BR_VLFLAG_PER_PORT_STATS = BIT(0),
+};
+
/**
* struct net_bridge_vlan - per-vlan entry
*
* @vnode: rhashtable member
* @vid: VLAN id
* @flags: bridge vlan flags
+ * @priv_flags: private (in-kernel) bridge vlan flags
* @stats: per-cpu VLAN statistics
* @br: if MASTER flag set, this points to a bridge struct
* @port: if MASTER flag unset, this points to a port struct
@@ -127,6 +133,7 @@ struct net_bridge_vlan {
struct rhash_head tnode;
u16 vid;
u16 flags;
+ u16 priv_flags;
struct br_vlan_stats __percpu *stats;
union {
struct net_bridge *br;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 8c9297a01947..e84be08b8285 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -197,7 +197,7 @@ static void nbp_vlan_rcu_free(struct rcu_head *rcu)
v = container_of(rcu, struct net_bridge_vlan, rcu);
WARN_ON(br_vlan_is_master(v));
/* if we had per-port stats configured then free them here */
- if (v->brvlan->stats != v->stats)
+ if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
free_percpu(v->stats);
v->stats = NULL;
kfree(v);
@@ -264,6 +264,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
err = -ENOMEM;
goto out_filt;
}
+ v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
} else {
v->stats = masterv->stats;
}
diff --git a/net/can/raw.c b/net/can/raw.c
index 1051eee82581..3aab7664933f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -745,18 +745,19 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
} else
ifindex = ro->ifindex;
- if (ro->fd_frames) {
+ dev = dev_get_by_index(sock_net(sk), ifindex);
+ if (!dev)
+ return -ENXIO;
+
+ err = -EINVAL;
+ if (ro->fd_frames && dev->mtu == CANFD_MTU) {
if (unlikely(size != CANFD_MTU && size != CAN_MTU))
- return -EINVAL;
+ goto put_dev;
} else {
if (unlikely(size != CAN_MTU))
- return -EINVAL;
+ goto put_dev;
}
- dev = dev_get_by_index(sock_net(sk), ifindex);
- if (!dev)
- return -ENXIO;
-
skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
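
The can/raw.c reorder exists because the legal frame size now depends on the device: CANFD_MTU-sized frames are only acceptable when fd_frames is set and the interface itself is CAN-FD (dev->mtu == CANFD_MTU). Looking the device up first means every later validation failure must drop the reference, hence the goto put_dev exits (the label body sits outside the quoted hunk). The acquire/validate/release shape, as a simplified kernel-context sketch:

static int send_sketch(struct net *net, int ifindex, size_t size)
{
	struct net_device *dev = dev_get_by_index(net, ifindex);
	int err = -EINVAL;

	if (!dev)
		return -ENXIO;		/* nothing acquired yet */

	if (size != CAN_MTU && size != CANFD_MTU)
		goto put_dev;		/* must not leak the reference */

	/* ... allocate, build and queue the skb ... */
	err = 0;
put_dev:
	dev_put(dev);
	return err;
}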
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 57fcc6b4bf6e..2f126eff275d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -580,9 +580,15 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
struct bio_vec bvec;
int ret;
- /* sendpage cannot properly handle pages with page_count == 0,
- * we need to fallback to sendmsg if that's the case */
- if (page_count(page) >= 1)
+ /*
+ * sendpage cannot properly handle pages with page_count == 0,
+ * we need to fall back to sendmsg if that's the case.
+ *
+ * Same goes for slab pages: skb_can_coalesce() allows
+ * coalescing neighboring slab objects into a single frag which
+ * triggers one of hardened usercopy checks.
+ */
+ if (page_count(page) >= 1 && !PageSlab(page))
return __ceph_tcp_sendpage(sock, page, offset, size, more);
bvec.bv_page = page;
diff --git a/net/core/dev.c b/net/core/dev.c
index 77d43ae2a7bb..ddc551f24ba2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3272,7 +3272,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
}
skb = next;
- if (netif_xmit_stopped(txq) && skb) {
+ if (netif_tx_queue_stopped(txq) && skb) {
rc = NETDEV_TX_BUSY;
break;
}
@@ -5655,6 +5655,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->vlan_tci = 0;
skb->dev = napi->dev;
skb->skb_iif = 0;
+
+ /* eth_type_trans() assumes pkt_type is PACKET_HOST */
+ skb->pkt_type = PACKET_HOST;
+
skb->encapsulation = 0;
skb_shinfo(skb)->gso_type = 0;
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
@@ -5966,11 +5970,14 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
if (work_done)
timeout = n->dev->gro_flush_timeout;
+ /* When the NAPI instance uses a timeout and keeps postponing
+ * it, we need to bound somehow the time packets are kept in
+ * the GRO layer
+ */
+ napi_gro_flush(n, !!timeout);
if (timeout)
hrtimer_start(&n->timer, ns_to_ktime(timeout),
HRTIMER_MODE_REL_PINNED);
- else
- napi_gro_flush(n, false);
}
if (unlikely(!list_empty(&n->poll_list))) {
/* If n->poll_list is not empty, we need to mask irqs */
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 676f3ad629f9..588f475019d4 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1166,8 +1166,8 @@ ip_proto_again:
break;
}
- if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_PORTS)) {
+ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
+ !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
key_ports = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS,
target_container);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 5da9552b186b..2b9fdbc43205 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -717,7 +717,8 @@ int netpoll_setup(struct netpoll *np)
read_lock_bh(&idev->lock);
list_for_each_entry(ifp, &idev->addr_list, if_list) {
- if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
+ if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
+ !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
continue;
np->local_ip.in6 = ifp->addr;
err = 0;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e01274bd5e3e..33d9227a8b80 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3367,7 +3367,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
cb->seq = 0;
}
ret = dumpit(skb, cb);
- if (ret < 0)
+ if (ret)
break;
}
cb->family = idx;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 946de0e24c87..a8217e221e19 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4854,6 +4854,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
nf_reset(skb);
nf_reset_trace(skb);
+#ifdef CONFIG_NET_SWITCHDEV
+ skb->offload_fwd_mark = 0;
+ skb->offload_mr_fwd_mark = 0;
+#endif
+
if (!xnet)
return;
@@ -4944,6 +4949,8 @@ static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
*
* This is a helper to do that correctly considering GSO_BY_FRAGS.
*
+ * @skb: GSO skb
+ *
* @seg_len: The segmented length (from skb_gso_*_seglen). In the
* GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
*
diff --git a/net/core/sock.c b/net/core/sock.c
index 6fcc4bc07d19..080a880a1761 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3279,6 +3279,7 @@ int sock_load_diag_module(int family, int protocol)
#ifdef CONFIG_INET
if (family == AF_INET &&
+ protocol != IPPROTO_RAW &&
!rcu_access_pointer(inet_protos[protocol]))
return -ENOENT;
#endif
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index bcb11f3a27c0..760a9e52e02b 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -178,21 +178,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
}
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
- void *arg)
+ void *arg,
+ struct inet_frag_queue **prev)
{
struct inet_frags *f = nf->f;
struct inet_frag_queue *q;
- int err;
q = inet_frag_alloc(nf, f, arg);
- if (!q)
+ if (!q) {
+ *prev = ERR_PTR(-ENOMEM);
return NULL;
-
+ }
mod_timer(&q->timer, jiffies + nf->timeout);
- err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
- f->rhash_params);
- if (err < 0) {
+ *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
+ &q->node, f->rhash_params);
+ if (*prev) {
q->flags |= INET_FRAG_COMPLETE;
inet_frag_kill(q);
inet_frag_destroy(q);
@@ -204,22 +205,22 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
{
- struct inet_frag_queue *fq;
+ struct inet_frag_queue *fq = NULL, *prev;
if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
return NULL;
rcu_read_lock();
- fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
- if (fq) {
+ prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+ if (!prev)
+ fq = inet_frag_create(nf, key, &prev);
+ if (prev && !IS_ERR(prev)) {
+ fq = prev;
if (!refcount_inc_not_zero(&fq->refcnt))
fq = NULL;
- rcu_read_unlock();
- return fq;
}
rcu_read_unlock();
-
- return inet_frag_create(nf, key);
+ return fq;
}
EXPORT_SYMBOL(inet_frag_find);
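
The inet_fragment rework closes a lookup/insert race: rhashtable_lookup_get_insert_key() either inserts the new queue or hands back the entry that won the race, so two CPUs creating the same queue can no longer both succeed. The caller then distinguishes three outcomes; a kernel-context sketch using the patch's own names:

struct inet_frag_queue *old;

old = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
				       &q->node, f->rhash_params);
if (IS_ERR(old)) {
	/* insertion failed outright (e.g. -ENOMEM): tear q down */
} else if (old) {
	/* lost the race: destroy q and use old instead */
} else {
	/* q is now published; arm its timer and return it */
}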
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 9b0158fa431f..d6ee343fdb86 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -722,10 +722,14 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
if (ip_is_fragment(&iph)) {
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb) {
- if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
- return skb;
- if (pskb_trim_rcsum(skb, netoff + len))
- return skb;
+ if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+ if (pskb_trim_rcsum(skb, netoff + len)) {
+ kfree_skb(skb);
+ return NULL;
+ }
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
if (ip_defrag(net, skb, user))
return NULL;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 26c36cccabdc..fffcc130900e 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1246,7 +1246,7 @@ int ip_setsockopt(struct sock *sk, int level,
return -ENOPROTOOPT;
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
optname < BPFILTER_IPT_SET_MAX)
err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
@@ -1559,7 +1559,7 @@ int ip_getsockopt(struct sock *sk, int level,
int err;
err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
if (optname >= BPFILTER_IPT_SO_GET_INFO &&
optname < BPFILTER_IPT_GET_MAX)
err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
@@ -1596,7 +1596,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
err = do_ip_getsockopt(sk, level, optname, optval, optlen,
MSG_CMSG_COMPAT);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
if (optname >= BPFILTER_IPT_SO_GET_INFO &&
optname < BPFILTER_IPT_GET_MAX)
err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
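
The ip_sockglue change keys the hooks to the option that actually provides them: IS_ENABLED(CONFIG_FOO) is true for both =y and =m builds, whereas a bare #ifdef CONFIG_BPFILTER misses the modular case. A simplified userspace demo of the semantics (the real macro uses token pasting rather than defined()):

/* the kconfig headers define CONFIG_FOO for =y and CONFIG_FOO_MODULE
 * for =m; IS_ENABLED() is true in either case, unlike a bare #ifdef */
#define CONFIG_DEMO_MODULE 1	/* pretend the option is =m */

#if defined(CONFIG_DEMO) || defined(CONFIG_DEMO_MODULE)
#define DEMO_ENABLED 1		/* what IS_ENABLED(CONFIG_DEMO) yields */
#else
#define DEMO_ENABLED 0
#endif

#include <stdio.h>

int main(void)
{
	printf("%d\n", DEMO_ENABLED);	/* prints 1 */
	return 0;
}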
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index dde671e97829..c248e0dccbe1 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -80,7 +80,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
iph->version = 4;
iph->ihl = sizeof(struct iphdr) >> 2;
- iph->frag_off = df;
+ iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
iph->protocol = proto;
iph->tos = tos;
iph->daddr = dst;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2868ef28ce52..1e37c1388189 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4268,7 +4268,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
* If the sack array is full, forget about the last one.
*/
if (this_sack >= TCP_NUM_SACKS) {
- if (tp->compressed_ack)
+ if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
tcp_send_ack(sk);
this_sack--;
tp->rx_opt.num_sacks--;
@@ -4363,6 +4363,7 @@ static bool tcp_try_coalesce(struct sock *sk,
if (TCP_SKB_CB(from)->has_rxtstamp) {
TCP_SKB_CB(to)->has_rxtstamp = true;
to->tstamp = from->tstamp;
+ skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
}
return true;
@@ -5188,7 +5189,17 @@ send_now:
if (!tcp_is_sack(tp) ||
tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
goto send_now;
- tp->compressed_ack++;
+
+ if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
+ tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
+ if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
+ NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
+ tp->compressed_ack - TCP_FASTRETRANS_THRESH);
+ tp->compressed_ack = 0;
+ }
+
+ if (++tp->compressed_ack <= TCP_FASTRETRANS_THRESH)
+ goto send_now;
if (hrtimer_is_queued(&tp->compressed_ack_timer))
return;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9c34b97d365d..3f510cad0b3e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -180,10 +180,10 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
{
struct tcp_sock *tp = tcp_sk(sk);
- if (unlikely(tp->compressed_ack)) {
+ if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
- tp->compressed_ack);
- tp->compressed_ack = 0;
+ tp->compressed_ack - TCP_FASTRETRANS_THRESH);
+ tp->compressed_ack = TCP_FASTRETRANS_THRESH;
if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
__sock_put(sk);
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 676020663ce8..5f8b6d3cd855 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -740,7 +740,7 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
- if (tp->compressed_ack)
+ if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
tcp_send_ack(sk);
} else {
if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 63a808d5af15..045597b9a7c0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -179,7 +179,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp);
static void addrconf_dad_work(struct work_struct *w);
static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
bool send_na);
-static void addrconf_dad_run(struct inet6_dev *idev);
+static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
static void addrconf_rs_timer(struct timer_list *t);
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@@ -3439,6 +3439,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_change_info *change_info;
struct netdev_notifier_changeupper_info *info;
struct inet6_dev *idev = __in6_dev_get(dev);
struct net *net = dev_net(dev);
@@ -3513,7 +3514,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
break;
}
- if (idev) {
+ if (!IS_ERR_OR_NULL(idev)) {
if (idev->if_flags & IF_READY) {
/* device is already configured -
* but resend MLD reports, we might
@@ -3521,6 +3522,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
* multicast snooping switches
*/
ipv6_mc_up(idev);
+ change_info = ptr;
+ if (change_info->flags_changed & IFF_NOARP)
+ addrconf_dad_run(idev, true);
rt6_sync_up(dev, RTNH_F_LINKDOWN);
break;
}
@@ -3555,7 +3559,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (!IS_ERR_OR_NULL(idev)) {
if (run_pending)
- addrconf_dad_run(idev);
+ addrconf_dad_run(idev, false);
/* Device has an address by now */
rt6_sync_up(dev, RTNH_F_DEAD);
@@ -4173,16 +4177,19 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
addrconf_verify_rtnl();
}
-static void addrconf_dad_run(struct inet6_dev *idev)
+static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
{
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
list_for_each_entry(ifp, &idev->addr_list, if_list) {
spin_lock(&ifp->lock);
- if (ifp->flags & IFA_F_TENTATIVE &&
- ifp->state == INET6_IFADDR_STATE_DAD)
+ if ((ifp->flags & IFA_F_TENTATIVE &&
+ ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
+ if (restart)
+ ifp->state = INET6_IFADDR_STATE_PREDAD;
addrconf_dad_kick(ifp);
+ }
spin_unlock(&ifp->lock);
}
read_unlock_bh(&idev->lock);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3f4d61017a69..f0cd291034f0 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1001,6 +1001,9 @@ static int __init inet6_init(void)
err = ip6_flowlabel_init();
if (err)
goto ip6_flowlabel_fail;
+ err = ipv6_anycast_init();
+ if (err)
+ goto ipv6_anycast_fail;
err = addrconf_init();
if (err)
goto addrconf_fail;
@@ -1091,6 +1094,8 @@ ipv6_frag_fail:
ipv6_exthdrs_fail:
addrconf_cleanup();
addrconf_fail:
+ ipv6_anycast_cleanup();
+ipv6_anycast_fail:
ip6_flowlabel_cleanup();
ip6_flowlabel_fail:
ndisc_late_cleanup();
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 4e0ff7031edd..94999058e110 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -44,8 +44,22 @@
#include <net/checksum.h>
+#define IN6_ADDR_HSIZE_SHIFT 8
+#define IN6_ADDR_HSIZE BIT(IN6_ADDR_HSIZE_SHIFT)
+/* anycast address hash table
+ */
+static struct hlist_head inet6_acaddr_lst[IN6_ADDR_HSIZE];
+static DEFINE_SPINLOCK(acaddr_hash_lock);
+
static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
+static u32 inet6_acaddr_hash(struct net *net, const struct in6_addr *addr)
+{
+ u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
+
+ return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
+}
+
/*
* socket join an anycast group
*/
@@ -204,16 +218,39 @@ void ipv6_sock_ac_close(struct sock *sk)
rtnl_unlock();
}
+static void ipv6_add_acaddr_hash(struct net *net, struct ifacaddr6 *aca)
+{
+ unsigned int hash = inet6_acaddr_hash(net, &aca->aca_addr);
+
+ spin_lock(&acaddr_hash_lock);
+ hlist_add_head_rcu(&aca->aca_addr_lst, &inet6_acaddr_lst[hash]);
+ spin_unlock(&acaddr_hash_lock);
+}
+
+static void ipv6_del_acaddr_hash(struct ifacaddr6 *aca)
+{
+ spin_lock(&acaddr_hash_lock);
+ hlist_del_init_rcu(&aca->aca_addr_lst);
+ spin_unlock(&acaddr_hash_lock);
+}
+
static void aca_get(struct ifacaddr6 *aca)
{
refcount_inc(&aca->aca_refcnt);
}
+static void aca_free_rcu(struct rcu_head *h)
+{
+ struct ifacaddr6 *aca = container_of(h, struct ifacaddr6, rcu);
+
+ fib6_info_release(aca->aca_rt);
+ kfree(aca);
+}
+
static void aca_put(struct ifacaddr6 *ac)
{
if (refcount_dec_and_test(&ac->aca_refcnt)) {
- fib6_info_release(ac->aca_rt);
- kfree(ac);
+ call_rcu(&ac->rcu, aca_free_rcu);
}
}
@@ -229,6 +266,7 @@ static struct ifacaddr6 *aca_alloc(struct fib6_info *f6i,
aca->aca_addr = *addr;
fib6_info_hold(f6i);
aca->aca_rt = f6i;
+ INIT_HLIST_NODE(&aca->aca_addr_lst);
aca->aca_users = 1;
/* aca_tstamp should be updated upon changes */
aca->aca_cstamp = aca->aca_tstamp = jiffies;
@@ -285,6 +323,8 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
aca_get(aca);
write_unlock_bh(&idev->lock);
+ ipv6_add_acaddr_hash(net, aca);
+
ip6_ins_rt(net, f6i);
addrconf_join_solict(idev->dev, &aca->aca_addr);
@@ -325,6 +365,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
else
idev->ac_list = aca->aca_next;
write_unlock_bh(&idev->lock);
+ ipv6_del_acaddr_hash(aca);
addrconf_leave_solict(idev, &aca->aca_addr);
ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
@@ -352,6 +393,8 @@ void ipv6_ac_destroy_dev(struct inet6_dev *idev)
idev->ac_list = aca->aca_next;
write_unlock_bh(&idev->lock);
+ ipv6_del_acaddr_hash(aca);
+
addrconf_leave_solict(idev, &aca->aca_addr);
ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
@@ -390,17 +433,25 @@ static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *ad
bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr)
{
+ unsigned int hash = inet6_acaddr_hash(net, addr);
+ struct net_device *nh_dev;
+ struct ifacaddr6 *aca;
bool found = false;
rcu_read_lock();
if (dev)
found = ipv6_chk_acast_dev(dev, addr);
else
- for_each_netdev_rcu(net, dev)
- if (ipv6_chk_acast_dev(dev, addr)) {
+ hlist_for_each_entry_rcu(aca, &inet6_acaddr_lst[hash],
+ aca_addr_lst) {
+ nh_dev = fib6_info_nh_dev(aca->aca_rt);
+ if (!nh_dev || !net_eq(dev_net(nh_dev), net))
+ continue;
+ if (ipv6_addr_equal(&aca->aca_addr, addr)) {
found = true;
break;
}
+ }
rcu_read_unlock();
return found;
}
@@ -540,3 +591,24 @@ void ac6_proc_exit(struct net *net)
remove_proc_entry("anycast6", net->proc_net);
}
#endif
+
+/* Init / cleanup code
+ */
+int __init ipv6_anycast_init(void)
+{
+ int i;
+
+ for (i = 0; i < IN6_ADDR_HSIZE; i++)
+ INIT_HLIST_HEAD(&inet6_acaddr_lst[i]);
+ return 0;
+}
+
+void ipv6_anycast_cleanup(void)
+{
+ int i;
+
+ spin_lock(&acaddr_hash_lock);
+ for (i = 0; i < IN6_ADDR_HSIZE; i++)
+ WARN_ON(!hlist_empty(&inet6_acaddr_lst[i]));
+ spin_unlock(&acaddr_hash_lock);
+}
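
The anycast rework replaces the per-lookup for_each_netdev() walk with a 256-bucket global hash table; the bucket comes from hash_32() over ipv6_addr_hash(addr) XOR net_hash_mix(net), so identical addresses in different namespaces land in distinguishable entries. A userspace analogue of the bucket computation (hash_32() approximated by the kernel's golden-ratio multiply):

#include <stdint.h>
#include <stdio.h>

#define HSIZE_SHIFT 8
#define HSIZE (1U << HSIZE_SHIFT)

/* userspace analogue of hash_32(): keep the top 'bits' of a 32-bit
 * multiplicative hash */
static uint32_t hash_32_sketch(uint32_t val, unsigned int bits)
{
	return (val * 0x61C88647U) >> (32 - bits);
}

int main(void)
{
	uint32_t addr_hash = 0xdeadbeef;	/* stands in for ipv6_addr_hash() */
	uint32_t net_mix = 0x12345678;		/* stands in for net_hash_mix() */

	printf("bucket %u of %u\n",
	       hash_32_sketch(addr_hash ^ net_mix, HSIZE_SHIFT), HSIZE);
	return 0;
}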
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1b8bc008b53b..ae3786132c23 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -591,7 +591,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
/* fib entries are never clones */
if (arg.filter.flags & RTM_F_CLONED)
- return skb->len;
+ goto out;
w = (void *)cb->args[2];
if (!w) {
@@ -621,7 +621,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
tb = fib6_get_table(net, arg.filter.table_id);
if (!tb) {
if (arg.filter.dump_all_families)
- return skb->len;
+ goto out;
NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
return -ENOENT;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index b8ac369f98ad..d219979c3e52 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -587,11 +587,16 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
*/
ret = -EINPROGRESS;
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
- fq->q.meat == fq->q.len &&
- nf_ct_frag6_reasm(fq, skb, dev))
- ret = 0;
- else
+ fq->q.meat == fq->q.len) {
+ unsigned long orefdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0UL;
+ if (nf_ct_frag6_reasm(fq, skb, dev))
+ ret = 0;
+ skb->_skb_refdst = orefdst;
+ } else {
skb_dst_drop(skb);
+ }
out_unlock:
spin_unlock_bh(&fq->q.lock);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2a7423c39456..059f0531f7c1 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2232,8 +2232,7 @@ static void ip6_link_failure(struct sk_buff *skb)
if (rt) {
rcu_read_lock();
if (rt->rt6i_flags & RTF_CACHE) {
- if (dst_hold_safe(&rt->dst))
- rt6_remove_exception_rt(rt);
+ rt6_remove_exception_rt(rt);
} else {
struct fib6_info *from;
struct fib6_node *fn;
@@ -2360,10 +2359,13 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
+ int oif = sk->sk_bound_dev_if;
struct dst_entry *dst;
- ip6_update_pmtu(skb, sock_net(sk), mtu,
- sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
+ if (!oif && skb->dev)
+ oif = l3mdev_master_ifindex(skb->dev);
+
+ ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
dst = __sk_dst_get(sk);
if (!dst || !dst->obsolete ||
@@ -3214,8 +3216,8 @@ static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
if (cfg->fc_flags & RTF_GATEWAY &&
!ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
goto out;
- if (dst_hold_safe(&rt->dst))
- rc = rt6_remove_exception_rt(rt);
+
+ rc = rt6_remove_exception_rt(rt);
out:
return rc;
}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 82cdf9020b53..26f1d435696a 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1490,12 +1490,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
goto err_sock;
}
- sk = sock->sk;
-
- sock_hold(sk);
- tunnel->sock = sk;
tunnel->l2tp_net = net;
-
pn = l2tp_pernet(net);
spin_lock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1510,6 +1505,10 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+ sk = sock->sk;
+ sock_hold(sk);
+ tunnel->sock = sk;
+
if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
struct udp_tunnel_sock_cfg udp_cfg = {
.sk_user_data = tunnel,
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index bc4bd247bb7d..1577f2f76060 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -55,11 +55,15 @@ MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_DESCRIPTION("core IP set support");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
-/* When the nfnl mutex is held: */
+/* When the nfnl mutex or ip_set_ref_lock is held: */
#define ip_set_dereference(p) \
- rcu_dereference_protected(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
+ rcu_dereference_protected(p, \
+ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
+ lockdep_is_held(&ip_set_ref_lock))
#define ip_set(inst, id) \
ip_set_dereference((inst)->ip_set_list)[id]
+#define ip_set_ref_netlink(inst,id) \
+ rcu_dereference_raw((inst)->ip_set_list)[id]
/* The set types are implemented in modules and registered set types
* can be found in ip_set_type_list. Adding/deleting types is
@@ -693,21 +697,20 @@ ip_set_put_byindex(struct net *net, ip_set_id_t index)
EXPORT_SYMBOL_GPL(ip_set_put_byindex);
/* Get the name of a set behind a set index.
- * We assume the set is referenced, so it does exist and
- * can't be destroyed. The set cannot be renamed due to
- * the referencing either.
- *
+ * Set itself is protected by RCU, but its name isn't: to protect against
+ * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
+ * name.
*/
-const char *
-ip_set_name_byindex(struct net *net, ip_set_id_t index)
+void
+ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
{
- const struct ip_set *set = ip_set_rcu_get(net, index);
+ struct ip_set *set = ip_set_rcu_get(net, index);
BUG_ON(!set);
- BUG_ON(set->ref == 0);
- /* Referenced, so it's safe */
- return set->name;
+ read_lock_bh(&ip_set_ref_lock);
+ strncpy(name, set->name, IPSET_MAXNAMELEN);
+ read_unlock_bh(&ip_set_ref_lock);
}
EXPORT_SYMBOL_GPL(ip_set_name_byindex);
@@ -961,7 +964,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
/* Wraparound */
goto cleanup;
- list = kcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
+ list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
if (!list)
goto cleanup;
/* nfnl mutex is held, both lists are valid */
@@ -973,7 +976,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
/* Use new list */
index = inst->ip_set_max;
inst->ip_set_max = i;
- kfree(tmp);
+ kvfree(tmp);
ret = 0;
} else if (ret) {
goto cleanup;
@@ -1153,7 +1156,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
if (!set)
return -ENOENT;
- read_lock_bh(&ip_set_ref_lock);
+ write_lock_bh(&ip_set_ref_lock);
if (set->ref != 0) {
ret = -IPSET_ERR_REFERENCED;
goto out;
@@ -1170,7 +1173,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
strncpy(set->name, name2, IPSET_MAXNAMELEN);
out:
- read_unlock_bh(&ip_set_ref_lock);
+ write_unlock_bh(&ip_set_ref_lock);
return ret;
}
@@ -1252,7 +1255,7 @@ ip_set_dump_done(struct netlink_callback *cb)
struct ip_set_net *inst =
(struct ip_set_net *)cb->args[IPSET_CB_NET];
ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
- struct ip_set *set = ip_set(inst, index);
+ struct ip_set *set = ip_set_ref_netlink(inst, index);
if (set->variant->uref)
set->variant->uref(set, cb, false);
@@ -1441,7 +1444,7 @@ next_set:
release_refcount:
/* If there was an error or set is done, release set */
if (ret || !cb->args[IPSET_CB_ARG0]) {
- set = ip_set(inst, index);
+ set = ip_set_ref_netlink(inst, index);
if (set->variant->uref)
set->variant->uref(set, cb, false);
pr_debug("release set %s\n", set->name);
@@ -2059,7 +2062,7 @@ ip_set_net_init(struct net *net)
if (inst->ip_set_max >= IPSET_INVALID_ID)
inst->ip_set_max = IPSET_INVALID_ID - 1;
- list = kcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
+ list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
if (!list)
return -ENOMEM;
inst->is_deleted = false;
@@ -2087,7 +2090,7 @@ ip_set_net_exit(struct net *net)
}
}
nfnl_unlock(NFNL_SUBSYS_IPSET);
- kfree(rcu_dereference_protected(inst->ip_set_list, 1));
+ kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
}
static struct pernet_operations ip_set_net_ops = {
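
ip_set_name_byindex() now copies the name into a caller-supplied buffer while holding ip_set_ref_lock as reader, instead of returning a pointer that ip_set_rename() could rewrite underneath the caller. A rough pthread analogue of copy-under-rwlock, assuming nothing beyond POSIX:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NAMELEN 32

static pthread_rwlock_t name_lock = PTHREAD_RWLOCK_INITIALIZER;
static char set_name[NAMELEN] = "myset";

/* Reader: copy under the lock; never hand out the live pointer. */
static void get_name(char *out)
{
	pthread_rwlock_rdlock(&name_lock);
	strncpy(out, set_name, NAMELEN);
	out[NAMELEN - 1] = '\0';
	pthread_rwlock_unlock(&name_lock);
}

/* Writer: a rename excludes all readers for the duration of the copy. */
static void rename_set(const char *newname)
{
	pthread_rwlock_wrlock(&name_lock);
	strncpy(set_name, newname, NAMELEN - 1);
	pthread_rwlock_unlock(&name_lock);
}

int main(void)
{
	char buf[NAMELEN];

	rename_set("renamed");
	get_name(buf);
	printf("%s\n", buf);
	return 0;
}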
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index d391485a6acd..613e18e720a4 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -213,13 +213,13 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+ if (e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+ if (e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
@@ -493,13 +493,13 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+ if (e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+ if (e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 072a658fde04..4eef55da0878 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -148,9 +148,7 @@ __list_set_del_rcu(struct rcu_head * rcu)
{
struct set_elem *e = container_of(rcu, struct set_elem, rcu);
struct ip_set *set = e->set;
- struct list_set *map = set->data;
- ip_set_put_byindex(map->net, e->id);
ip_set_ext_destroy(set, e);
kfree(e);
}
@@ -158,15 +156,21 @@ __list_set_del_rcu(struct rcu_head * rcu)
static inline void
list_set_del(struct ip_set *set, struct set_elem *e)
{
+ struct list_set *map = set->data;
+
set->elements--;
list_del_rcu(&e->list);
+ ip_set_put_byindex(map->net, e->id);
call_rcu(&e->rcu, __list_set_del_rcu);
}
static inline void
-list_set_replace(struct set_elem *e, struct set_elem *old)
+list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
{
+ struct list_set *map = set->data;
+
list_replace_rcu(&old->list, &e->list);
+ ip_set_put_byindex(map->net, old->id);
call_rcu(&old->rcu, __list_set_del_rcu);
}
@@ -298,7 +302,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
INIT_LIST_HEAD(&e->list);
list_set_init_extensions(set, ext, e);
if (n)
- list_set_replace(e, n);
+ list_set_replace(set, e, n);
else if (next)
list_add_tail_rcu(&e->list, &next->list);
else if (prev)
@@ -486,6 +490,7 @@ list_set_list(const struct ip_set *set,
const struct list_set *map = set->data;
struct nlattr *atd, *nested;
u32 i = 0, first = cb->args[IPSET_CB_ARG0];
+ char name[IPSET_MAXNAMELEN];
struct set_elem *e;
int ret = 0;
@@ -504,8 +509,8 @@ list_set_list(const struct ip_set *set,
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
- if (nla_put_string(skb, IPSET_ATTR_NAME,
- ip_set_name_byindex(map->net, e->id)))
+ ip_set_name_byindex(map->net, e->id, name);
+ if (nla_put_string(skb, IPSET_ATTR_NAME, name))
goto nla_put_failure;
if (ip_set_put_extensions(skb, set, e, true))
goto nla_put_failure;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ca1168d67fac..e92e749aff53 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1073,19 +1073,22 @@ static unsigned int early_drop_list(struct net *net,
return drops;
}
-static noinline int early_drop(struct net *net, unsigned int _hash)
+static noinline int early_drop(struct net *net, unsigned int hash)
{
- unsigned int i;
+ unsigned int i, bucket;
for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
struct hlist_nulls_head *ct_hash;
- unsigned int hash, hsize, drops;
+ unsigned int hsize, drops;
rcu_read_lock();
nf_conntrack_get_ht(&ct_hash, &hsize);
- hash = reciprocal_scale(_hash++, hsize);
+ if (!i)
+ bucket = reciprocal_scale(hash, hsize);
+ else
+ bucket = (bucket + 1) % hsize;
- drops = early_drop_list(net, &ct_hash[hash]);
+ drops = early_drop_list(net, &ct_hash[bucket]);
rcu_read_unlock();
if (drops) {
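
The early_drop() change fixes the scan order: reciprocal_scale() maps near-identical inputs to the same slot when the table is much smaller than 2^32, so the old reciprocal_scale(_hash++, hsize) kept revisiting one bucket instead of covering the eviction range. The fixed logic derives the start bucket once and then steps linearly, wrapping at the table size. A compilable sketch with made-up sizes:

#include <stdio.h>
#include <stdint.h>

#define TABLE_SIZE 128u
#define SCAN_RANGE 8u

/* Kernel helper: maps a 32-bit hash onto [0, ep_ro) without a division. */
static inline uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

static void scan_buckets(uint32_t hash)
{
	uint32_t i, bucket = 0;

	for (i = 0; i < SCAN_RANGE; i++) {
		if (!i)
			bucket = reciprocal_scale(hash, TABLE_SIZE);
		else
			bucket = (bucket + 1) % TABLE_SIZE;
		printf("visit bucket %u\n", bucket);
	}
}

int main(void)
{
	scan_buckets(0xdeadbeef);
	return 0;
}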
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 171e9e122e5f..023c1445bc39 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -384,11 +384,6 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
},
};
-static inline struct nf_dccp_net *dccp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.dccp;
-}
-
static noinline bool
dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
const struct dccp_hdr *dh)
@@ -401,7 +396,7 @@ dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
switch (state) {
default:
- dn = dccp_pernet(net);
+ dn = nf_dccp_pernet(net);
if (dn->dccp_loose == 0) {
msg = "not picking up existing connection ";
goto out_invalid;
@@ -568,7 +563,7 @@ static int dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
- timeouts = dccp_pernet(nf_ct_net(ct))->dccp_timeout;
+ timeouts = nf_dccp_pernet(nf_ct_net(ct))->dccp_timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
@@ -681,7 +676,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
- struct nf_dccp_net *dn = dccp_pernet(net);
+ struct nf_dccp_net *dn = nf_dccp_pernet(net);
unsigned int *timeouts = data;
int i;
@@ -814,7 +809,7 @@ static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
static int dccp_init_net(struct net *net)
{
- struct nf_dccp_net *dn = dccp_pernet(net);
+ struct nf_dccp_net *dn = nf_dccp_pernet(net);
struct nf_proto_net *pn = &dn->pn;
if (!pn->users) {
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index e10e867e0b55..5da19d5fbc76 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -27,11 +27,6 @@ static bool nf_generic_should_process(u8 proto)
}
}
-static inline struct nf_generic_net *generic_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.generic;
-}
-
static bool generic_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple)
@@ -58,7 +53,7 @@ static int generic_packet(struct nf_conn *ct,
}
if (!timeout)
- timeout = &generic_pernet(nf_ct_net(ct))->timeout;
+ timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
@@ -72,7 +67,7 @@ static int generic_packet(struct nf_conn *ct,
static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
- struct nf_generic_net *gn = generic_pernet(net);
+ struct nf_generic_net *gn = nf_generic_pernet(net);
unsigned int *timeout = data;
if (!timeout)
@@ -138,7 +133,7 @@ static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int generic_init_net(struct net *net)
{
- struct nf_generic_net *gn = generic_pernet(net);
+ struct nf_generic_net *gn = nf_generic_pernet(net);
struct nf_proto_net *pn = &gn->pn;
gn->timeout = nf_ct_generic_timeout;
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index 3598520bd19b..de64d8a5fdfd 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -25,11 +25,6 @@
static const unsigned int nf_ct_icmp_timeout = 30*HZ;
-static inline struct nf_icmp_net *icmp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.icmp;
-}
-
static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple)
{
@@ -103,7 +98,7 @@ static int icmp_packet(struct nf_conn *ct,
}
if (!timeout)
- timeout = &icmp_pernet(nf_ct_net(ct))->timeout;
+ timeout = &nf_icmp_pernet(nf_ct_net(ct))->timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
@@ -275,7 +270,7 @@ static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeout = data;
- struct nf_icmp_net *in = icmp_pernet(net);
+ struct nf_icmp_net *in = nf_icmp_pernet(net);
if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
if (!timeout)
@@ -337,7 +332,7 @@ static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int icmp_init_net(struct net *net)
{
- struct nf_icmp_net *in = icmp_pernet(net);
+ struct nf_icmp_net *in = nf_icmp_pernet(net);
struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmp_timeout;
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index 378618feed5d..a15eefb8e317 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -30,11 +30,6 @@
static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
-static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.icmpv6;
-}
-
static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct net *net,
@@ -87,7 +82,7 @@ static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
static unsigned int *icmpv6_get_timeouts(struct net *net)
{
- return &icmpv6_pernet(net)->timeout;
+ return &nf_icmpv6_pernet(net)->timeout;
}
/* Returns verdict for packet, or -1 for invalid. */
@@ -286,7 +281,7 @@ static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeout = data;
- struct nf_icmp_net *in = icmpv6_pernet(net);
+ struct nf_icmp_net *in = nf_icmpv6_pernet(net);
if (!timeout)
timeout = icmpv6_get_timeouts(net);
@@ -348,7 +343,7 @@ static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int icmpv6_init_net(struct net *net)
{
- struct nf_icmp_net *in = icmpv6_pernet(net);
+ struct nf_icmp_net *in = nf_icmpv6_pernet(net);
struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmpv6_timeout;
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 3d719d3eb9a3..d53e3e78f605 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -146,11 +146,6 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
}
};
-static inline struct nf_sctp_net *sctp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.sctp;
-}
-
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@@ -480,7 +475,7 @@ static int sctp_packet(struct nf_conn *ct,
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
- timeouts = sctp_pernet(nf_ct_net(ct))->timeouts;
+ timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
@@ -599,7 +594,7 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeouts = data;
- struct nf_sctp_net *sn = sctp_pernet(net);
+ struct nf_sctp_net *sn = nf_sctp_pernet(net);
int i;
/* set default SCTP timeouts. */
@@ -736,7 +731,7 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int sctp_init_net(struct net *net)
{
- struct nf_sctp_net *sn = sctp_pernet(net);
+ struct nf_sctp_net *sn = nf_sctp_pernet(net);
struct nf_proto_net *pn = &sn->pn;
if (!pn->users) {
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 1bcf9984d45e..4dcbd51a8e97 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -272,11 +272,6 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
}
};
-static inline struct nf_tcp_net *tcp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.tcp;
-}
-
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@@ -475,7 +470,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
const struct tcphdr *tcph)
{
struct net *net = nf_ct_net(ct);
- struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
@@ -767,7 +762,7 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
{
enum tcp_conntrack new_state;
struct net *net = nf_ct_net(ct);
- const struct nf_tcp_net *tn = tcp_pernet(net);
+ const struct nf_tcp_net *tn = nf_tcp_pernet(net);
const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
@@ -841,7 +836,7 @@ static int tcp_packet(struct nf_conn *ct,
const struct nf_hook_state *state)
{
struct net *net = nf_ct_net(ct);
- struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct nf_conntrack_tuple *tuple;
enum tcp_conntrack new_state, old_state;
unsigned int index, *timeouts;
@@ -1283,7 +1278,7 @@ static unsigned int tcp_nlattr_tuple_size(void)
static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
- struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
unsigned int *timeouts = data;
int i;
@@ -1508,7 +1503,7 @@ static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int tcp_init_net(struct net *net)
{
- struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct nf_proto_net *pn = &tn->pn;
if (!pn->users) {
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index a7aa70370913..c879d8d78cfd 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -32,14 +32,9 @@ static const unsigned int udp_timeouts[UDP_CT_MAX] = {
[UDP_CT_REPLIED] = 180*HZ,
};
-static inline struct nf_udp_net *udp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.udp;
-}
-
static unsigned int *udp_get_timeouts(struct net *net)
{
- return udp_pernet(net)->timeouts;
+ return nf_udp_pernet(net)->timeouts;
}
static void udp_error_log(const struct sk_buff *skb,
@@ -212,7 +207,7 @@ static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeouts = data;
- struct nf_udp_net *un = udp_pernet(net);
+ struct nf_udp_net *un = nf_udp_pernet(net);
if (!timeouts)
timeouts = un->timeouts;
@@ -292,7 +287,7 @@ static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int udp_init_net(struct net *net)
{
- struct nf_udp_net *un = udp_pernet(net);
+ struct nf_udp_net *un = nf_udp_pernet(net);
struct nf_proto_net *pn = &un->pn;
if (!pn->users) {
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index e7a50af1b3d6..a518eb162344 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -382,7 +382,8 @@ err:
static int
cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
u32 seq, u32 type, int event, u16 l3num,
- const struct nf_conntrack_l4proto *l4proto)
+ const struct nf_conntrack_l4proto *l4proto,
+ const unsigned int *timeouts)
{
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
@@ -408,7 +409,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
if (!nest_parms)
goto nla_put_failure;
- ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
+ ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
if (ret < 0)
goto nla_put_failure;
@@ -430,6 +431,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
struct netlink_ext_ack *extack)
{
const struct nf_conntrack_l4proto *l4proto;
+ unsigned int *timeouts = NULL;
struct sk_buff *skb2;
int ret, err;
__u16 l3num;
@@ -442,12 +444,44 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
l4proto = nf_ct_l4proto_find_get(l4num);
- /* This protocol is not supported, skip. */
- if (l4proto->l4proto != l4num) {
- err = -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ if (l4proto->l4proto != l4num)
goto err;
+
+ switch (l4proto->l4proto) {
+ case IPPROTO_ICMP:
+ timeouts = &nf_icmp_pernet(net)->timeout;
+ break;
+ case IPPROTO_TCP:
+ timeouts = nf_tcp_pernet(net)->timeouts;
+ break;
+ case IPPROTO_UDP:
+ timeouts = nf_udp_pernet(net)->timeouts;
+ break;
+ case IPPROTO_DCCP:
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ timeouts = nf_dccp_pernet(net)->dccp_timeout;
+#endif
+ break;
+ case IPPROTO_ICMPV6:
+ timeouts = &nf_icmpv6_pernet(net)->timeout;
+ break;
+ case IPPROTO_SCTP:
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ timeouts = nf_sctp_pernet(net)->timeouts;
+#endif
+ break;
+ case 255:
+ timeouts = &nf_generic_pernet(net)->timeout;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
}
+ if (!timeouts)
+ goto err;
+
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL) {
err = -ENOMEM;
@@ -458,8 +492,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
nlh->nlmsg_seq,
NFNL_MSG_TYPE(nlh->nlmsg_type),
IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
- l3num,
- l4proto);
+ l3num, l4proto, timeouts);
if (ret <= 0) {
kfree_skb(skb2);
err = -ENOMEM;
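
cttimeout_default_get() now resolves the per-netns default timeout table itself and passes it down, rather than having obj_to_nlattr() cope with a NULL argument; this is also why the *_pernet() helpers above moved out of the individual trackers into shared scope. A toy version of the dispatch, where the IPPROTO_* constants are real but the tables and their sizes are invented:

#include <netinet/in.h>	/* IPPROTO_* */
#include <stddef.h>
#include <stdio.h>

static unsigned int tcp_timeouts[14];	/* sizes here are arbitrary */
static unsigned int udp_timeouts[2];
static unsigned int icmp_timeout;

static unsigned int *default_timeouts(int l4proto)
{
	switch (l4proto) {
	case IPPROTO_TCP:
		return tcp_timeouts;
	case IPPROTO_UDP:
		return udp_timeouts;
	case IPPROTO_ICMP:
		return &icmp_timeout;
	default:
		return NULL;	/* caller turns this into -EOPNOTSUPP */
	}
}

int main(void)
{
	printf("%p\n", (void *)default_timeouts(IPPROTO_TCP));
	return 0;
}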
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 768292eac2a4..9d0ede474224 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -54,9 +54,11 @@ static bool nft_xt_put(struct nft_xt *xt)
return false;
}
-static int nft_compat_chain_validate_dependency(const char *tablename,
- const struct nft_chain *chain)
+static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
+ const char *tablename)
{
+ enum nft_chain_types type = NFT_CHAIN_T_DEFAULT;
+ const struct nft_chain *chain = ctx->chain;
const struct nft_base_chain *basechain;
if (!tablename ||
@@ -64,9 +66,12 @@ static int nft_compat_chain_validate_dependency(const char *tablename,
return 0;
basechain = nft_base_chain(chain);
- if (strcmp(tablename, "nat") == 0 &&
- basechain->type->type != NFT_CHAIN_T_NAT)
- return -EINVAL;
+ if (strcmp(tablename, "nat") == 0) {
+ if (ctx->family != NFPROTO_BRIDGE)
+ type = NFT_CHAIN_T_NAT;
+ if (basechain->type->type != type)
+ return -EINVAL;
+ }
return 0;
}
@@ -342,8 +347,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
if (target->hooks && !(hook_mask & target->hooks))
return -EINVAL;
- ret = nft_compat_chain_validate_dependency(target->table,
- ctx->chain);
+ ret = nft_compat_chain_validate_dependency(ctx, target->table);
if (ret < 0)
return ret;
}
@@ -590,8 +594,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
if (match->hooks && !(hook_mask & match->hooks))
return -EINVAL;
- ret = nft_compat_chain_validate_dependency(match->table,
- ctx->chain);
+ ret = nft_compat_chain_validate_dependency(ctx, match->table);
if (ret < 0)
return ret;
}
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
index 649d1700ec5b..3cc1b3dc3c3c 100644
--- a/net/netfilter/nft_numgen.c
+++ b/net/netfilter/nft_numgen.c
@@ -24,7 +24,6 @@ struct nft_ng_inc {
u32 modulus;
atomic_t counter;
u32 offset;
- struct nft_set *map;
};
static u32 nft_ng_inc_gen(struct nft_ng_inc *priv)
@@ -48,34 +47,11 @@ static void nft_ng_inc_eval(const struct nft_expr *expr,
regs->data[priv->dreg] = nft_ng_inc_gen(priv);
}
-static void nft_ng_inc_map_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_ng_inc *priv = nft_expr_priv(expr);
- const struct nft_set *map = priv->map;
- const struct nft_set_ext *ext;
- u32 result;
- bool found;
-
- result = nft_ng_inc_gen(priv);
- found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
-
- if (!found)
- return;
-
- nft_data_copy(&regs->data[priv->dreg],
- nft_set_ext_data(ext), map->dlen);
-}
-
static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = {
[NFTA_NG_DREG] = { .type = NLA_U32 },
[NFTA_NG_MODULUS] = { .type = NLA_U32 },
[NFTA_NG_TYPE] = { .type = NLA_U32 },
[NFTA_NG_OFFSET] = { .type = NLA_U32 },
- [NFTA_NG_SET_NAME] = { .type = NLA_STRING,
- .len = NFT_SET_MAXNAMELEN - 1 },
- [NFTA_NG_SET_ID] = { .type = NLA_U32 },
};
static int nft_ng_inc_init(const struct nft_ctx *ctx,
@@ -101,22 +77,6 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
NFT_DATA_VALUE, sizeof(u32));
}
-static int nft_ng_inc_map_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_ng_inc *priv = nft_expr_priv(expr);
- u8 genmask = nft_genmask_next(ctx->net);
-
- nft_ng_inc_init(ctx, expr, tb);
-
- priv->map = nft_set_lookup_global(ctx->net, ctx->table,
- tb[NFTA_NG_SET_NAME],
- tb[NFTA_NG_SET_ID], genmask);
-
- return PTR_ERR_OR_ZERO(priv->map);
-}
-
static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
u32 modulus, enum nft_ng_types type, u32 offset)
{
@@ -143,27 +103,10 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
priv->offset);
}
-static int nft_ng_inc_map_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_ng_inc *priv = nft_expr_priv(expr);
-
- if (nft_ng_dump(skb, priv->dreg, priv->modulus,
- NFT_NG_INCREMENTAL, priv->offset) ||
- nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
- goto nla_put_failure;
-
- return 0;
-
-nla_put_failure:
- return -1;
-}
-
struct nft_ng_random {
enum nft_registers dreg:8;
u32 modulus;
u32 offset;
- struct nft_set *map;
};
static u32 nft_ng_random_gen(struct nft_ng_random *priv)
@@ -183,25 +126,6 @@ static void nft_ng_random_eval(const struct nft_expr *expr,
regs->data[priv->dreg] = nft_ng_random_gen(priv);
}
-static void nft_ng_random_map_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_ng_random *priv = nft_expr_priv(expr);
- const struct nft_set *map = priv->map;
- const struct nft_set_ext *ext;
- u32 result;
- bool found;
-
- result = nft_ng_random_gen(priv);
- found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
- if (!found)
- return;
-
- nft_data_copy(&regs->data[priv->dreg],
- nft_set_ext_data(ext), map->dlen);
-}
-
static int nft_ng_random_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@@ -226,21 +150,6 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
NFT_DATA_VALUE, sizeof(u32));
}
-static int nft_ng_random_map_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_ng_random *priv = nft_expr_priv(expr);
- u8 genmask = nft_genmask_next(ctx->net);
-
- nft_ng_random_init(ctx, expr, tb);
- priv->map = nft_set_lookup_global(ctx->net, ctx->table,
- tb[NFTA_NG_SET_NAME],
- tb[NFTA_NG_SET_ID], genmask);
-
- return PTR_ERR_OR_ZERO(priv->map);
-}
-
static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_ng_random *priv = nft_expr_priv(expr);
@@ -249,22 +158,6 @@ static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
priv->offset);
}
-static int nft_ng_random_map_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_ng_random *priv = nft_expr_priv(expr);
-
- if (nft_ng_dump(skb, priv->dreg, priv->modulus,
- NFT_NG_RANDOM, priv->offset) ||
- nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
- goto nla_put_failure;
-
- return 0;
-
-nla_put_failure:
- return -1;
-}
-
static struct nft_expr_type nft_ng_type;
static const struct nft_expr_ops nft_ng_inc_ops = {
.type = &nft_ng_type,
@@ -274,14 +167,6 @@ static const struct nft_expr_ops nft_ng_inc_ops = {
.dump = nft_ng_inc_dump,
};
-static const struct nft_expr_ops nft_ng_inc_map_ops = {
- .type = &nft_ng_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_inc)),
- .eval = nft_ng_inc_map_eval,
- .init = nft_ng_inc_map_init,
- .dump = nft_ng_inc_map_dump,
-};
-
static const struct nft_expr_ops nft_ng_random_ops = {
.type = &nft_ng_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
@@ -290,14 +175,6 @@ static const struct nft_expr_ops nft_ng_random_ops = {
.dump = nft_ng_random_dump,
};
-static const struct nft_expr_ops nft_ng_random_map_ops = {
- .type = &nft_ng_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
- .eval = nft_ng_random_map_eval,
- .init = nft_ng_random_map_init,
- .dump = nft_ng_random_map_dump,
-};
-
static const struct nft_expr_ops *
nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
@@ -312,12 +189,8 @@ nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
switch (type) {
case NFT_NG_INCREMENTAL:
- if (tb[NFTA_NG_SET_NAME])
- return &nft_ng_inc_map_ops;
return &nft_ng_inc_ops;
case NFT_NG_RANDOM:
- if (tb[NFTA_NG_SET_NAME])
- return &nft_ng_random_map_ops;
return &nft_ng_random_ops;
}
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index ca5e5d8c5ef8..b13618c764ec 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -50,7 +50,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
int err;
u8 ttl;
- if (nla_get_u8(tb[NFTA_OSF_TTL])) {
+ if (tb[NFTA_OSF_TTL]) {
ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
if (ttl > 2)
return -EINVAL;
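
The nft_osf one-liner fixes a classic netlink bug: the old code read the TTL via nla_get_u8() before checking that the attribute was present in the message at all, dereferencing NULL for messages without NFTA_OSF_TTL. The safe shape, reduced to a toy attribute table with no real libnl API assumed:

#include <stdint.h>
#include <stdio.h>

#define ATTR_TTL 1
#define ATTR_MAX 2

/* Parsed attribute table: NULL means "not present in this message". */
static uint8_t *tb[ATTR_MAX + 1];

static int init_ttl(uint8_t *ttl_out)
{
	if (tb[ATTR_TTL]) {			/* presence check first... */
		uint8_t ttl = *tb[ATTR_TTL];	/* ...then dereference */

		if (ttl > 2)
			return -1;
		*ttl_out = ttl;
	}
	return 0;
}

int main(void)
{
	uint8_t ttl = 0;

	printf("%d\n", init_ttl(&ttl));	/* attribute absent: OK, default kept */
	return 0;
}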
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index c6acfc2d9c84..eb4cbd244c3d 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -114,6 +114,22 @@ static void idletimer_tg_expired(struct timer_list *t)
schedule_work(&timer->work);
}
+static int idletimer_check_sysfs_name(const char *name, unsigned int size)
+{
+ int ret;
+
+ ret = xt_check_proc_name(name, size);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(name, "power") ||
+ !strcmp(name, "subsystem") ||
+ !strcmp(name, "uevent"))
+ return -EINVAL;
+
+ return 0;
+}
+
static int idletimer_tg_create(struct idletimer_tg_info *info)
{
int ret;
@@ -124,6 +140,10 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
goto out;
}
+ ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
+ if (ret < 0)
+ goto out_free_timer;
+
sysfs_attr_init(&info->timer->attr.attr);
info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
if (!info->timer->attr.attr.name) {
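
xt_IDLETIMER creates a sysfs file named after the user-controlled label, so the label must not shadow the entries the driver core itself places in that directory. The three reserved names below come from the patch; the rest is scaffolding:

#include <stdio.h>
#include <string.h>

static int check_sysfs_name(const char *name)
{
	if (!strcmp(name, "power") ||
	    !strcmp(name, "subsystem") ||
	    !strcmp(name, "uevent"))
		return -1;	/* would shadow a core-owned sysfs entry */
	return 0;
}

int main(void)
{
	printf("%d %d\n", check_sysfs_name("mytimer"), check_sysfs_name("power"));
	return 0;
}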
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 6bec37ab4472..a4660c48ff01 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1203,7 +1203,8 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
&info->labels.mask);
if (err)
return err;
- } else if (labels_nonzero(&info->labels.mask)) {
+ } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ labels_nonzero(&info->labels.mask)) {
err = ovs_ct_set_labels(ct, key, &info->labels.value,
&info->labels.mask);
if (err)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ec3095f13aae..a74650e98f42 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2394,7 +2394,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
void *ph;
__u32 ts;
- ph = skb_shinfo(skb)->destructor_arg;
+ ph = skb_zcopy_get_nouarg(skb);
packet_dec_pending(&po->tx_ring);
ts = __packet_set_timestamp(po, ph, skb);
@@ -2461,7 +2461,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb->mark = po->sk.sk_mark;
skb->tstamp = sockc->transmit_time;
sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
- skb_shinfo(skb)->destructor_arg = ph.raw;
+ skb_zcopy_set_nouarg(skb, ph.raw);
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
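
tpacket reuses skb_shinfo(skb)->destructor_arg to stash its ring-frame pointer, but zerocopy keeps its ubuf_info in the same field; the skb_zcopy_*_nouarg() helpers disambiguate the two by tagging the stored pointer (the kernel uses the low bit for this — treat that detail as an assumption here). A userspace illustration of low-bit pointer tagging, not the exact kernel helpers:

#include <stdint.h>
#include <stdio.h>

static void *tag_ptr(void *p)   { return (void *)((uintptr_t)p | 1); }
static void *untag_ptr(void *p) { return (void *)((uintptr_t)p & ~(uintptr_t)1); }
static int   is_tagged(void *p) { return (int)((uintptr_t)p & 1); }

int main(void)
{
	int frame;				/* aligned, so bit 0 is free */
	void *slot = tag_ptr(&frame);		/* store: mark as "not a uarg" */

	printf("tagged=%d same=%d\n", is_tagged(slot),
	       untag_ptr(slot) == (void *)&frame);
	return 0;
}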
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 64362d078da8..a2522f9d71e2 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -375,17 +375,36 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
* getting ACKs from the server. Returns a number representing the life state
* which can be compared to that returned by a previous call.
*
- * If this is a client call, ping ACKs will be sent to the server to find out
- * whether it's still responsive and whether the call is still alive on the
- * server.
+ * If the life state stalls, rxrpc_kernel_probe_life() should be called and
+ * then 2*RTT should be allowed to elapse before checking again.
*/
-u32 rxrpc_kernel_check_life(struct socket *sock, struct rxrpc_call *call)
+u32 rxrpc_kernel_check_life(const struct socket *sock,
+ const struct rxrpc_call *call)
{
return call->acks_latest;
}
EXPORT_SYMBOL(rxrpc_kernel_check_life);
/**
+ * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
+ * @sock: The socket the call is on
+ * @call: The call to check
+ *
+ * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
+ * find out whether a call is still alive by pinging it. This should cause the
+ * life state to be bumped in about 2*RTT.
+ *
+ * This must be called in TASK_RUNNING state, on pain of might_sleep() objecting.
+ */
+void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
+{
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+ rxrpc_propose_ack_ping_for_check_life);
+ rxrpc_send_ack_packet(call, true, NULL);
+}
+EXPORT_SYMBOL(rxrpc_kernel_probe_life);
+
+/**
* rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
* @sock: The socket the call is on
* @call: The call to query
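
The intended calling pattern for a kernel service: sample rxrpc_kernel_check_life(), and if the value stops changing, fire rxrpc_kernel_probe_life() and re-check after roughly two round-trip times. A self-contained simulation of that polling loop, where the two stubs stand in for the real rxrpc calls:

#include <stdio.h>
#include <unistd.h>

static unsigned int life_state;

/* Stubs for rxrpc_kernel_check_life() / rxrpc_kernel_probe_life(). */
static unsigned int check_life(void) { return life_state; }
static void probe_life(void) { life_state++; }	/* sim: peer answers instantly */

int main(void)
{
	unsigned int last = check_life();

	sleep(1);			/* normal poll interval */
	if (check_life() == last) {	/* stalled: poke the peer */
		probe_life();
		sleep(1);		/* stand-in for waiting ~2*RTT */
		printf(check_life() == last ? "call looks dead\n"
					    : "call still alive\n");
	}
	return 0;
}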
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 382196e57a26..bc628acf4f4f 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -611,6 +611,7 @@ struct rxrpc_call {
* not hard-ACK'd packet follows this.
*/
rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
+ u16 tx_backoff; /* Delay to insert due to Tx failure */
/* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
* is fixed, we keep these numbers in terms of segments (ie. DATA
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 8e7434e92097..468efc3660c0 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -123,6 +123,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
else
ack_at = expiry;
+ ack_at += READ_ONCE(call->tx_backoff);
ack_at += now;
if (time_before(ack_at, call->ack_at)) {
WRITE_ONCE(call->ack_at, ack_at);
@@ -311,6 +312,7 @@ void rxrpc_process_call(struct work_struct *work)
container_of(work, struct rxrpc_call, processor);
rxrpc_serial_t *send_ack;
unsigned long now, next, t;
+ unsigned int iterations = 0;
rxrpc_see_call(call);
@@ -319,6 +321,11 @@ void rxrpc_process_call(struct work_struct *work)
call->debug_id, rxrpc_call_states[call->state], call->events);
recheck_state:
+ /* Limit the number of times we do this before returning to the manager */
+ iterations++;
+ if (iterations > 5)
+ goto requeue;
+
if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
rxrpc_send_abort_packet(call);
goto recheck_state;
@@ -447,13 +454,16 @@ recheck_state:
rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
/* other events may have been raised since we started checking */
- if (call->events && call->state < RXRPC_CALL_COMPLETE) {
- __rxrpc_queue_call(call);
- goto out;
- }
+ if (call->events && call->state < RXRPC_CALL_COMPLETE)
+ goto requeue;
out_put:
rxrpc_put_call(call, rxrpc_call_put);
out:
_leave("");
+ return;
+
+requeue:
+ __rxrpc_queue_call(call);
+ goto out;
}
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 189418888839..736aa9281100 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -35,6 +35,21 @@ struct rxrpc_abort_buffer {
static const char rxrpc_keepalive_string[] = "";
/*
+ * Increase Tx backoff on transmission failure and clear it on success.
+ */
+static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
+{
+ if (ret < 0) {
+ u16 tx_backoff = READ_ONCE(call->tx_backoff);
+
+ if (tx_backoff < HZ)
+ WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
+ } else {
+ WRITE_ONCE(call->tx_backoff, 0);
+ }
+}
+
+/*
* Arrange for a keepalive ping a certain time after we last transmitted. This
* lets the far side know we're still interested in this call and helps keep
* the route through any intervening firewall open.
@@ -210,6 +225,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
else
trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
rxrpc_tx_point_call_ack);
+ rxrpc_tx_backoff(call, ret);
if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {
@@ -218,7 +234,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
rxrpc_propose_ACK(call, pkt->ack.reason,
ntohs(pkt->ack.maxSkew),
ntohl(pkt->ack.serial),
- true, true,
+ false, true,
rxrpc_propose_ack_retry_tx);
} else {
spin_lock_bh(&call->lock);
@@ -300,7 +316,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
else
trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
rxrpc_tx_point_call_abort);
-
+ rxrpc_tx_backoff(call, ret);
rxrpc_put_connection(conn);
return ret;
@@ -413,6 +429,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
else
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_nofrag);
+ rxrpc_tx_backoff(call, ret);
if (ret == -EMSGSIZE)
goto send_fragmentable;
@@ -445,9 +462,18 @@ done:
rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
rxrpc_timer_set_for_normal);
}
- }
- rxrpc_set_keepalive(call);
+ rxrpc_set_keepalive(call);
+ } else {
+ /* Cancel the call if the initial transmission fails,
+ * particularly if that's due to network routing issues that
+ * aren't going away anytime soon. The layer above can arrange
+ * the retransmission.
+ */
+ if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
+ rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+ RX_USER_ABORT, ret);
+ }
_leave(" = %d [%u]", ret, call->peer->maxdata);
return ret;
@@ -506,6 +532,7 @@ send_fragmentable:
else
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_frag);
+ rxrpc_tx_backoff(call, ret);
up_write(&conn->params.local->defrag_sem);
goto done;
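
rxrpc_tx_backoff() is a linear, capped backoff: each failed transmission adds one jiffy of delay to future event timers, up to HZ (one second), and any success clears it. The same arithmetic standalone, with HZ assumed to be 100 for the sketch:

#include <stdio.h>

#define HZ 100	/* assumed tick rate */

static unsigned short tx_backoff;

static void tx_backoff_update(int ret)
{
	if (ret < 0) {
		if (tx_backoff < HZ)
			tx_backoff++;	/* one extra jiffy per failure */
	} else {
		tx_backoff = 0;		/* any success clears the penalty */
	}
}

int main(void)
{
	tx_backoff_update(-1);
	tx_backoff_update(-1);
	printf("backoff=%u\n", tx_backoff);	/* 2 */
	tx_backoff_update(0);
	printf("backoff=%u\n", tx_backoff);	/* 0 */
	return 0;
}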
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 1dae5f2b358f..c8cf4d10c435 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -258,7 +258,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
if (is_redirect) {
skb2->tc_redirected = 1;
skb2->tc_from_ingress = skb2->tc_at_ingress;
-
+ if (skb2->tc_from_ingress)
+ skb2->tstamp = 0;
/* let the caller reinsert the packet, if possible */
if (use_reinsert) {
res->ingress = want_ingress;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index da3dd0f68cc2..2b372a06b432 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -201,7 +201,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
goto out_release;
}
} else {
- return err;
+ ret = err;
+ goto out_free;
}
p = to_pedit(*a);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 052855d47354..37c9b8f0e10f 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -27,10 +27,7 @@ struct tcf_police_params {
u32 tcfp_ewma_rate;
s64 tcfp_burst;
u32 tcfp_mtu;
- s64 tcfp_toks;
- s64 tcfp_ptoks;
s64 tcfp_mtu_ptoks;
- s64 tcfp_t_c;
struct psched_ratecfg rate;
bool rate_present;
struct psched_ratecfg peak;
@@ -41,6 +38,11 @@ struct tcf_police_params {
struct tcf_police {
struct tc_action common;
struct tcf_police_params __rcu *params;
+
+ spinlock_t tcfp_lock ____cacheline_aligned_in_smp;
+ s64 tcfp_toks;
+ s64 tcfp_ptoks;
+ s64 tcfp_t_c;
};
#define to_police(pc) ((struct tcf_police *)pc)
@@ -122,6 +124,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
return ret;
}
ret = ACT_P_CREATED;
+ spin_lock_init(&(to_police(*a)->tcfp_lock));
} else if (!ovr) {
tcf_idr_release(*a, bind);
return -EEXIST;
@@ -186,12 +189,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
- new->tcfp_toks = new->tcfp_burst;
- if (new->peak_present) {
+ if (new->peak_present)
new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
new->tcfp_mtu);
- new->tcfp_ptoks = new->tcfp_mtu_ptoks;
- }
if (tb[TCA_POLICE_AVRATE])
new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
@@ -207,7 +207,12 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
spin_lock_bh(&police->tcf_lock);
- new->tcfp_t_c = ktime_get_ns();
+ spin_lock_bh(&police->tcfp_lock);
+ police->tcfp_t_c = ktime_get_ns();
+ police->tcfp_toks = new->tcfp_burst;
+ if (new->peak_present)
+ police->tcfp_ptoks = new->tcfp_mtu_ptoks;
+ spin_unlock_bh(&police->tcfp_lock);
police->tcf_action = parm->action;
rcu_swap_protected(police->params,
new,
@@ -257,25 +262,28 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
}
now = ktime_get_ns();
- toks = min_t(s64, now - p->tcfp_t_c, p->tcfp_burst);
+ spin_lock_bh(&police->tcfp_lock);
+ toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
if (p->peak_present) {
- ptoks = toks + p->tcfp_ptoks;
+ ptoks = toks + police->tcfp_ptoks;
if (ptoks > p->tcfp_mtu_ptoks)
ptoks = p->tcfp_mtu_ptoks;
ptoks -= (s64)psched_l2t_ns(&p->peak,
qdisc_pkt_len(skb));
}
- toks += p->tcfp_toks;
+ toks += police->tcfp_toks;
if (toks > p->tcfp_burst)
toks = p->tcfp_burst;
toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
if ((toks|ptoks) >= 0) {
- p->tcfp_t_c = now;
- p->tcfp_toks = toks;
- p->tcfp_ptoks = ptoks;
+ police->tcfp_t_c = now;
+ police->tcfp_toks = toks;
+ police->tcfp_ptoks = ptoks;
+ spin_unlock_bh(&police->tcfp_lock);
ret = p->tcfp_result;
goto inc_drops;
}
+ spin_unlock_bh(&police->tcfp_lock);
}
inc_overlimits:
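
The act_police rework separates immutable configuration (rate, burst, MTU — swapped wholesale under RCU) from the mutable bucket state (tokens and the last-refill timestamp), which now lives in the action itself under its own tcfp_lock so packets on different CPUs don't corrupt the token accounting. A compilable single-bucket analogue using a pthread mutex; the rate handling is deliberately simplified and is not the qdisc arithmetic:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Immutable config (kernel: RCU-swapped tcf_police_params). */
static const long long burst_ns = 1000000;	/* credit cap, ns of traffic */
static const long long ns_per_byte = 100;	/* toy rate: 10 MB/s */

/* Mutable bucket state (kernel: tcfp_toks/tcfp_t_c under tcfp_lock). */
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static long long toks;
static long long t_c;

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Returns 1 if pkt_len bytes conform to the configured rate, else 0. */
static int police(long pkt_len)
{
	long long now = now_ns();
	int ok = 0;

	pthread_mutex_lock(&bucket_lock);
	toks += now - t_c;		/* credit accrues with elapsed time */
	if (toks > burst_ns)
		toks = burst_ns;	/* ...but never beyond the burst */
	t_c = now;
	if (toks >= pkt_len * ns_per_byte) {
		toks -= pkt_len * ns_per_byte;
		ok = 1;
	}
	pthread_mutex_unlock(&bucket_lock);
	return ok;
}

int main(void)
{
	t_c = now_ns();
	printf("%d\n", police(1500));	/* 0: the bucket starts empty */
	return 0;
}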
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9aada2d0ef06..c6c327874abc 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -709,11 +709,23 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
struct netlink_ext_ack *extack)
{
const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
- int option_len, key_depth, msk_depth = 0;
+ int err, option_len, key_depth, msk_depth = 0;
+
+ err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
+ TCA_FLOWER_KEY_ENC_OPTS_MAX,
+ enc_opts_policy, extack);
+ if (err)
+ return err;
nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
+ err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
+ TCA_FLOWER_KEY_ENC_OPTS_MAX,
+ enc_opts_policy, extack);
+ if (err)
+ return err;
+
nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 4b1af706896c..25a7cf6d380f 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -469,22 +469,29 @@ begin:
goto begin;
}
prefetch(&skb->end);
- f->credit -= qdisc_pkt_len(skb);
+ plen = qdisc_pkt_len(skb);
+ f->credit -= plen;
- if (ktime_to_ns(skb->tstamp) || !q->rate_enable)
+ if (!q->rate_enable)
goto out;
rate = q->flow_max_rate;
- if (skb->sk)
- rate = min(skb->sk->sk_pacing_rate, rate);
-
- if (rate <= q->low_rate_threshold) {
- f->credit = 0;
- plen = qdisc_pkt_len(skb);
- } else {
- plen = max(qdisc_pkt_len(skb), q->quantum);
- if (f->credit > 0)
- goto out;
+
+ /* If EDT time was provided for this skb, we need to
+ * update f->time_next_packet only if this qdisc enforces
+ * a flow max rate.
+ */
+ if (!skb->tstamp) {
+ if (skb->sk)
+ rate = min(skb->sk->sk_pacing_rate, rate);
+
+ if (rate <= q->low_rate_threshold) {
+ f->credit = 0;
+ } else {
+ plen = max(plen, q->quantum);
+ if (f->credit > 0)
+ goto out;
+ }
}
if (rate != ~0UL) {
u64 len = (u64)plen * NSEC_PER_SEC;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 57b3ad9394ad..2c38e3d07924 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -648,15 +648,6 @@ deliver:
*/
skb->dev = qdisc_dev(sch);
-#ifdef CONFIG_NET_CLS_ACT
- /*
- * If it's at ingress let's pretend the delay is
- * from the network (tstamp will be updated).
- */
- if (skb->tc_redirected && skb->tc_from_ingress)
- skb->tstamp = 0;
-#endif
-
if (q->slot.slot_next) {
q->slot.packets_left--;
q->slot.bytes_left -= qdisc_pkt_len(skb);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 67939ad99c01..b0e74a3e77ec 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -118,6 +118,9 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
sctp_transport_route(tp, NULL, sp);
if (asoc->param_flags & SPP_PMTUD_ENABLE)
sctp_assoc_sync_pmtu(asoc);
+ } else if (!sctp_transport_pmtu_check(tp)) {
+ if (asoc->param_flags & SPP_PMTUD_ENABLE)
+ sctp_assoc_sync_pmtu(asoc);
}
if (asoc->pmtu_pending) {
@@ -396,25 +399,6 @@ finish:
return retval;
}
-static void sctp_packet_release_owner(struct sk_buff *skb)
-{
- sk_free(skb->sk);
-}
-
-static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
-{
- skb_orphan(skb);
- skb->sk = sk;
- skb->destructor = sctp_packet_release_owner;
-
- /*
- * The data chunks have already been accounted for in sctp_sendmsg(),
- * therefore only reserve a single byte to keep socket around until
- * the packet has been transmitted.
- */
- refcount_inc(&sk->sk_wmem_alloc);
-}
-
static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
{
if (SCTP_OUTPUT_CB(head)->last == head)
@@ -601,7 +585,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
if (!head)
goto out;
skb_reserve(head, packet->overhead + MAX_HEADER);
- sctp_packet_set_owner_w(head, sk);
+ skb_set_owner_w(head, sk);
/* set sctp header */
sh = skb_push(head, sizeof(struct sctphdr));
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 9cb854b05342..c37e1c2dec9d 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -212,7 +212,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
INIT_LIST_HEAD(&q->retransmit);
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
- sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
+ sctp_sched_set_sched(asoc, SCTP_SS_DEFAULT);
}
/* Free the outqueue structure and any related pending chunks.
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 739f3e50120d..bf618d1b41fd 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3940,32 +3940,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
unsigned int optlen)
{
struct sctp_assoc_value params;
- struct sctp_association *asoc;
- int retval = -EINVAL;
if (optlen != sizeof(params))
- goto out;
-
- if (copy_from_user(&params, optval, optlen)) {
- retval = -EFAULT;
- goto out;
- }
-
- asoc = sctp_id2assoc(sk, params.assoc_id);
- if (asoc) {
- asoc->prsctp_enable = !!params.assoc_value;
- } else if (!params.assoc_id) {
- struct sctp_sock *sp = sctp_sk(sk);
+ return -EINVAL;
- sp->ep->prsctp_enable = !!params.assoc_value;
- } else {
- goto out;
- }
+ if (copy_from_user(&params, optval, optlen))
+ return -EFAULT;
- retval = 0;
+ sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
-out:
- return retval;
+ return 0;
}
static int sctp_setsockopt_default_prinfo(struct sock *sk,
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index ffb940d3b57c..3892e7630f3a 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -535,7 +535,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
goto out;
}
- stream->incnt = incnt;
stream->outcnt = outcnt;
asoc->strreset_outstanding = !!out + !!in;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 80e2119f1c70..5fbaf1901571 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -127,6 +127,8 @@ static int smc_release(struct socket *sock)
smc = smc_sk(sk);
/* cleanup for a dangling non-blocking connect */
+ if (smc->connect_info && sk->sk_state == SMC_INIT)
+ tcp_abort(smc->clcsock->sk, ECONNABORTED);
flush_work(&smc->connect_work);
kfree(smc->connect_info);
smc->connect_info = NULL;
@@ -547,7 +549,8 @@ static int smc_connect_rdma(struct smc_sock *smc,
mutex_lock(&smc_create_lgr_pending);
local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
- ibport, &aclc->lcl, NULL, 0);
+ ibport, ntoh24(aclc->qpn), &aclc->lcl,
+ NULL, 0);
if (local_contact < 0) {
if (local_contact == -ENOMEM)
reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
@@ -618,7 +621,7 @@ static int smc_connect_ism(struct smc_sock *smc,
int rc = 0;
mutex_lock(&smc_create_lgr_pending);
- local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0,
+ local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0,
NULL, ismdev, aclc->gid);
if (local_contact < 0)
return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);
@@ -1083,7 +1086,7 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
int *local_contact)
{
/* allocate connection / link group */
- *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport,
+ *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 0,
&pclc->lcl, NULL, 0);
if (*local_contact < 0) {
if (*local_contact == -ENOMEM)
@@ -1107,7 +1110,7 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
struct smc_clc_msg_smcd *pclc_smcd;
pclc_smcd = smc_get_clc_msg_smcd(pclc);
- *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL,
+ *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, 0, NULL,
ismdev, pclc_smcd->gid);
if (*local_contact < 0) {
if (*local_contact == -ENOMEM)
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index ed5dcf03fe0b..db83332ac1c8 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -81,7 +81,7 @@ static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
BUILD_BUG_ON_MSG(
- sizeof(struct smc_cdc_msg) != SMC_WR_TX_SIZE,
+ offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
BUILD_BUG_ON_MSG(
sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
@@ -177,23 +177,24 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
int smcd_cdc_msg_send(struct smc_connection *conn)
{
struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+ union smc_host_cursor curs;
struct smcd_cdc_msg cdc;
int rc, diff;
memset(&cdc, 0, sizeof(cdc));
cdc.common.type = SMC_CDC_MSG_TYPE;
- cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap;
- cdc.prod_count = conn->local_tx_ctrl.prod.count;
-
- cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap;
- cdc.cons_count = conn->local_tx_ctrl.cons.count;
- cdc.prod_flags = conn->local_tx_ctrl.prod_flags;
- cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
+ curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
+ cdc.prod.wrap = curs.wrap;
+ cdc.prod.count = curs.count;
+ curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
+ cdc.cons.wrap = curs.wrap;
+ cdc.cons.count = curs.count;
+ cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
+ cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
if (rc)
return rc;
- smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons,
- conn);
+ smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
/* Calculate transmitted data and increment free send buffer space */
diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
&conn->tx_curs_sent);
@@ -331,13 +332,16 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
static void smcd_cdc_rx_tsklet(unsigned long data)
{
struct smc_connection *conn = (struct smc_connection *)data;
+ struct smcd_cdc_msg *data_cdc;
struct smcd_cdc_msg cdc;
struct smc_sock *smc;
if (!conn)
return;
- memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc));
+ data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
+ smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
+ smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
smc = container_of(conn, struct smc_sock, conn);
smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
}
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index 934df4473a7c..b5bfe38c7f9b 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -48,21 +48,31 @@ struct smc_cdc_msg {
struct smc_cdc_producer_flags prod_flags;
struct smc_cdc_conn_state_flags conn_state_flags;
u8 reserved[18];
-} __packed; /* format defined in RFC7609 */
+};
+
+/* SMC-D cursor format */
+union smcd_cdc_cursor {
+ struct {
+ u16 wrap;
+ u32 count;
+ struct smc_cdc_producer_flags prod_flags;
+ struct smc_cdc_conn_state_flags conn_state_flags;
+ } __packed;
+#ifdef KERNEL_HAS_ATOMIC64
+ atomic64_t acurs; /* for atomic processing */
+#else
+ u64 acurs; /* for atomic processing */
+#endif
+} __aligned(8);
/* CDC message for SMC-D */
struct smcd_cdc_msg {
struct smc_wr_rx_hdr common; /* Type = 0xFE */
u8 res1[7];
- u16 prod_wrap;
- u32 prod_count;
- u8 res2[2];
- u16 cons_wrap;
- u32 cons_count;
- struct smc_cdc_producer_flags prod_flags;
- struct smc_cdc_conn_state_flags conn_state_flags;
+ union smcd_cdc_cursor prod;
+ union smcd_cdc_cursor cons;
u8 res3[8];
-} __packed;
+} __aligned(8);
static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
{
@@ -135,6 +145,21 @@ static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
#endif
}
+static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
+ union smcd_cdc_cursor *src,
+ struct smc_connection *conn)
+{
+#ifndef KERNEL_HAS_ATOMIC64
+ unsigned long flags;
+
+ spin_lock_irqsave(&conn->acurs_lock, flags);
+ tgt->acurs = src->acurs;
+ spin_unlock_irqrestore(&conn->acurs_lock, flags);
+#else
+ atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
+#endif
+}
+
/* calculate cursor difference between old and new, where old <= new */
static inline int smc_curs_diff(unsigned int size,
union smc_host_cursor *old,
@@ -222,12 +247,17 @@ static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
struct smcd_cdc_msg *peer)
{
- local->prod.wrap = peer->prod_wrap;
- local->prod.count = peer->prod_count;
- local->cons.wrap = peer->cons_wrap;
- local->cons.count = peer->cons_count;
- local->prod_flags = peer->prod_flags;
- local->conn_state_flags = peer->conn_state_flags;
+ union smc_host_cursor temp;
+
+ temp.wrap = peer->prod.wrap;
+ temp.count = peer->prod.count;
+ atomic64_set(&local->prod.acurs, atomic64_read(&temp.acurs));
+
+ temp.wrap = peer->cons.wrap;
+ temp.count = peer->cons.count;
+ atomic64_set(&local->cons.acurs, atomic64_read(&temp.acurs));
+ local->prod_flags = peer->cons.prod_flags;
+ local->conn_state_flags = peer->cons.conn_state_flags;
}
static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
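
The new union smcd_cdc_cursor is the heart of this patch: wrap, count and the flag bytes overlay an atomic64_t, so a whole cursor is read or written in one 8-byte atomic access and the peer never observes a torn wrap/count pair (the spin_lock_irqsave() path is only the fallback for arches without 64-bit atomics). A C11 userspace equivalent of the overlay and of smcd_curs_copy(), assuming the struct packs into 64 bits:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* 8-byte cursor; the plain fields and the atomic view alias one storage. */
union cursor {
	struct {
		uint16_t wrap;
		uint32_t count;
	} c;
	_Atomic uint64_t acurs;
};

/* One atomic load + one atomic store: readers never see a torn cursor. */
static void cursor_copy(union cursor *tgt, union cursor *src)
{
	atomic_store(&tgt->acurs, atomic_load(&src->acurs));
}

int main(void)
{
	union cursor a = { .c = { .wrap = 3, .count = 42 } };
	union cursor b;

	cursor_copy(&b, &a);
	printf("wrap=%u count=%u\n", b.c.wrap, (unsigned)b.c.count);
	return 0;
}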
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 18daebcef181..1c9fa7f0261a 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -184,6 +184,8 @@ free:
if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
smc_llc_link_inactive(lnk);
+ if (lgr->is_smcd)
+ smc_ism_signal_shutdown(lgr);
smc_lgr_free(lgr);
}
}
@@ -485,7 +487,7 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
}
/* Called when SMC-D device is terminated or peer is lost */
-void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
struct smc_link_group *lgr, *l;
LIST_HEAD(lgr_free_list);
@@ -495,7 +497,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
if (lgr->is_smcd && lgr->smcd == dev &&
(!peer_gid || lgr->peer_gid == peer_gid) &&
- !list_empty(&lgr->list)) {
+ (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
__smc_lgr_terminate(lgr);
list_move(&lgr->list, &lgr_free_list);
}
@@ -506,6 +508,8 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
list_del_init(&lgr->list);
cancel_delayed_work_sync(&lgr->free_work);
+ if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */
+ smc_ism_signal_shutdown(lgr);
smc_lgr_free(lgr);
}
}
@@ -559,7 +563,7 @@ out:
static bool smcr_lgr_match(struct smc_link_group *lgr,
struct smc_clc_msg_local *lcl,
- enum smc_lgr_role role)
+ enum smc_lgr_role role, u32 clcqpn)
{
return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
SMC_SYSTEMID_LEN) &&
@@ -567,7 +571,9 @@ static bool smcr_lgr_match(struct smc_link_group *lgr,
SMC_GID_SIZE) &&
!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
sizeof(lcl->mac)) &&
- lgr->role == role;
+ lgr->role == role &&
+ (lgr->role == SMC_SERV ||
+ lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
}
static bool smcd_lgr_match(struct smc_link_group *lgr,
@@ -578,7 +584,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
- struct smc_ib_device *smcibdev, u8 ibport,
+ struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
u64 peer_gid)
{
@@ -603,7 +609,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
list_for_each_entry(lgr, &smc_lgr_list.list, list) {
write_lock_bh(&lgr->conns_lock);
if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
- smcr_lgr_match(lgr, lcl, role)) &&
+ smcr_lgr_match(lgr, lcl, role, clcqpn)) &&
!lgr->sync_err &&
lgr->vlan_id == vlan_id &&
(role == SMC_CLNT ||
@@ -1024,6 +1030,8 @@ void smc_core_exit(void)
smc_llc_link_inactive(lnk);
}
cancel_delayed_work_sync(&lgr->free_work);
+ if (lgr->is_smcd)
+ smc_ism_signal_shutdown(lgr);
smc_lgr_free(lgr); /* free link group */
}
}
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index c156674733c9..cf98f4d6093e 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -247,7 +247,8 @@ void smc_lgr_free(struct smc_link_group *lgr);
void smc_lgr_forget(struct smc_link_group *lgr);
void smc_lgr_terminate(struct smc_link_group *lgr);
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
-void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid);
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
+ unsigned short vlan);
int smc_buf_create(struct smc_sock *smc, bool is_smcd);
int smc_uncompress_bufsize(u8 compressed);
int smc_rmb_rtoken_handling(struct smc_connection *conn,
@@ -262,7 +263,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id);
void smc_conn_free(struct smc_connection *conn);
int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
- struct smc_ib_device *smcibdev, u8 ibport,
+ struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
u64 peer_gid);
void smcd_conn_free(struct smc_connection *conn);
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index e36f21ce7252..2fff79db1a59 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -187,22 +187,28 @@ struct smc_ism_event_work {
#define ISM_EVENT_REQUEST 0x0001
#define ISM_EVENT_RESPONSE 0x0002
#define ISM_EVENT_REQUEST_IR 0x00000001
+#define ISM_EVENT_CODE_SHUTDOWN 0x80
#define ISM_EVENT_CODE_TESTLINK 0x83
+union smcd_sw_event_info {
+ u64 info;
+ struct {
+ u8 uid[SMC_LGR_ID_SIZE];
+ unsigned short vlan_id;
+ u16 code;
+ };
+};
+
static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
{
- union {
- u64 info;
- struct {
- u32 uid;
- unsigned short vlanid;
- u16 code;
- };
- } ev_info;
+ union smcd_sw_event_info ev_info;
+ ev_info.info = wrk->event.info;
switch (wrk->event.code) {
+ case ISM_EVENT_CODE_SHUTDOWN: /* Peer shut down DMBs */
+ smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);
+ break;
case ISM_EVENT_CODE_TESTLINK: /* Activity timer */
- ev_info.info = wrk->event.info;
if (ev_info.code == ISM_EVENT_REQUEST) {
ev_info.code = ISM_EVENT_RESPONSE;
wrk->smcd->ops->signal_event(wrk->smcd,
@@ -215,6 +221,21 @@ static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
}
}
+int smc_ism_signal_shutdown(struct smc_link_group *lgr)
+{
+ int rc;
+ union smcd_sw_event_info ev_info;
+
+ memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
+ ev_info.vlan_id = lgr->vlan_id;
+ ev_info.code = ISM_EVENT_REQUEST;
+ rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
+ ISM_EVENT_REQUEST_IR,
+ ISM_EVENT_CODE_SHUTDOWN,
+ ev_info.info);
+ return rc;
+}
+
/* worker for SMC-D events */
static void smc_ism_event_work(struct work_struct *work)
{
@@ -223,7 +244,7 @@ static void smc_ism_event_work(struct work_struct *work)
switch (wrk->event.type) {
case ISM_EVENT_GID: /* GID event, token is peer GID */
- smc_smcd_terminate(wrk->smcd, wrk->event.tok);
+ smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);
break;
case ISM_EVENT_DMB:
break;
@@ -289,7 +310,7 @@ void smcd_unregister_dev(struct smcd_dev *smcd)
spin_unlock(&smcd_dev_list.lock);
flush_workqueue(smcd->event_wq);
destroy_workqueue(smcd->event_wq);
- smc_smcd_terminate(smcd, 0);
+ smc_smcd_terminate(smcd, 0, VLAN_VID_MASK);
device_del(&smcd->dev);
}
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index aee45b860b79..4da946cbfa29 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -45,4 +45,5 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc);
int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos,
void *data, size_t len);
+int smc_ism_signal_shutdown(struct smc_link_group *lgr);
#endif
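smc_ism_signal_shutdown() packs the link-group id, VLAN id and event code into the single 64-bit info word that signal_event() carries. A standalone sketch of that packing (SMC_LGR_ID_SIZE is 4 in the kernel sources; the union mirrors smcd_sw_event_info from smc_ism.c, and the anonymous struct needs C11 or the GNU extension, matching how the kernel builds):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SMC_LGR_ID_SIZE 4

    union sw_event_info {            /* mirrors smcd_sw_event_info above */
            uint64_t info;
            struct {
                    uint8_t uid[SMC_LGR_ID_SIZE];
                    uint16_t vlan_id;
                    uint16_t code;
            };
    };

    int main(void)
    {
            union sw_event_info ev = { 0 };
            uint8_t lgr_id[SMC_LGR_ID_SIZE] = { 0xde, 0xad, 0xbe, 0xef };

            memcpy(ev.uid, lgr_id, SMC_LGR_ID_SIZE);
            ev.vlan_id = 100;
            ev.code = 0x0001;        /* ISM_EVENT_REQUEST */
            printf("info word: %#llx\n", (unsigned long long)ev.info);
            return 0;
    }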
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 3c458d279855..c2694750a6a8 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -215,12 +215,14 @@ int smc_wr_tx_put_slot(struct smc_link *link,
pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
if (pend->idx < link->wr_tx_cnt) {
+ u32 idx = pend->idx;
+
/* clear the full struct smc_wr_tx_pend including .priv */
memset(&link->wr_tx_pends[pend->idx], 0,
sizeof(link->wr_tx_pends[pend->idx]));
memset(&link->wr_tx_bufs[pend->idx], 0,
sizeof(link->wr_tx_bufs[pend->idx]));
- test_and_clear_bit(pend->idx, link->wr_tx_mask);
+ test_and_clear_bit(idx, link->wr_tx_mask);
return 1;
}
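The smc_wr.c hunk is a read-after-clear fix: pend points into link->wr_tx_pends[], so the memset() wipes pend->idx before the old code handed it to test_and_clear_bit(), which then always operated on bit 0. Snapshotting the index first is the whole fix. The pattern in isolation, with stand-in types:

    #include <string.h>

    struct tx_pend { unsigned int idx; /* ... */ };

    static void put_slot(struct tx_pend *pends, unsigned long *mask,
                         struct tx_pend *pend)
    {
            unsigned int idx = pend->idx;   /* snapshot before the wipe */

            /* pend points at pends[idx], so this clears pend->idx too */
            memset(&pends[idx], 0, sizeof(pends[idx]));
            mask[idx / (8 * sizeof(long))] &=
                    ~(1UL << (idx % (8 * sizeof(long))));
    }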
diff --git a/net/socket.c b/net/socket.c
index 593826e11a53..334fcc617ef2 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -853,7 +853,7 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
struct socket *sock = file->private_data;
if (unlikely(!sock->ops->splice_read))
- return -EINVAL;
+ return generic_file_splice_read(file, ppos, pipe, len, flags);
return sock->ops->splice_read(sock, ppos, pipe, len, flags);
}
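With the socket.c change, splicing from a socket whose protocol has no ->splice_read falls back to generic_file_splice_read() rather than failing outright, so splice(2) from such sockets now behaves like a read. A hedged userspace sketch of a caller that previously had to special-case the EINVAL:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Move up to len bytes from a socket into a pipe's write end. */
    static ssize_t socket_to_pipe(int sock_fd, int pipe_wr, size_t len)
    {
            ssize_t n = splice(sock_fd, NULL, pipe_wr, NULL, len,
                               SPLICE_F_MOVE);

            if (n < 0 && errno == EINVAL)   /* pre-fallback kernels only */
                    perror("splice");
            return n;
    }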
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index d8831b988b1e..ab4a3be1542a 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -281,13 +281,7 @@ static bool generic_key_to_expire(struct rpc_cred *cred)
{
struct auth_cred *acred = &container_of(cred, struct generic_cred,
gc_base)->acred;
- bool ret;
-
- get_rpccred(cred);
- ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
- put_rpccred(cred);
-
- return ret;
+ return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
}
static const struct rpc_credops generic_credops = {
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 30f970cdc7f6..5d3f252659f1 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1239,36 +1239,59 @@ gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
return &gss_auth->rpc_auth;
}
+static struct gss_cred *
+gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
+{
+ struct gss_cred *new;
+
+ /* Make a copy of the cred so that we can reference count it */
+ new = kzalloc(sizeof(*gss_cred), GFP_NOIO);
+ if (new) {
+ struct auth_cred acred = {
+ .uid = gss_cred->gc_base.cr_uid,
+ };
+ struct gss_cl_ctx *ctx =
+ rcu_dereference_protected(gss_cred->gc_ctx, 1);
+
+ rpcauth_init_cred(&new->gc_base, &acred,
+ &gss_auth->rpc_auth,
+ &gss_nullops);
+ new->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
+ new->gc_service = gss_cred->gc_service;
+ new->gc_principal = gss_cred->gc_principal;
+ kref_get(&gss_auth->kref);
+ rcu_assign_pointer(new->gc_ctx, ctx);
+ gss_get_ctx(ctx);
+ }
+ return new;
+}
+
/*
- * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
+ * gss_send_destroy_context will cause the RPCSEC_GSS to send a NULL RPC call
* to the server with the GSS control procedure field set to
* RPC_GSS_PROC_DESTROY. This should normally cause the server to release
* all RPCSEC_GSS state associated with that context.
*/
-static int
-gss_destroying_context(struct rpc_cred *cred)
+static void
+gss_send_destroy_context(struct rpc_cred *cred)
{
struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
+ struct gss_cred *new;
struct rpc_task *task;
- if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
- return 0;
-
- ctx->gc_proc = RPC_GSS_PROC_DESTROY;
- cred->cr_ops = &gss_nullops;
-
- /* Take a reference to ensure the cred will be destroyed either
- * by the RPC call or by the put_rpccred() below */
- get_rpccred(cred);
+ new = gss_dup_cred(gss_auth, gss_cred);
+ if (new) {
+ ctx->gc_proc = RPC_GSS_PROC_DESTROY;
- task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
- if (!IS_ERR(task))
- rpc_put_task(task);
+ task = rpc_call_null(gss_auth->client, &new->gc_base,
+ RPC_TASK_ASYNC|RPC_TASK_SOFT);
+ if (!IS_ERR(task))
+ rpc_put_task(task);
- put_rpccred(cred);
- return 1;
+ put_rpccred(&new->gc_base);
+ }
}
/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
@@ -1330,8 +1353,8 @@ static void
gss_destroy_cred(struct rpc_cred *cred)
{
- if (gss_destroying_context(cred))
- return;
+ if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
+ gss_send_destroy_context(cred);
gss_destroy_nullcred(cred);
}
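The auth_gss.c rework hands the asynchronous RPC_GSS_PROC_DESTROY call its own duplicated credential, so the async task controls a private refcount and the original cred can be torn down immediately without racing it. Reduced to its essentials (stand-in types, not the SUNRPC API), the ownership hand-off looks like:

    #include <stdlib.h>
    #include <string.h>

    struct cred { int refcount; /* ...identity fields... */ };

    /* Give the async task its own copy; the caller may free src at once. */
    static struct cred *dup_for_async(const struct cred *src)
    {
            struct cred *copy = malloc(sizeof(*copy));

            if (copy) {
                    memcpy(copy, src, sizeof(*copy));
                    copy->refcount = 1;     /* owned by the async task */
            }
            return copy;
    }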
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 2bbb8d38d2bf..f302c6eb8779 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(xdr_commit_encode);
static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
size_t nbytes)
{
- static __be32 *p;
+ __be32 *p;
int space_left;
int frag1bytes, frag2bytes;
@@ -673,11 +673,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
WARN_ON_ONCE(xdr->iov);
return;
}
- if (fraglen) {
+ if (fraglen)
xdr->end = head->iov_base + head->iov_len;
- xdr->page_ptr--;
- }
/* (otherwise assume xdr->end is already set) */
+ xdr->page_ptr--;
head->iov_len = len;
buf->len = len;
xdr->p = head->iov_base + head->iov_len;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 2830709957bd..c138d68e8a69 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -166,7 +166,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
/* Apply trial address if we just left trial period */
if (!trial && !self) {
- tipc_net_finalize(net, tn->trial_addr);
+ tipc_sched_net_finalize(net, tn->trial_addr);
+ msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
}
@@ -300,14 +301,12 @@ static void tipc_disc_timeout(struct timer_list *t)
goto exit;
}
- /* Trial period over ? */
- if (!time_before(jiffies, tn->addr_trial_end)) {
- /* Did we just leave it ? */
- if (!tipc_own_addr(net))
- tipc_net_finalize(net, tn->trial_addr);
-
- msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
- msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
+ /* Did we just leave trial period ? */
+ if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
+ mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
+ spin_unlock_bh(&d->lock);
+ tipc_sched_net_finalize(net, tn->trial_addr);
+ return;
}
/* Adjust timeout interval according to discovery phase */
@@ -319,6 +318,8 @@ static void tipc_disc_timeout(struct timer_list *t)
d->timer_intv = TIPC_DISC_SLOW;
else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST)
d->timer_intv = TIPC_DISC_FAST;
+ msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+ msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
}
mod_timer(&d->timer, jiffies + d->timer_intv);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 201c3b5bc96b..836727e363c4 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1594,14 +1594,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
l->priority = peers_prio;
- /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
- if (msg_peer_stopping(hdr))
+ /* If peer is going down we want full re-establish cycle */
+ if (msg_peer_stopping(hdr)) {
rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
- else if ((mtyp == RESET_MSG) || !link_is_up(l))
+ break;
+ }
+ /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
+ if (mtyp == RESET_MSG || !link_is_up(l))
rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
/* ACTIVATE_MSG takes up link if it was already locally reset */
- if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
+ if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
l->peer_session = msg_session(hdr);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 62199cf5a56c..f076edb74338 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -104,6 +104,14 @@
* - A local spin_lock protecting the queue of subscriber events.
*/
+struct tipc_net_work {
+ struct work_struct work;
+ struct net *net;
+ u32 addr;
+};
+
+static void tipc_net_finalize(struct net *net, u32 addr);
+
int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
{
if (tipc_own_id(net)) {
@@ -119,17 +127,38 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
return 0;
}
-void tipc_net_finalize(struct net *net, u32 addr)
+static void tipc_net_finalize(struct net *net, u32 addr)
{
struct tipc_net *tn = tipc_net(net);
- if (!cmpxchg(&tn->node_addr, 0, addr)) {
- tipc_set_node_addr(net, addr);
- tipc_named_reinit(net);
- tipc_sk_reinit(net);
- tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
- TIPC_CLUSTER_SCOPE, 0, addr);
- }
+ if (cmpxchg(&tn->node_addr, 0, addr))
+ return;
+ tipc_set_node_addr(net, addr);
+ tipc_named_reinit(net);
+ tipc_sk_reinit(net);
+ tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
+ TIPC_CLUSTER_SCOPE, 0, addr);
+}
+
+static void tipc_net_finalize_work(struct work_struct *work)
+{
+ struct tipc_net_work *fwork;
+
+ fwork = container_of(work, struct tipc_net_work, work);
+ tipc_net_finalize(fwork->net, fwork->addr);
+ kfree(fwork);
+}
+
+void tipc_sched_net_finalize(struct net *net, u32 addr)
+{
+ struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
+
+ if (!fwork)
+ return;
+ INIT_WORK(&fwork->work, tipc_net_finalize_work);
+ fwork->net = net;
+ fwork->addr = addr;
+ schedule_work(&fwork->work);
}
void tipc_net_stop(struct net *net)
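The discover.c and net.c hunks implement one move: tipc_net_finalize() ends up in tipc_sk_reinit(), which can sleep, so it must not run from the discovery timer (softirq context, d->lock held). tipc_sched_net_finalize() therefore defers it to a workqueue, allocating the context with GFP_ATOMIC because the caller cannot sleep. The idiom in general form, as a kernel-side sketch (compilable only in-tree):

    #include <linux/printk.h>
    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct finalize_work {
            struct work_struct work;
            u32 addr;
    };

    static void finalize_fn(struct work_struct *work)
    {
            struct finalize_work *fw =
                    container_of(work, struct finalize_work, work);

            /* process context: taking sleeping locks is fine here */
            pr_info("finalizing addr %u\n", fw->addr);
            kfree(fw);
    }

    /* Safe from timer/softirq context: no sleeping, atomic allocation. */
    static void sched_finalize(u32 addr)
    {
            struct finalize_work *fw = kzalloc(sizeof(*fw), GFP_ATOMIC);

            if (!fw)
                    return;
            INIT_WORK(&fw->work, finalize_fn);
            fw->addr = addr;
            schedule_work(&fw->work);
    }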
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 09ad02b50bb1..b7f2e364eb99 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -42,7 +42,7 @@
extern const struct nla_policy tipc_nl_net_policy[];
int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
-void tipc_net_finalize(struct net *net, u32 addr);
+void tipc_sched_net_finalize(struct net *net, u32 addr);
void tipc_net_stop(struct net *net);
int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 636e6131769d..b57b1be7252b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1555,16 +1555,17 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
/**
* tipc_sk_anc_data_recv - optionally capture ancillary data for received message
* @m: descriptor for message info
- * @msg: received message header
+ * @skb: received message buffer
* @tsk: TIPC port associated with message
*
* Note: Ancillary data is not captured if not requested by receiver.
*
* Returns 0 if successful, otherwise errno
*/
-static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
+static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
struct tipc_sock *tsk)
{
+ struct tipc_msg *msg;
u32 anc_data[3];
u32 err;
u32 dest_type;
@@ -1573,6 +1574,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
if (likely(m->msg_controllen == 0))
return 0;
+ msg = buf_msg(skb);
/* Optionally capture errored message object(s) */
err = msg ? msg_errcode(msg) : 0;
@@ -1583,6 +1585,9 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
if (res)
return res;
if (anc_data[1]) {
+ if (skb_linearize(skb))
+ return -ENOMEM;
+ msg = buf_msg(skb);
res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
msg_data(msg));
if (res)
@@ -1744,9 +1749,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
/* Collect msg meta data, including error code and rejected data */
tipc_sk_set_orig_addr(m, skb);
- rc = tipc_sk_anc_data_recv(m, hdr, tsk);
+ rc = tipc_sk_anc_data_recv(m, skb, tsk);
if (unlikely(rc))
goto exit;
+ hdr = buf_msg(skb);
/* Capture data if non-error msg, otherwise just set return value */
if (likely(!err)) {
@@ -1856,9 +1862,10 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
/* Collect msg meta data, incl. error code and rejected data */
if (!copied) {
tipc_sk_set_orig_addr(m, skb);
- rc = tipc_sk_anc_data_recv(m, hdr, tsk);
+ rc = tipc_sk_anc_data_recv(m, skb, tsk);
if (rc)
break;
+ hdr = buf_msg(skb);
}
/* Copy data if msg ok, otherwise return error/partial data */
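The tipc_sk_anc_data_recv() change passes the skb rather than a precomputed header pointer because skb_linearize() may reallocate skb->data, leaving any earlier buf_msg() result dangling, which is why both call sites refresh hdr with buf_msg(skb) right after the call. The general rule, recompute every pointer into skb->data after any operation that can reallocate it, as a kernel-side sketch:

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    static int parse_after_linearize(struct sk_buff *skb)
    {
            void *hdr = skb->data;          /* valid only for now */

            if (skb_linearize(skb))         /* may reallocate skb->data */
                    return -ENOMEM;
            hdr = skb->data;                /* refresh: old hdr may dangle */
            /* ...parse via the refreshed hdr... */
            (void)hdr;
            return 0;
    }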
diff --git a/scripts/coccinelle/api/drm-get-put.cocci b/scripts/coccinelle/api/drm-get-put.cocci
deleted file mode 100644
index 3a09c97ad87d..000000000000
--- a/scripts/coccinelle/api/drm-get-put.cocci
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-///
-/// Use drm_*_get() and drm_*_put() helpers instead of drm_*_reference() and
-/// drm_*_unreference() helpers.
-///
-// Confidence: High
-// Copyright: (C) 2017 NVIDIA Corporation
-// Options: --no-includes --include-headers
-//
-
-virtual patch
-virtual report
-
-@depends on patch@
-expression object;
-@@
-
-(
-- drm_connector_reference(object)
-+ drm_connector_get(object)
-|
-- drm_connector_unreference(object)
-+ drm_connector_put(object)
-|
-- drm_framebuffer_reference(object)
-+ drm_framebuffer_get(object)
-|
-- drm_framebuffer_unreference(object)
-+ drm_framebuffer_put(object)
-|
-- drm_gem_object_reference(object)
-+ drm_gem_object_get(object)
-|
-- drm_gem_object_unreference(object)
-+ drm_gem_object_put(object)
-|
-- __drm_gem_object_unreference(object)
-+ __drm_gem_object_put(object)
-|
-- drm_gem_object_unreference_unlocked(object)
-+ drm_gem_object_put_unlocked(object)
-|
-- drm_dev_unref(object)
-+ drm_dev_put(object)
-)
-
-@r depends on report@
-expression object;
-position p;
-@@
-
-(
-drm_connector_unreference@p(object)
-|
-drm_connector_reference@p(object)
-|
-drm_framebuffer_unreference@p(object)
-|
-drm_framebuffer_reference@p(object)
-|
-drm_gem_object_unreference@p(object)
-|
-drm_gem_object_reference@p(object)
-|
-__drm_gem_object_unreference(object)
-|
-drm_gem_object_unreference_unlocked(object)
-|
-drm_dev_unref@p(object)
-)
-
-@script:python depends on report@
-object << r.object;
-p << r.p;
-@@
-
-msg="WARNING: use get/put helpers to reference and dereference %s" % (object)
-coccilib.report.print_report(p[0], msg)
diff --git a/scripts/faddr2line b/scripts/faddr2line
index a0149db00be7..6c6439f69a72 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -71,7 +71,7 @@ die() {
# Try to figure out the source directory prefix so we can remove it from the
# addr2line output. HACK ALERT: This assumes that start_kernel() is in
-# kernel/init.c! This only works for vmlinux. Otherwise it falls back to
+# init/main.c! This only works for vmlinux. Otherwise it falls back to
# printing the absolute path.
find_dir_prefix() {
local objfile=$1
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
index da66e7742282..0ef906499646 100755
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -102,7 +102,8 @@ if [ ! -r "$INITFILE" ]; then
fi
MERGE_LIST=$*
-SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)[= ].*/\2/p"
+SED_CONFIG_EXP1="s/^\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)=.*/\1/p"
+SED_CONFIG_EXP2="s/^# \(${CONFIG_PREFIX}[a-zA-Z0-9_]*\) is not set$/\1/p"
TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX)
@@ -116,7 +117,7 @@ for MERGE_FILE in $MERGE_LIST ; do
echo "The merge file '$MERGE_FILE' does not exist. Exit." >&2
exit 1
fi
- CFG_LIST=$(sed -n "$SED_CONFIG_EXP" $MERGE_FILE)
+ CFG_LIST=$(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $MERGE_FILE)
for CFG in $CFG_LIST ; do
grep -q -w $CFG $TMP_FILE || continue
@@ -159,7 +160,7 @@ make KCONFIG_ALLCONFIG=$TMP_FILE $OUTPUT_ARG $ALLTARGET
# Check all specified config values took (might have missed-dependency issues)
-for CFG in $(sed -n "$SED_CONFIG_EXP" $TMP_FILE); do
+for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do
REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE)
ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG")
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 90c9a8ac7adb..f43a274f4f1d 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -81,11 +81,11 @@ else
cp System.map "$tmpdir/boot/System.map-$version"
cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
fi
-cp "$($MAKE -s image_name)" "$tmpdir/$installed_image_path"
+cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
-if grep -q "^CONFIG_OF=y" $KCONFIG_CONFIG ; then
+if grep -q "^CONFIG_OF_EARLY_FLATTREE=y" $KCONFIG_CONFIG ; then
# Only some architectures with OF support have this target
- if grep -q dtbs_install "${srctree}/arch/$SRCARCH/Makefile"; then
+ if [ -d "${srctree}/arch/$SRCARCH/boot/dts" ]; then
$MAKE KBUILD_SRC= INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
fi
fi
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index 663a7f343b42..edcad61fe3cd 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -88,6 +88,7 @@ set_debarch() {
version=$KERNELRELEASE
if [ -n "$KDEB_PKGVERSION" ]; then
packageversion=$KDEB_PKGVERSION
+ revision=${packageversion##*-}
else
revision=$(cat .version 2>/dev/null||echo 1)
packageversion=$version-$revision
@@ -205,10 +206,12 @@ cat <<EOF > debian/rules
#!$(command -v $MAKE) -f
build:
- \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC=
+ \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
+ KBUILD_BUILD_VERSION=${revision} KBUILD_SRC=
binary-arch:
- \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC= intdeb-pkg
+ \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
+ KBUILD_BUILD_VERSION=${revision} KBUILD_SRC= intdeb-pkg
clean:
rm -rf debian/*tmp debian/files
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index e05646dc24dc..009147d4718e 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -12,6 +12,7 @@
# how we were called determines which rpms we build and how we build them
if [ "$1" = prebuilt ]; then
S=DEL
+ MAKE="$MAKE -f $srctree/Makefile"
else
S=
fi
@@ -78,19 +79,19 @@ $S %prep
$S %setup -q
$S
$S %build
-$S make %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}
+$S $MAKE %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}
$S
%install
mkdir -p %{buildroot}/boot
%ifarch ia64
mkdir -p %{buildroot}/boot/efi
- cp \$(make image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE
+ cp \$($MAKE image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE
ln -s efi/vmlinuz-$KERNELRELEASE %{buildroot}/boot/
%else
- cp \$(make image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE
+ cp \$($MAKE image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE
%endif
-$M make %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} KBUILD_SRC= modules_install
- make %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr KBUILD_SRC= headers_install
+$M $MAKE %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} modules_install
+ $MAKE %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
cp System.map %{buildroot}/boot/System.map-$KERNELRELEASE
cp .config %{buildroot}/boot/config-$KERNELRELEASE
bzip2 -9 --keep vmlinux
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 79f7dd57d571..71f39410691b 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -74,7 +74,7 @@ scm_version()
fi
# Check for uncommitted changes
- if git status -uno --porcelain | grep -qv '^.. scripts/package'; then
+ if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
printf '%s' -dirty
fi
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index 839e190bbd7a..5056fb3b897d 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -168,7 +168,6 @@ class id_parser(object):
self.curline = 0
try:
for line in fd:
- line = line.decode(locale.getpreferredencoding(False), errors='ignore')
self.curline += 1
if self.curline > maxlines:
break
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index 6dc075144508..d775e03fbbcc 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -106,6 +106,7 @@ int asymmetric_verify(struct key *keyring, const char *sig,
pks.pkey_algo = "rsa";
pks.hash_algo = hash_algo_name[hdr->hash_algo];
+ pks.encoding = "pkcs1";
pks.digest = (u8 *)data;
pks.digest_size = datalen;
pks.s = hdr->sig;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 7ce683259357..a67459eb62d5 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -5318,6 +5318,9 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
addr_buf = address;
while (walk_size < addrlen) {
+ if (walk_size + sizeof(sa_family_t) > addrlen)
+ return -EINVAL;
+
addr = addr_buf;
switch (addr->sa_family) {
case AF_UNSPEC:
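The selinux_sctp_bind_connect() guard closes an out-of-bounds read: before the switch dereferences addr->sa_family, the loop must know that at least sizeof(sa_family_t) bytes remain, otherwise a crafted addrlen lets the read walk past the user-supplied buffer. A userspace sketch of the same walk with both bounds checks (the per-family length table is illustrative):

    #include <errno.h>
    #include <netinet/in.h>
    #include <stddef.h>
    #include <sys/socket.h>

    /* Walk a packed list of sockaddrs, as SCTP bind/connect receives them. */
    static int walk_addrs(const void *addrs, size_t addrlen)
    {
            size_t walk = 0;

            while (walk < addrlen) {
                    const struct sockaddr *sa;
                    size_t len;

                    if (walk + sizeof(sa_family_t) > addrlen)
                            return -EINVAL;         /* truncated family field */
                    sa = (const struct sockaddr *)((const char *)addrs + walk);
                    switch (sa->sa_family) {
                    case AF_INET:  len = sizeof(struct sockaddr_in);  break;
                    case AF_INET6: len = sizeof(struct sockaddr_in6); break;
                    default:       return -EINVAL;
                    }
                    if (walk + len > addrlen)
                            return -EINVAL;         /* truncated address */
                    walk += len;
            }
            return 0;
    }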
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 2fe459df3c85..b7efa2296969 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -245,9 +245,13 @@ int mls_context_to_sid(struct policydb *pol,
char *rangep[2];
if (!pol->mls_enabled) {
- if ((def_sid != SECSID_NULL && oldc) || (*scontext) == '\0')
- return 0;
- return -EINVAL;
+ /*
+ * With no MLS, only return -EINVAL if there is a MLS field
+ * and it did not come from an xattr.
+ */
+ if (oldc && def_sid == SECSID_NULL)
+ return -EINVAL;
+ return 0;
}
/*
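The rewritten !mls_enabled branch reads most naturally as a truth table over oldc (an MLS field is present in the context string) and def_sid (SECSID_NULL unless the context came from an xattr): only "MLS field present and not from an xattr" is rejected now. A sketch:

    #include <errno.h>
    #include <stdbool.h>

    #define SECSID_NULL 0

    /*
     * oldc  def_sid         result
     * ----  --------------  -------
     * 0     any             0        (no MLS field: fine)
     * 1     != SECSID_NULL  0        (MLS field from an xattr: tolerated)
     * 1     == SECSID_NULL  -EINVAL  (unexpected MLS field)
     */
    static int check_no_mls(bool oldc, unsigned int def_sid)
    {
            if (oldc && def_sid == SECSID_NULL)
                    return -EINVAL;
            return 0;
    }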
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index f8d4a419f3af..467039b342b5 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1062,8 +1062,8 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
runtime->oss.channels = params_channels(params);
runtime->oss.rate = params_rate(params);
- vfree(runtime->oss.buffer);
- runtime->oss.buffer = vmalloc(runtime->oss.period_bytes);
+ kvfree(runtime->oss.buffer);
+ runtime->oss.buffer = kvzalloc(runtime->oss.period_bytes, GFP_KERNEL);
if (!runtime->oss.buffer) {
err = -ENOMEM;
goto failure;
@@ -2328,7 +2328,7 @@ static void snd_pcm_oss_release_substream(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
runtime = substream->runtime;
- vfree(runtime->oss.buffer);
+ kvfree(runtime->oss.buffer);
runtime->oss.buffer = NULL;
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
snd_pcm_oss_plugin_clear(substream);
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 141c5f3a9575..31cb2acf8afc 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -66,8 +66,8 @@ static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t
return -ENXIO;
size /= 8;
if (plugin->buf_frames < frames) {
- vfree(plugin->buf);
- plugin->buf = vmalloc(size);
+ kvfree(plugin->buf);
+ plugin->buf = kvzalloc(size, GFP_KERNEL);
plugin->buf_frames = frames;
}
if (!plugin->buf) {
@@ -191,7 +191,7 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin)
if (plugin->private_free)
plugin->private_free(plugin);
kfree(plugin->buf_channels);
- vfree(plugin->buf);
+ kvfree(plugin->buf);
kfree(plugin);
return 0;
}
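The pcm_oss.c/pcm_plugin.c conversion to kvzalloc()/kvfree() lets small buffers come from kmalloc while oversized ones still fall back to vmalloc, and the z-variant additionally zeroes the buffer so stale kernel memory cannot leak out through the OSS emulation. The only trap is the pairing rule: kvmalloc-family memory must go back through kvfree(), which handles either backend. A kernel-side sketch:

    #include <linux/mm.h>
    #include <linux/slab.h>

    static void *oss_buf;

    static int oss_buf_resize(size_t bytes)
    {
            kvfree(oss_buf);                        /* frees either backend */
            oss_buf = kvzalloc(bytes, GFP_KERNEL);  /* kmalloc, else vmalloc */
            return oss_buf ? 0 : -ENOMEM;
    }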
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0a24037184c3..0a567634e5fa 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1177,6 +1177,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
+ SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
@@ -8413,7 +8414,7 @@ static void ca0132_free(struct hda_codec *codec)
snd_hda_power_down(codec);
if (spec->mem_base)
- iounmap(spec->mem_base);
+ pci_iounmap(codec->bus->pci, spec->mem_base);
kfree(spec->spec_init_verbs);
kfree(codec->spec);
}
@@ -8488,7 +8489,7 @@ static void ca0132_config(struct hda_codec *codec)
break;
case QUIRK_AE5:
codec_dbg(codec, "%s: QUIRK_AE5 applied.\n", __func__);
- snd_hda_apply_pincfgs(codec, r3di_pincfgs);
+ snd_hda_apply_pincfgs(codec, ae5_pincfgs);
break;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index fa61674a5605..970bc44a378b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6481,6 +6481,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 97f49b751e6e..568575b72f2f 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -58,8 +58,8 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
removefunc = false;
}
if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0 &&
- snd_hda_gen_add_micmute_led(codec,
- update_tpacpi_micmute) > 0)
+ !snd_hda_gen_add_micmute_led(codec,
+ update_tpacpi_micmute))
removefunc = false;
}
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index 83d76c345940..00c92eb854ce 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1648,7 +1648,7 @@ static int had_create_jack(struct snd_intelhad *ctx,
* PM callbacks
*/
-static int hdmi_lpe_audio_runtime_suspend(struct device *dev)
+static int __maybe_unused hdmi_lpe_audio_suspend(struct device *dev)
{
struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
int port;
@@ -1664,23 +1664,8 @@ static int hdmi_lpe_audio_runtime_suspend(struct device *dev)
}
}
- return 0;
-}
-
-static int __maybe_unused hdmi_lpe_audio_suspend(struct device *dev)
-{
- struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
- int err;
+ snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D3hot);
- err = hdmi_lpe_audio_runtime_suspend(dev);
- if (!err)
- snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D3hot);
- return err;
-}
-
-static int hdmi_lpe_audio_runtime_resume(struct device *dev)
-{
- pm_runtime_mark_last_busy(dev);
return 0;
}
@@ -1688,8 +1673,10 @@ static int __maybe_unused hdmi_lpe_audio_resume(struct device *dev)
{
struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
- hdmi_lpe_audio_runtime_resume(dev);
+ pm_runtime_mark_last_busy(dev);
+
snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D0);
+
return 0;
}
@@ -1877,7 +1864,6 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
dev_dbg(&pdev->dev, "%s: handle pending notification\n", __func__);
for_each_port(card_ctx, port) {
@@ -1908,8 +1894,6 @@ static int hdmi_lpe_audio_remove(struct platform_device *pdev)
static const struct dev_pm_ops hdmi_lpe_audio_pm = {
SET_SYSTEM_SLEEP_PM_OPS(hdmi_lpe_audio_suspend, hdmi_lpe_audio_resume)
- SET_RUNTIME_PM_OPS(hdmi_lpe_audio_runtime_suspend,
- hdmi_lpe_audio_runtime_resume, NULL)
};
static struct platform_driver hdmi_lpe_audio_driver = {
diff --git a/tools/arch/arm64/include/asm/barrier.h b/tools/arch/arm64/include/asm/barrier.h
index 12835ea0e417..378c051fa177 100644
--- a/tools/arch/arm64/include/asm/barrier.h
+++ b/tools/arch/arm64/include/asm/barrier.h
@@ -14,74 +14,75 @@
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
-#define smp_store_release(p, v) \
-do { \
- union { typeof(*p) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(*p)) (v) }; \
- \
- switch (sizeof(*p)) { \
- case 1: \
- asm volatile ("stlrb %w1, %0" \
- : "=Q" (*p) \
- : "r" (*(__u8 *)__u.__c) \
- : "memory"); \
- break; \
- case 2: \
- asm volatile ("stlrh %w1, %0" \
- : "=Q" (*p) \
- : "r" (*(__u16 *)__u.__c) \
- : "memory"); \
- break; \
- case 4: \
- asm volatile ("stlr %w1, %0" \
- : "=Q" (*p) \
- : "r" (*(__u32 *)__u.__c) \
- : "memory"); \
- break; \
- case 8: \
- asm volatile ("stlr %1, %0" \
- : "=Q" (*p) \
- : "r" (*(__u64 *)__u.__c) \
- : "memory"); \
- break; \
- default: \
- /* Only to shut up gcc ... */ \
- mb(); \
- break; \
- } \
+#define smp_store_release(p, v) \
+do { \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (v) }; \
+ \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("stlrb %w1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u8_alias_t *)__u.__c) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("stlrh %w1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u16_alias_t *)__u.__c) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm volatile ("stlr %w1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u32_alias_t *)__u.__c) \
+ : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("stlr %1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u64_alias_t *)__u.__c) \
+ : "memory"); \
+ break; \
+ default: \
+ /* Only to shut up gcc ... */ \
+ mb(); \
+ break; \
+ } \
} while (0)
-#define smp_load_acquire(p) \
-({ \
- union { typeof(*p) __val; char __c[1]; } __u; \
- \
- switch (sizeof(*p)) { \
- case 1: \
- asm volatile ("ldarb %w0, %1" \
- : "=r" (*(__u8 *)__u.__c) \
- : "Q" (*p) : "memory"); \
- break; \
- case 2: \
- asm volatile ("ldarh %w0, %1" \
- : "=r" (*(__u16 *)__u.__c) \
- : "Q" (*p) : "memory"); \
- break; \
- case 4: \
- asm volatile ("ldar %w0, %1" \
- : "=r" (*(__u32 *)__u.__c) \
- : "Q" (*p) : "memory"); \
- break; \
- case 8: \
- asm volatile ("ldar %0, %1" \
- : "=r" (*(__u64 *)__u.__c) \
- : "Q" (*p) : "memory"); \
- break; \
- default: \
- /* Only to shut up gcc ... */ \
- mb(); \
- break; \
- } \
- __u.__val; \
+#define smp_load_acquire(p) \
+({ \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__c = { 0 } }; \
+ \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("ldarb %w0, %1" \
+ : "=r" (*(__u8_alias_t *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("ldarh %w0, %1" \
+ : "=r" (*(__u16_alias_t *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 4: \
+ asm volatile ("ldar %w0, %1" \
+ : "=r" (*(__u32_alias_t *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("ldar %0, %1" \
+ : "=r" (*(__u64_alias_t *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ default: \
+ /* Only to shut up gcc ... */ \
+ mb(); \
+ break; \
+ } \
+ __u.__val; \
})
#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
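The tools/ copy of the arm64 barriers switches the register casts to __u8_alias_t and friends, i.e. typedefs carrying __attribute__((may_alias)): unlike the kernel proper, userspace tools are not built with -fno-strict-aliasing, so the union-based type punning here could otherwise be miscompiled. Call sites are unchanged; a hedged usage sketch (assumes a tools/ build environment where this header resolves):

    /* Illustrative; assumes the tools/ include paths resolve this header. */
    #include <asm/barrier.h>

    static unsigned long ring_head;

    /* The acquire-load pairs with the producer's smp_store_release(). */
    unsigned long read_head(void)
    {
            return smp_load_acquire(&ring_head);
    }

    void publish_head(unsigned long v)
    {
            smp_store_release(&ring_head, v);
    }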
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 236b9b97dfdb..667c14e56031 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -55,7 +55,6 @@ counted. The following modifiers exist:
S - read sample value (PERF_SAMPLE_READ)
D - pin the event to the PMU
W - group is weak and will fallback to non-group if not schedulable,
- only supported in 'perf stat' for now.
The 'p' modifier can be used for specifying how precise the instruction
address should be. The 'p' modifier can be specified multiple times:
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 3ccb4f0bf088..d95655489f7e 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -387,7 +387,7 @@ SHELL = $(SHELL_PATH)
linux_uapi_dir := $(srctree)/tools/include/uapi/linux
asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic
-arch_asm_uapi_dir := $(srctree)/tools/arch/$(ARCH)/include/uapi/asm/
+arch_asm_uapi_dir := $(srctree)/tools/arch/$(SRCARCH)/include/uapi/asm/
beauty_outdir := $(OUTPUT)trace/beauty/generated
beauty_ioctl_outdir := $(beauty_outdir)/ioctl
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 10cf889c6d75..488779bc4c8d 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -391,7 +391,12 @@ try_again:
ui__warning("%s\n", msg);
goto try_again;
}
-
+ if ((errno == EINVAL || errno == EBADF) &&
+ pos->leader != pos &&
+ pos->weak_group) {
+ pos = perf_evlist__reset_weak_group(evlist, pos);
+ goto try_again;
+ }
rc = -errno;
perf_evsel__open_strerror(pos, &opts->target,
errno, msg, sizeof(msg));
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index d1028d7755bb..a635abfa77b6 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -383,32 +383,6 @@ static bool perf_evsel__should_store_id(struct perf_evsel *counter)
return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
}
-static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
-{
- struct perf_evsel *c2, *leader;
- bool is_open = true;
-
- leader = evsel->leader;
- pr_debug("Weak group for %s/%d failed\n",
- leader->name, leader->nr_members);
-
- /*
- * for_each_group_member doesn't work here because it doesn't
- * include the first entry.
- */
- evlist__for_each_entry(evsel_list, c2) {
- if (c2 == evsel)
- is_open = false;
- if (c2->leader == leader) {
- if (is_open)
- perf_evsel__close(c2);
- c2->leader = c2;
- c2->nr_members = 0;
- }
- }
- return leader;
-}
-
static bool is_target_alive(struct target *_target,
struct thread_map *threads)
{
@@ -477,7 +451,7 @@ try_again:
if ((errno == EINVAL || errno == EBADF) &&
counter->leader != counter &&
counter->weak_group) {
- counter = perf_evsel__reset_weak_group(counter);
+ counter = perf_evlist__reset_weak_group(evsel_list, counter);
goto try_again;
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b2838de13de0..aa0c73e57924 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1429,6 +1429,9 @@ int cmd_top(int argc, const char **argv)
}
}
+ if (opts->branch_stack && callchain_param.enabled)
+ symbol_conf.show_branchflag_count = true;
+
sort__mode = SORT_MODE__TOP;
/* display thread wants entries to be collapsed in a different tree */
perf_hpp_list.need_collapse = 1;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index dc8a6c4986ce..835619476370 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -108,6 +108,7 @@ struct trace {
} stats;
unsigned int max_stack;
unsigned int min_stack;
+ bool raw_augmented_syscalls;
bool not_ev_qualifier;
bool live;
bool full_time;
@@ -1724,13 +1725,28 @@ static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
return printed;
}
-static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size)
+static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, bool raw_augmented)
{
void *augmented_args = NULL;
+ /*
+ * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
+ * and there we get all 6 syscall args plus the tracepoint common
+ * fields (sizeof(long)) and the syscall_nr (another long). So we check
+ * if that is the case and, if so, take the augmented args not after
+ * sc->args_size but after the full raw_syscalls:sys_enter payload, which is
+ * fixed.
+ *
+ * We'll revisit this later to pass s->args_size to the BPF augmenter
+ * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
+ * copies only what we need for each syscall, like what happens when we
+ * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
+ * traffic to just what is needed for each syscall.
+ */
+ int args_size = raw_augmented ? (8 * (int)sizeof(long)) : sc->args_size;
- *augmented_args_size = sample->raw_size - sc->args_size;
+ *augmented_args_size = sample->raw_size - args_size;
if (*augmented_args_size > 0)
- augmented_args = sample->raw_data + sc->args_size;
+ augmented_args = sample->raw_data + args_size;
return augmented_args;
}
@@ -1780,7 +1796,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
* here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
*/
if (evsel != trace->syscalls.events.sys_enter)
- augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size);
+ augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
ttrace->entry_time = sample->time;
msg = ttrace->entry_str;
printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
@@ -1833,7 +1849,7 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evse
goto out_put;
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
- augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size);
+ augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
fprintf(trace->output, "%s", msg);
err = 0;
@@ -3501,7 +3517,15 @@ int cmd_trace(int argc, const char **argv)
evsel->handler = trace__sys_enter;
evlist__for_each_entry(trace.evlist, evsel) {
+ bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
+
+ if (raw_syscalls_sys_exit) {
+ trace.raw_augmented_syscalls = true;
+ goto init_augmented_syscall_tp;
+ }
+
if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
+init_augmented_syscall_tp:
perf_evsel__init_augmented_syscall_tp(evsel);
perf_evsel__init_augmented_syscall_tp_ret(evsel);
evsel->handler = trace__sys_exit;
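The raw_augmented path in builtin-trace.c relies on raw_syscalls:sys_enter having a fixed payload: the common tracepoint fields (one long), the syscall number (another long) and six argument longs, i.e. 8 * sizeof(long) in total, which is what args_size becomes instead of the per-syscall sc->args_size. The arithmetic, checked against the struct used by the BPF example that follows:

    #include <stdio.h>

    /* raw_syscalls:sys_enter: common fields + nr + 6 args = 8 longs */
    struct syscall_enter_args {
            unsigned long long common_tp_fields;
            long syscall_nr;
            unsigned long args[6];
    };

    int main(void)
    {
            int raw_args_size = 8 * (int)sizeof(long);

            printf("fixed raw payload: %d bytes (struct: %zu)\n",
                   raw_args_size, sizeof(struct syscall_enter_args));
            return 0;
    }

On 64-bit both expressions come to 64 bytes; the augmented data is then everything in the sample past that offset.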
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
new file mode 100644
index 000000000000..90a19336310b
--- /dev/null
+++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
+ *
+ * Test it with:
+ *
+ * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
+ *
+ * This exactly matches what is marshalled into the raw_syscalls:sys_enter
+ * payload expected by the 'perf trace' beautifiers.
+ *
+ * For now it just uses the existing tracepoint augmentation code in 'perf
+ * trace', in the next csets we'll hook up these with the sys_enter/sys_exit
+ * code that will combine entry/exit in a strace like way.
+ */
+
+#include <stdio.h>
+#include <linux/socket.h>
+
+/* bpf-output associated map */
+struct bpf_map SEC("maps") __augmented_syscalls__ = {
+ .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(u32),
+ .max_entries = __NR_CPUS__,
+};
+
+struct syscall_enter_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ unsigned long args[6];
+};
+
+struct syscall_exit_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ long ret;
+};
+
+struct augmented_filename {
+ unsigned int size;
+ int reserved;
+ char value[256];
+};
+
+#define SYS_OPEN 2
+#define SYS_OPENAT 257
+
+SEC("raw_syscalls:sys_enter")
+int sys_enter(struct syscall_enter_args *args)
+{
+ struct {
+ struct syscall_enter_args args;
+ struct augmented_filename filename;
+ } augmented_args;
+ unsigned int len = sizeof(augmented_args);
+ const void *filename_arg = NULL;
+
+ probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
+ /*
+ * Yonghong and Edward Cree sayz:
+ *
+ * https://www.spinics.net/lists/netdev/msg531645.html
+ *
+ * >> R0=inv(id=0) R1=inv2 R6=ctx(id=0,off=0,imm=0) R7=inv64 R10=fp0,call_-1
+ * >> 10: (bf) r1 = r6
+ * >> 11: (07) r1 += 16
+ * >> 12: (05) goto pc+2
+ * >> 15: (79) r3 = *(u64 *)(r1 +0)
+ * >> dereference of modified ctx ptr R1 off=16 disallowed
+ * > Aha, we at least got a different error message this time.
+ * > And indeed llvm has done that optimisation, rather than the more obvious
+ * > 11: r3 = *(u64 *)(r1 +16)
+ * > because it wants to have lots of reads share a single insn. You may be able
+ * > to defeat that optimisation by adding compiler barriers, idk. Maybe someone
+ * > with llvm knowledge can figure out how to stop it (ideally, llvm would know
+ * > when it's generating for bpf backend and not do that). -O0? ¯\_(ツ)_/¯
+ *
+ * The optimization mostly looks like the following:
+ *
+ * br1:
+ * ...
+ * r1 += 16
+ * goto merge
+ * br2:
+ * ...
+ * r1 += 20
+ * goto merge
+ * merge:
+ * *(u64 *)(r1 + 0)
+ *
+ * The compiler tries to merge common loads. There is no easy way to
+ * stop this compiler optimization without turning off a lot of other
+ * optimizations. The easiest way is to add barriers:
+ *
+ * __asm__ __volatile__("": : :"memory")
+ *
+ * after the ctx memory access to prevent their down stream merging.
+ */
+ switch (augmented_args.args.syscall_nr) {
+ case SYS_OPEN: filename_arg = (const void *)args->args[0];
+ __asm__ __volatile__("": : :"memory");
+ break;
+ case SYS_OPENAT: filename_arg = (const void *)args->args[1];
+ break;
+ }
+
+ if (filename_arg != NULL) {
+ augmented_args.filename.reserved = 0;
+ augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
+ sizeof(augmented_args.filename.value),
+ filename_arg);
+ if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
+ len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
+ len &= sizeof(augmented_args.filename.value) - 1;
+ }
+ } else {
+ len = sizeof(augmented_args.args);
+ }
+
+ perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
+ return 0;
+}
+
+SEC("raw_syscalls:sys_exit")
+int sys_exit(struct syscall_exit_args *args)
+{
+ return 1; /* 0 as soon as we start copying data returned by the kernel, e.g. 'read' */
+}
+
+license(GPL);
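The length math at the end of sys_enter() is worth unpacking: len starts as the full on-stack struct, the first line trims the unused tail of the 256-byte filename value, and the power-of-two mask re-bounds the result so the BPF verifier can prove it is in range at the perf_event_output() call. A worked example with assumed sizes (64-byte args struct, 8-byte filename header):

    #include <stdio.h>

    #define VALUE_SZ 256    /* sizeof(augmented_args.filename.value) */

    int main(void)
    {
            unsigned int full = 64 + 8 + VALUE_SZ;  /* args + hdr + value = 328 */
            unsigned int size = 13;                 /* probe_read_str() result */
            unsigned int len = full;

            len -= VALUE_SZ - size;         /* drop the unused tail -> 85 */
            len &= VALUE_SZ - 1;            /* verifier-friendly bound -> 85 */
            printf("emit %u bytes\n", len);
            return 0;
    }

Note the mask only preserves the value while len stays below 256, so very long filenames would wrap, a quirk to keep in mind when adapting the example.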
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index ac1bcdc17dae..f7eb63cbbc65 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -125,7 +125,7 @@ perf_get_timestamp(void)
}
static int
-debug_cache_init(void)
+create_jit_cache_dir(void)
{
char str[32];
char *base, *p;
@@ -144,8 +144,13 @@ debug_cache_init(void)
strftime(str, sizeof(str), JIT_LANG"-jit-%Y%m%d", &tm);
- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/", base);
-
+ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/", base);
+ if (ret >= PATH_MAX) {
+ warnx("jvmti: cannot generate jit cache dir because %s/.debug/"
+ " is too long, please check the cwd, JITDUMPDIR, and"
+ " HOME variables", base);
+ return -1;
+ }
ret = mkdir(jit_path, 0755);
if (ret == -1) {
if (errno != EEXIST) {
@@ -154,20 +159,32 @@ debug_cache_init(void)
}
}
- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit", base);
+ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit", base);
+ if (ret >= PATH_MAX) {
+ warnx("jvmti: cannot generate jit cache dir because"
+ " %s/.debug/jit is too long, please check the cwd,"
+ " JITDUMPDIR, and HOME variables", base);
+ return -1;
+ }
ret = mkdir(jit_path, 0755);
if (ret == -1) {
if (errno != EEXIST) {
- warn("cannot create jit cache dir %s", jit_path);
+ warn("jvmti: cannot create jit cache dir %s", jit_path);
return -1;
}
}
- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit/%s.XXXXXXXX", base, str);
-
+ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit/%s.XXXXXXXX", base, str);
+ if (ret >= PATH_MAX) {
+ warnx("jvmti: cannot generate jit cache dir because"
+ " %s/.debug/jit/%s.XXXXXXXX is too long, please check"
+ " the cwd, JITDUMPDIR, and HOME variables",
+ base, str);
+ return -1;
+ }
p = mkdtemp(jit_path);
if (p != jit_path) {
- warn("cannot create jit cache dir %s", jit_path);
+ warn("jvmti: cannot create jit cache dir %s", jit_path);
return -1;
}
@@ -228,7 +245,7 @@ void *jvmti_open(void)
{
char dump_path[PATH_MAX];
struct jitheader header;
- int fd;
+ int fd, ret;
FILE *fp;
init_arch_timestamp();
@@ -245,12 +262,22 @@ void *jvmti_open(void)
memset(&header, 0, sizeof(header));
- debug_cache_init();
+ /*
+ * jitdump file dir
+ */
+ if (create_jit_cache_dir() < 0)
+ return NULL;
/*
* jitdump file name
*/
- scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+ ret = snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+ if (ret >= PATH_MAX) {
+ warnx("jvmti: cannot generate jitdump file full path because"
+ " %s/jit-%i.dump is too long, please check the cwd,"
+ " JITDUMPDIR, and HOME variables", jit_path, getpid());
+ return NULL;
+ }
fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
if (fd == -1)
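All the jvmti_agent.c changes follow one rule: snprintf() returns the length the formatted string would have needed, so ret >= PATH_MAX (or a negative return) is the portable truncation test, unlike the old PATH_MAX - 1 calls, which silently produced a clipped path. A minimal sketch:

    #include <limits.h>
    #include <stdio.h>

    #ifndef PATH_MAX
    #define PATH_MAX 4096
    #endif

    static int build_path(char *out, const char *base)
    {
            int ret = snprintf(out, PATH_MAX, "%s/.debug/jit", base);

            if (ret < 0 || ret >= PATH_MAX) {       /* would not fit */
                    fprintf(stderr, "path too long: %s/.debug/jit\n", base);
                    return -1;
            }
            return 0;
    }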
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 24cb0bd56afa..f278ce5ebab7 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -119,6 +119,14 @@ def dsoname(name):
return "[kernel]"
return name
+def findnth(s, sub, n, offs=0):
+ pos = s.find(sub)
+ if pos < 0:
+ return pos
+ if n <= 1:
+ return offs + pos
+ return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1)
+
# Percent to one decimal place
def PercentToOneDP(n, d):
@@ -1464,6 +1472,317 @@ class BranchWindow(QMdiSubWindow):
else:
self.find_bar.NotFound()
+# Dialog data item converted and validated using a SQL table
+
+class SQLTableDialogDataItem():
+
+ def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+ self.glb = glb
+ self.label = label
+ self.placeholder_text = placeholder_text
+ self.table_name = table_name
+ self.match_column = match_column
+ self.column_name1 = column_name1
+ self.column_name2 = column_name2
+ self.parent = parent
+
+ self.value = ""
+
+ self.widget = QLineEdit()
+ self.widget.editingFinished.connect(self.Validate)
+ self.widget.textChanged.connect(self.Invalidate)
+ self.red = False
+ self.error = ""
+ self.validated = True
+
+ self.last_id = 0
+ self.first_time = 0
+ self.last_time = 2 ** 64
+ if self.table_name == "<timeranges>":
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
+ if query.next():
+ self.last_id = int(query.value(0))
+ self.last_time = int(query.value(1))
+ QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
+ if query.next():
+ self.first_time = int(query.value(0))
+ if placeholder_text:
+ placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
+
+ if placeholder_text:
+ self.widget.setPlaceholderText(placeholder_text)
+
+ def ValueToIds(self, value):
+ ids = []
+ query = QSqlQuery(self.glb.db)
+ stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
+ ret = query.exec_(stmt)
+ if ret:
+ while query.next():
+ ids.append(str(query.value(0)))
+ return ids
+
+ def IdBetween(self, query, lower_id, higher_id, order):
+ QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
+ if query.next():
+ return True, int(query.value(0))
+ else:
+ return False, 0
+
+ def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
+ query = QSqlQuery(self.glb.db)
+ while True:
+ next_id = int((lower_id + higher_id) / 2)
+ QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
+ if not query.next():
+ ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
+ if not ok:
+ ok, dbid = self.IdBetween(query, next_id, higher_id, "")
+ if not ok:
+ return str(higher_id)
+ next_id = dbid
+ QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
+ next_time = int(query.value(0))
+ if get_floor:
+ if target_time > next_time:
+ lower_id = next_id
+ else:
+ higher_id = next_id
+ if higher_id <= lower_id + 1:
+ return str(higher_id)
+ else:
+ if target_time >= next_time:
+ lower_id = next_id
+ else:
+ higher_id = next_id
+ if higher_id <= lower_id + 1:
+ return str(lower_id)
+
+ def ConvertRelativeTime(self, val):
+ print "val ", val
+ mult = 1
+ suffix = val[-2:]
+ if suffix == "ms":
+ mult = 1000000
+ elif suffix == "us":
+ mult = 1000
+ elif suffix == "ns":
+ mult = 1
+ else:
+ return val
+ val = val[:-2].strip()
+ if not self.IsNumber(val):
+ return val
+ val = int(val) * mult
+ if val >= 0:
+ val += self.first_time
+ else:
+ val += self.last_time
+ return str(val)
+
+ def ConvertTimeRange(self, vrange):
+ print "vrange ", vrange
+ if vrange[0] == "":
+ vrange[0] = str(self.first_time)
+ if vrange[1] == "":
+ vrange[1] = str(self.last_time)
+ vrange[0] = self.ConvertRelativeTime(vrange[0])
+ vrange[1] = self.ConvertRelativeTime(vrange[1])
+ print "vrange2 ", vrange
+ if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+ return False
+ print "ok1"
+ beg_range = max(int(vrange[0]), self.first_time)
+ end_range = min(int(vrange[1]), self.last_time)
+ if beg_range > self.last_time or end_range < self.first_time:
+ return False
+ print "ok2"
+ vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
+ vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
+ print "vrange3 ", vrange
+ return True
+
+ def AddTimeRange(self, value, ranges):
+ print "value ", value
+ n = value.count("-")
+ if n == 1:
+ pass
+ elif n == 2:
+ if value.split("-")[1].strip() == "":
+ n = 1
+ elif n == 3:
+ n = 2
+ else:
+ return False
+ pos = findnth(value, "-", n)
+ vrange = [value[:pos].strip() ,value[pos+1:].strip()]
+ if self.ConvertTimeRange(vrange):
+ ranges.append(vrange)
+ return True
+ return False
+
+ def InvalidValue(self, value):
+ self.value = ""
+ palette = QPalette()
+ palette.setColor(QPalette.Text,Qt.red)
+ self.widget.setPalette(palette)
+ self.red = True
+ self.error = self.label + " invalid value '" + value + "'"
+ self.parent.ShowMessage(self.error)
+
+ def IsNumber(self, value):
+ try:
+ x = int(value)
+ except:
+ x = 0
+ return str(x) == value
+
+ def Invalidate(self):
+ self.validated = False
+
+ def Validate(self):
+ input_string = self.widget.text()
+ self.validated = True
+ if self.red:
+ palette = QPalette()
+ self.widget.setPalette(palette)
+ self.red = False
+ if not len(input_string.strip()):
+ self.error = ""
+ self.value = ""
+ return
+ if self.table_name == "<timeranges>":
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if not self.AddTimeRange(value, ranges):
+ return self.InvalidValue(value)
+ ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
+ self.value = " OR ".join(ranges)
+ elif self.table_name == "<ranges>":
+ singles = []
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if "-" in value:
+ vrange = value.split("-")
+ if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+ return self.InvalidValue(value)
+ ranges.append(vrange)
+ else:
+ if not self.IsNumber(value):
+ return self.InvalidValue(value)
+ singles.append(value)
+ ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
+ if len(singles):
+ ranges.append(self.column_name1 + " IN (" + ",".join(singles) + ")")
+ self.value = " OR ".join(ranges)
+ elif self.table_name:
+ all_ids = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ ids = self.ValueToIds(value)
+ if len(ids):
+ all_ids.extend(ids)
+ else:
+ return self.InvalidValue(value)
+ self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
+ if self.column_name2:
+ self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
+ else:
+ self.value = input_string.strip()
+ self.error = ""
+ self.parent.ClearMessage()
+
+ def IsValid(self):
+ if not self.validated:
+ self.Validate()
+ if len(self.error):
+ self.parent.ShowMessage(self.error)
+ return False
+ return True
+
+# Selected branch report creation dialog
+
+class SelectedBranchDialog(QDialog):
+
+ def __init__(self, glb, parent=None):
+ super(SelectedBranchDialog, self).__init__(parent)
+
+ self.glb = glb
+
+ self.name = ""
+ self.where_clause = ""
+
+ self.setWindowTitle("Selected Branches")
+ self.setMinimumWidth(600)
+
+ items = (
+ ("Report name:", "Enter a name to appear in the window title bar", "", "", "", ""),
+ ("Time ranges:", "Enter time ranges", "<timeranges>", "", "samples.id", ""),
+ ("CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "<ranges>", "", "cpu", ""),
+ ("Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", ""),
+ ("PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", ""),
+ ("TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", ""),
+ ("DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id"),
+ ("Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id"),
+ ("Raw SQL clause: ", "Enter a raw SQL WHERE clause", "", "", "", ""),
+ )
+ self.data_items = [SQLTableDialogDataItem(glb, *x, parent=self) for x in items]
+
+ self.grid = QGridLayout()
+
+ for row in xrange(len(self.data_items)):
+ self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
+ self.grid.addWidget(self.data_items[row].widget, row, 1)
+
+ self.status = QLabel()
+
+ self.ok_button = QPushButton("Ok", self)
+ self.ok_button.setDefault(True)
+ self.ok_button.released.connect(self.Ok)
+ self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+ self.cancel_button = QPushButton("Cancel", self)
+ self.cancel_button.released.connect(self.reject)
+ self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+ self.hbox = QHBoxLayout()
+ #self.hbox.addStretch()
+ self.hbox.addWidget(self.status)
+ self.hbox.addWidget(self.ok_button)
+ self.hbox.addWidget(self.cancel_button)
+
+ self.vbox = QVBoxLayout()
+ self.vbox.addLayout(self.grid)
+ self.vbox.addLayout(self.hbox)
+
+ self.setLayout(self.vbox);
+
+ def Ok(self):
+ self.name = self.data_items[0].value
+ if not self.name:
+ self.ShowMessage("Report name is required")
+ return
+ for d in self.data_items:
+ if not d.IsValid():
+ return
+ for d in self.data_items[1:]:
+ if len(d.value):
+ if len(self.where_clause):
+ self.where_clause += " AND "
+ self.where_clause += d.value
+ if len(self.where_clause):
+ self.where_clause = " AND ( " + self.where_clause + " ) "
+ else:
+ self.ShowMessage("No selection")
+ return
+ self.accept()
+
+ def ShowMessage(self, msg):
+ self.status.setText("<font color=#FF0000>" + msg)
+
+ def ClearMessage(self):
+ self.status.setText("")
+
# Event list
def GetEventList(db):
@@ -1656,7 +1975,7 @@ class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
- self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
+ self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
else:
self.find_bar.NotFound()
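
The hunk above routes a found row through the proxy model before selecting it in the view. A minimal sketch of why that mapping is needed, assuming a PyQt5 environment purely for illustration (the viewer's own Qt binding may differ): once a QSortFilterProxyModel sits between the data model and the view, a row number located in the source model must be translated with mapFromSource() before the view can use it.

    from PyQt5.QtCore import (QModelIndex, QSortFilterProxyModel,
                              QStringListModel, Qt)

    data_model = QStringListModel(["alpha", "beta", "gamma"])
    proxy = QSortFilterProxyModel()
    proxy.setSourceModel(data_model)
    proxy.sort(0, Qt.DescendingOrder)  # the view now shows gamma, beta, alpha

    # A row found by searching the *source* model...
    source_index = data_model.index(0, 0, QModelIndex())
    # ...must be mapped before being handed to the view.
    proxy_index = proxy.mapFromSource(source_index)
    print(source_index.row(), proxy_index.row())  # 0 2
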
@@ -1765,6 +2084,149 @@ class WindowMenu():
def setActiveSubWindow(self, nr):
self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
+# Help text
+
+glb_help_text = """
+<h1>Contents</h1>
+<style>
+p.c1 {
+ text-indent: 40px;
+}
+p.c2 {
+ text-indent: 80px;
+}
+</style>
+<p class=c1><a href=#reports>1. Reports</a></p>
+<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
+<p class=c2><a href=#allbranches>1.2 All branches</a></p>
+<p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p>
+<p class=c1><a href=#tables>2. Tables</a></p>
+<h1 id=reports>1. Reports</h1>
+<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
+The result is a GUI window with a tree representing a context-sensitive
+call-graph. Expanding a couple of levels of the tree and adjusting column
+widths to suit will display something like:
+<pre>
+                                        Call Graph: pt_example
+Call Path                               Object        Count  Time(ns)  Time(%)  Branch Count  Branch Count(%)
+v- ls
+    v- 2638:2638
+        v- _start                       ld-2.19.so        1  10074071    100.0        211135            100.0
+          |- unknown                    unknown           1     13198      0.1             1              0.0
+          >- _dl_start                  ld-2.19.so        1   1400980     13.9         19637              9.3
+          >- _dl_init_internal          ld-2.19.so        1    448152      4.4         11094              5.3
+          v- __libc_start_main@plt      ls                1   8211741     81.5        180397             85.4
+            >- _dl_fixup                ld-2.19.so        1      7607      0.1           108              0.1
+            >- __cxa_atexit             libc-2.19.so      1     11737      0.1            10              0.0
+            >- __libc_csu_init          ls                1     10354      0.1            10              0.0
+            |- _setjmp                  libc-2.19.so      1         0      0.0             4              0.0
+            v- main                     ls                1   8182043     99.6        180254             99.9
+</pre>
+<h3>Points to note:</h3>
+<ul>
+<li>The top level is a command name (comm)</li>
+<li>The next level is a thread (pid:tid)</li>
+<li>Subsequent levels are functions</li>
+<li>'Count' is the number of calls</li>
+<li>'Time' is the elapsed time until the function returns</li>
+<li>Percentages are relative to the level above</li>
+<li>'Branch Count' is the total number of branches for that function and all functions that it calls</li>
+</ul>
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
+The pattern matching symbols are ? for any character and * for zero or more characters.
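
A minimal sketch of the '?'/'*' matching described above, using only the Python standard library; whether the viewer matches client-side or in SQL is beside the point here, so treat this as illustrative only.

    import fnmatch
    import re

    pattern = "_dl_*"  # '?' = any single character, '*' = zero or more
    regex = re.compile(fnmatch.translate(pattern))
    names = ["_dl_start", "_dl_fixup", "main"]
    print([n for n in names if regex.match(n)])  # ['_dl_start', '_dl_fixup']
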
+<h2 id=allbranches>1.2 All branches</h2>
+The All branches report displays all branches in chronological order.
+Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
+<h3>Disassembly</h3>
+Open a branch to display disassembly. This only works if:
+<ol>
+<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
+<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
+The default directory ~/.debug can be overridden by setting the environment variable PERF_BUILDID_DIR.
+One exception is kcore, for which the DSO long name is used (refer to dsos_view on the Tables menu);
+alternatively, set the environment variable PERF_KCORE to the kcore file name. A sketch of this lookup follows the list.</li>
+</ol>
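
A hedged sketch of the lookup order described in item 2; the helper names are hypothetical, but the default directory and the two environment variables are as stated above.

    import os

    def buildid_cache_dir():  # hypothetical helper
        return os.environ.get("PERF_BUILDID_DIR",
                              os.path.expanduser("~/.debug"))

    def kcore_path():  # hypothetical helper; None unless explicitly set
        return os.environ.get("PERF_KCORE")

    print(buildid_cache_dir())
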
+<h4 id=xed>Intel XED Setup</h4>
+To use Intel XED, libxed.so must be present. To build and install libxed.so:
+<pre>
+git clone https://github.com/intelxed/mbuild.git mbuild
+git clone https://github.com/intelxed/xed
+cd xed
+./mfile.py --share
+sudo ./mfile.py --prefix=/usr/local install
+sudo ldconfig
+</pre>
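
Once installed, the library can be loaded via ctypes. A minimal sketch of the load-with-fallback approach that LibXED.__init__() adopts later in this patch: try the default loader search path first, then fall back to the /usr/local/lib location used by the install step above.

    from ctypes import CDLL

    def load_libxed():
        try:
            return CDLL("libxed.so")                 # default search path
        except OSError:
            return CDLL("/usr/local/lib/libxed.so")  # install prefix above

    libxed = load_libxed()
    libxed.xed_tables_init.restype = None
    libxed.xed_tables_init()                         # initialize decoder tables
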
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+<h2 id=selectedbranches>1.3 Selected branches</h2>
+This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
+by various selection criteria. A dialog box displays available criteria which are AND'ed together.
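
A minimal sketch of how the criteria combine, matching the Ok() method added earlier in this patch; the criterion strings are hypothetical.

    criteria = ["cpu IN (0,1)", "", "comm_id IN (5)"]  # hypothetical values
    clause = " AND ".join(c for c in criteria if c)    # skip empty criteria
    if clause:
        clause = " AND ( " + clause + " ) "            # appendable to a WHERE
    print(clause)  # ' AND ( cpu IN (0,1) AND comm_id IN (5) ) '
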
+<h3>1.3.1 Time ranges</h3>
+The time ranges hint text shows the total time range. Relative time ranges can also be entered in
+ms, us or ns. Negative values are relative to the end of the trace. Examples:
+<pre>
+  81073085947329-81073085958238  From 81073085947329 to 81073085958238
+  100us-200us                    From 100us to 200us
+  10ms-                          From 10ms to the end
+  -100ns                         The first 100ns
+  -10ms-                         The last 10ms
+</pre>
+N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
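
A hedged sketch of the suffix handling described above (a hypothetical parser, not the dialog's own code): bare numbers are nanoseconds, and the ms/us/ns suffixes scale accordingly; open-ended and negative forms are left to the dialog itself.

    def parse_time_ns(value):  # hypothetical helper
        scales = {"ms": 1000000, "us": 1000, "ns": 1}
        for suffix, scale in scales.items():
            if value.endswith(suffix):
                return int(value[:-len(suffix)]) * scale
        return int(value)  # no suffix: already nanoseconds

    print(parse_time_ns("100us"))  # 100000
    print(parse_time_ns("10ms"))   # 10000000
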
+<h1 id=tables>2. Tables</h1>
+The Tables menu shows all tables and views in the database. Most tables have an associated view
+which displays the information in a more friendly way. Not all data for large tables is fetched
+immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
+but that can be slow for large tables.
+<p>There are also tables of database meta-information.
+For SQLite3 databases, the sqlite_master table is included.
+For PostgreSQL databases, information_schema.tables/views/columns are included.
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
+will go to the next/previous result in id order, instead of display order.
+"""
+
+# Help window
+
+class HelpWindow(QMdiSubWindow):
+
+ def __init__(self, glb, parent=None):
+ super(HelpWindow, self).__init__(parent)
+
+ self.text = QTextBrowser()
+ self.text.setHtml(glb_help_text)
+ self.text.setReadOnly(True)
+ self.text.setOpenExternalLinks(True)
+
+ self.setWidget(self.text)
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
+
+# Main window that only displays the help text
+
+class HelpOnlyWindow(QMainWindow):
+
+ def __init__(self, parent=None):
+ super(HelpOnlyWindow, self).__init__(parent)
+
+ self.setMinimumSize(200, 100)
+ self.resize(800, 600)
+ self.setWindowTitle("Exported SQL Viewer Help")
+ self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
+
+ self.text = QTextBrowser()
+ self.text.setHtml(glb_help_text)
+ self.text.setReadOnly(True)
+ self.text.setOpenExternalLinks(True)
+
+ self.setCentralWidget(self.text)
+
# Font resize
def ResizeFont(widget, diff):
@@ -1851,6 +2313,9 @@ class MainWindow(QMainWindow):
self.window_menu = WindowMenu(self.mdi_area, menu)
+ help_menu = menu.addMenu("&Help")
+ help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
+
def Find(self):
win = self.mdi_area.activeSubWindow()
if win:
@@ -1888,6 +2353,8 @@ class MainWindow(QMainWindow):
if event == "branches":
label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")"
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self))
+ label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")"
+ reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewSelectedBranchView(x), self))
def TableMenu(self, tables, menu):
table_menu = menu.addMenu("&Tables")
@@ -1900,9 +2367,18 @@ class MainWindow(QMainWindow):
def NewBranchView(self, event_id):
BranchWindow(self.glb, event_id, "", "", self)
+ def NewSelectedBranchView(self, event_id):
+ dialog = SelectedBranchDialog(self.glb, self)
+ ret = dialog.exec_()
+ if ret:
+ BranchWindow(self.glb, event_id, dialog.name, dialog.where_clause, self)
+
def NewTableView(self, table_name):
TableWindow(self.glb, table_name, self)
+ def Help(self):
+ HelpWindow(self.glb, self)
+
# XED Disassembler
class xed_state_t(Structure):
@@ -1929,7 +2405,12 @@ class XEDInstruction():
class LibXED():
def __init__(self):
- self.libxed = CDLL("libxed.so")
+ try:
+ self.libxed = CDLL("libxed.so")
+ except OSError:
+ self.libxed = None
+ if not self.libxed:
+ self.libxed = CDLL("/usr/local/lib/libxed.so")
self.xed_tables_init = self.libxed.xed_tables_init
self.xed_tables_init.restype = None
@@ -2097,10 +2578,16 @@ class DBRef():
def Main():
if (len(sys.argv) < 2):
- print >> sys.stderr, "Usage is: exported-sql-viewer.py <database name>"
+ print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
raise Exception("Too few arguments")
dbname = sys.argv[1]
+ if dbname == "--help-only":
+ app = QApplication(sys.argv)
+ mainwindow = HelpOnlyWindow()
+ mainwindow.show()
+ err = app.exec_()
+ sys.exit(err)
is_sqlite3 = False
try:
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
index 8a33ca4f9e1f..f0729c454f16 100644
--- a/tools/perf/tests/attr/test-record-group-sampling
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -37,4 +37,3 @@ sample_freq=0
sample_period=0
freq=0
write_backward=0
-sample_id_all=0
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index e88e6f9b1463..668d2a9ef0f4 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1810,3 +1810,30 @@ void perf_evlist__force_leader(struct perf_evlist *evlist)
leader->forced_leader = true;
}
}
+
+struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
+ struct perf_evsel *evsel)
+{
+ struct perf_evsel *c2, *leader;
+ bool is_open = true;
+
+ leader = evsel->leader;
+ pr_debug("Weak group for %s/%d failed\n",
+ leader->name, leader->nr_members);
+
+ /*
+ * for_each_group_member doesn't work here because it doesn't
+ * include the first entry.
+ */
+ evlist__for_each_entry(evsel_list, c2) {
+ if (c2 == evsel)
+ is_open = false;
+ if (c2->leader == leader) {
+ if (is_open)
+ perf_evsel__close(c2);
+ c2->leader = c2;
+ c2->nr_members = 0;
+ }
+ }
+ return leader;
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index dc66436add98..9919eed6d15b 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -312,4 +312,7 @@ bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
void perf_evlist__force_leader(struct perf_evlist *evlist);
+struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 6d187059a373..d37bb1566cd9 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -956,7 +956,6 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->sample_freq = 0;
attr->sample_period = 0;
attr->write_backward = 0;
- attr->sample_id_all = 0;
}
if (opts->no_samples)
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 58f6a9ceb590..4503f3ca45ab 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1474,6 +1474,8 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
decoder->have_calc_cyc_to_tsc = false;
intel_pt_calc_cyc_to_tsc(decoder, true);
}
+
+ intel_pt_log_to("Setting timestamp", decoder->timestamp);
}
static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
@@ -1514,6 +1516,8 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
decoder->timestamp = timestamp;
decoder->timestamp_insn_cnt = 0;
+
+ intel_pt_log_to("Setting timestamp", decoder->timestamp);
}
/* Walk PSB+ packets when already in sync. */
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.c b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
index e02bc7b166a0..5e64da270f97 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
@@ -31,6 +31,11 @@ static FILE *f;
static char log_name[MAX_LOG_NAME];
bool intel_pt_enable_logging;
+void *intel_pt_log_fp(void)
+{
+ return f;
+}
+
void intel_pt_log_enable(void)
{
intel_pt_enable_logging = true;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.h b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
index 45b64f93f358..cc084937f701 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
@@ -22,6 +22,7 @@
struct intel_pt_pkt;
+void *intel_pt_log_fp(void);
void intel_pt_log_enable(void);
void intel_pt_log_disable(void);
void intel_pt_log_set_name(const char *name);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 86cc9a64e982..149ff361ca78 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -206,6 +206,16 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
intel_pt_dump(pt, buf, len);
}
+static void intel_pt_log_event(union perf_event *event)
+{
+ FILE *f = intel_pt_log_fp();
+
+ if (!intel_pt_enable_logging || !f)
+ return;
+
+ perf_event__fprintf(event, f);
+}
+
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
struct auxtrace_buffer *b)
{
@@ -2010,9 +2020,9 @@ static int intel_pt_process_event(struct perf_session *session,
event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
err = intel_pt_context_switch(pt, event, sample);
- intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
- perf_event__name(event->header.type), event->header.type,
- sample->cpu, sample->time, timestamp);
+ intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
+ event->header.type, sample->cpu, sample->time, timestamp);
+ intel_pt_log_event(event);
return err;
}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 7799788f662f..7e49baad304d 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -773,7 +773,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
if (!is_arm_pmu_core(name)) {
pname = pe->pmu ? pe->pmu : "cpu";
- if (strncmp(pname, name, strlen(pname)))
+ if (strcmp(pname, name))
continue;
}
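
The strncmp() call above compared only the first strlen(pname) characters, so an alias whose pmu string is a strict prefix of the PMU's name was wrongly accepted; strcmp() demands full equality. The same distinction transposed to Python, with hypothetical PMU names:

    pname = "cpu"
    for name in ("cpu", "cpu_something"):      # hypothetical PMU names
        prefix_match = name.startswith(pname)  # old strncmp-style test
        exact_match = name == pname            # new strcmp-style test
        print(name, prefix_match, exact_match)
    # cpu           True True
    # cpu_something True False  <- wrongly matched before the fix
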
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 1dd5f4fcffd5..db66a952c173 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -129,7 +129,7 @@ WARNINGS += $(call cc-supports,-Wno-pointer-sign)
WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
WARNINGS += -Wshadow
-CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \
+override CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \
-DPACKAGE_BUGREPORT=\"$(PACKAGE_BUGREPORT)\" -D_GNU_SOURCE
UTIL_OBJS = utils/helpers/amd.o utils/helpers/msr.o \
@@ -156,12 +156,12 @@ LIB_SRC = lib/cpufreq.c lib/cpupower.c lib/cpuidle.c
LIB_OBJS = lib/cpufreq.o lib/cpupower.o lib/cpuidle.o
LIB_OBJS := $(addprefix $(OUTPUT),$(LIB_OBJS))
-CFLAGS += -pipe
+override CFLAGS += -pipe
ifeq ($(strip $(NLS)),true)
INSTALL_NLS += install-gmo
COMPILE_NLS += create-gmo
- CFLAGS += -DNLS
+ override CFLAGS += -DNLS
endif
ifeq ($(strip $(CPUFREQ_BENCH)),true)
@@ -175,7 +175,7 @@ ifeq ($(strip $(STATIC)),true)
UTIL_SRC += $(LIB_SRC)
endif
-CFLAGS += $(WARNINGS)
+override CFLAGS += $(WARNINGS)
ifeq ($(strip $(V)),false)
QUIET=@
@@ -188,10 +188,10 @@ export QUIET ECHO
# if DEBUG is enabled, then we do not strip or optimize
ifeq ($(strip $(DEBUG)),true)
- CFLAGS += -O1 -g -DDEBUG
+ override CFLAGS += -O1 -g -DDEBUG
STRIPCMD = /bin/true -Since_we_are_debugging
else
- CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
+ override CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
endif
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
index d79ab161cc75..f68b4bc55273 100644
--- a/tools/power/cpupower/bench/Makefile
+++ b/tools/power/cpupower/bench/Makefile
@@ -9,7 +9,7 @@ endif
ifeq ($(strip $(STATIC)),true)
LIBS = -L../ -L$(OUTPUT) -lm
OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \
- $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/sysfs.o
+ $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/cpupower.o
else
LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
diff --git a/tools/power/cpupower/debug/x86_64/Makefile b/tools/power/cpupower/debug/x86_64/Makefile
index 59af84b8ef45..b1b6c43644e7 100644
--- a/tools/power/cpupower/debug/x86_64/Makefile
+++ b/tools/power/cpupower/debug/x86_64/Makefile
@@ -13,10 +13,10 @@ INSTALL = /usr/bin/install
default: all
$(OUTPUT)centrino-decode: ../i386/centrino-decode.c
- $(CC) $(CFLAGS) -o $@ $<
+ $(CC) $(CFLAGS) -o $@ $(LDFLAGS) $<
$(OUTPUT)powernow-k8-decode: ../i386/powernow-k8-decode.c
- $(CC) $(CFLAGS) -o $@ $<
+ $(CC) $(CFLAGS) -o $@ $(LDFLAGS) $<
all: $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode
diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c
index 1b993fe1ce23..0c0f3e3f0d80 100644
--- a/tools/power/cpupower/lib/cpufreq.c
+++ b/tools/power/cpupower/lib/cpufreq.c
@@ -28,7 +28,7 @@ static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
cpu, fname);
- return sysfs_read_file(path, buf, buflen);
+ return cpupower_read_sysfs(path, buf, buflen);
}
/* helper function to write a new value to a /sys file */
diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c
index 9bd4c7655fdb..852d25462388 100644
--- a/tools/power/cpupower/lib/cpuidle.c
+++ b/tools/power/cpupower/lib/cpuidle.c
@@ -319,7 +319,7 @@ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf,
snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname);
- return sysfs_read_file(path, buf, buflen);
+ return cpupower_read_sysfs(path, buf, buflen);
}
diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c
index 9c395ec924de..9711d628b0f4 100644
--- a/tools/power/cpupower/lib/cpupower.c
+++ b/tools/power/cpupower/lib/cpupower.c
@@ -15,7 +15,7 @@
#include "cpupower.h"
#include "cpupower_intern.h"
-unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen)
+unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen)
{
int fd;
ssize_t numread;
@@ -95,7 +95,7 @@ static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *re
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s",
cpu, fname);
- if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0)
+ if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0)
return -1;
*result = strtol(linebuf, &endp, 0);
if (endp == linebuf || errno == ERANGE)
diff --git a/tools/power/cpupower/lib/cpupower_intern.h b/tools/power/cpupower/lib/cpupower_intern.h
index 92affdfbe417..4887c76d23f8 100644
--- a/tools/power/cpupower/lib/cpupower_intern.h
+++ b/tools/power/cpupower/lib/cpupower_intern.h
@@ -3,4 +3,4 @@
#define MAX_LINE_LEN 4096
#define SYSFS_PATH_MAX 255
-unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen);
+unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen);
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 9527d47a1070..01ec04bf91b5 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -140,8 +140,8 @@ static u32 handle[] = {
[6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
};
-static unsigned long dimm_fail_cmd_flags[NUM_DCR];
-static int dimm_fail_cmd_code[NUM_DCR];
+static unsigned long dimm_fail_cmd_flags[ARRAY_SIZE(handle)];
+static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];
static const struct nd_intel_smart smart_def = {
.flags = ND_INTEL_SMART_HEALTH_VALID
@@ -205,7 +205,7 @@ struct nfit_test {
unsigned long deadline;
spinlock_t lock;
} ars_state;
- struct device *dimm_dev[NUM_DCR];
+ struct device *dimm_dev[ARRAY_SIZE(handle)];
struct nd_intel_smart *smart;
struct nd_intel_smart_threshold *smart_threshold;
struct badrange badrange;
@@ -2680,7 +2680,7 @@ static int nfit_test_probe(struct platform_device *pdev)
u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
int i;
- for (i = 0; i < NUM_DCR; i++)
+ for (i = 0; i < ARRAY_SIZE(handle); i++)
if (nfit_handle == handle[i])
dev_set_drvdata(nfit_test->dimm_dev[i],
nfit_mem);
diff --git a/tools/testing/selftests/powerpc/mm/wild_bctr.c b/tools/testing/selftests/powerpc/mm/wild_bctr.c
index 1b0e9e9a2ddc..f2fa101c5a6a 100644
--- a/tools/testing/selftests/powerpc/mm/wild_bctr.c
+++ b/tools/testing/selftests/powerpc/mm/wild_bctr.c
@@ -47,8 +47,9 @@ static int ok(void)
return 0;
}
-#define REG_POISON 0x5a5aUL
-#define POISONED_REG(n) ((REG_POISON << 48) | ((n) << 32) | (REG_POISON << 16) | (n))
+#define REG_POISON 0x5a5a
+#define POISONED_REG(n) ((((unsigned long)REG_POISON) << 48) | ((n) << 32) | \
+ (((unsigned long)REG_POISON) << 16) | (n))
static inline void poison_regs(void)
{
@@ -105,6 +106,20 @@ static void dump_regs(void)
}
}
+#ifdef _CALL_AIXDESC
+struct opd {
+ unsigned long ip;
+ unsigned long toc;
+ unsigned long env;
+};
+static struct opd bad_opd = {
+ .ip = BAD_NIP,
+};
+#define BAD_FUNC (&bad_opd)
+#else
+#define BAD_FUNC BAD_NIP
+#endif
+
int test_wild_bctr(void)
{
int (*func_ptr)(void);
@@ -133,7 +148,7 @@ int test_wild_bctr(void)
poison_regs();
- func_ptr = (int (*)(void))BAD_NIP;
+ func_ptr = (int (*)(void))BAD_FUNC;
func_ptr();
FAIL_IF(1); /* we didn't segv? */
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index 87a04a8a5945..7607ba3e3cbe 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -134,9 +134,9 @@ def exec_cmd(args, pm, stage, command):
(rawout, serr) = proc.communicate()
if proc.returncode != 0 and len(serr) > 0:
- foutput = serr.decode("utf-8")
+ foutput = serr.decode("utf-8", errors="ignore")
else:
- foutput = rawout.decode("utf-8")
+ foutput = rawout.decode("utf-8", errors="ignore")
proc.stdout.close()
proc.stderr.close()
@@ -169,6 +169,8 @@ def prepare_env(args, pm, stage, prefix, cmdlist, output = None):
file=sys.stderr)
print("\n{} *** Error message: \"{}\"".format(prefix, foutput),
file=sys.stderr)
+ print("returncode {}; expected {}".format(proc.returncode,
+ exit_codes))
print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr)
print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr)
print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr)
@@ -195,12 +197,18 @@ def run_one_test(pm, args, index, tidx):
print('-----> execute stage')
pm.call_pre_execute()
(p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"])
- exit_code = p.returncode
+ if p:
+ exit_code = p.returncode
+ else:
+ exit_code = None
+
pm.call_post_execute()
- if (exit_code != int(tidx["expExitCode"])):
+ if (exit_code is None or exit_code != int(tidx["expExitCode"])):
result = False
- print("exit:", exit_code, int(tidx["expExitCode"]))
+ print("exit: {!r}".format(exit_code))
+ print("exit: {}".format(int(tidx["expExitCode"])))
+ #print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"])))
print(procout)
else:
if args.verbose > 0: