-rw-r--r--  .gitignore | 1
-rw-r--r--  Documentation/DocBook/device-drivers.tmpl | 2
-rw-r--r--  Documentation/DocBook/media_api.tmpl | 4
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/regulator/palmas-pmic.txt | 4
-rw-r--r--  Documentation/kernel-parameters.txt | 2
-rw-r--r--  Documentation/sysctl/net.txt | 4
-rw-r--r--  MAINTAINERS | 40
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/Kconfig | 6
-rw-r--r--  arch/arc/include/asm/entry.h | 1
-rw-r--r--  arch/arc/lib/strchr-700.S | 10
-rw-r--r--  arch/arm/Kconfig | 4
-rw-r--r--  arch/arm/Kconfig.debug | 14
-rw-r--r--  arch/arm/Makefile | 18
-rw-r--r--  arch/arm/boot/dts/at91sam9n12ek.dts | 4
-rw-r--r--  arch/arm/boot/dts/at91sam9x5ek.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/msm8960-cdp.dts | 4
-rw-r--r--  arch/arm/boot/dts/omap5-uevm.dts | 78
-rw-r--r--  arch/arm/boot/dts/stih41x.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/tegra20-colibri-512.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/tegra20-seaboard.dts | 2
-rw-r--r--  arch/arm/boot/dts/tegra20-trimslice.dts | 2
-rw-r--r--  arch/arm/boot/dts/tegra20-whistler.dts | 4
-rw-r--r--  arch/arm/include/asm/a.out-core.h | 45
-rw-r--r--  arch/arm/include/asm/cputype.h | 7
-rw-r--r--  arch/arm/include/asm/elf.h | 6
-rw-r--r--  arch/arm/include/asm/mmu.h | 3
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 20
-rw-r--r--  arch/arm/include/asm/page.h | 2
-rw-r--r--  arch/arm/include/asm/processor.h | 4
-rw-r--r--  arch/arm/include/asm/smp_plat.h | 3
-rw-r--r--  arch/arm/include/asm/spinlock.h | 51
-rw-r--r--  arch/arm/include/asm/thread_info.h | 1
-rw-r--r--  arch/arm/include/asm/tlb.h | 7
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 16
-rw-r--r--  arch/arm/include/asm/virt.h | 12
-rw-r--r--  arch/arm/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/arm/include/uapi/asm/a.out.h | 34
-rw-r--r--  arch/arm/kernel/entry-armv.S | 106
-rw-r--r--  arch/arm/kernel/entry-v7m.S | 2
-rw-r--r--  arch/arm/kernel/fiq.c | 24
-rw-r--r--  arch/arm/kernel/head-nommu.S | 1
-rw-r--r--  arch/arm/kernel/head.S | 1
-rw-r--r--  arch/arm/kernel/hyp-stub.S | 4
-rw-r--r--  arch/arm/kernel/machine_kexec.c | 21
-rw-r--r--  arch/arm/kernel/perf_event.c | 10
-rw-r--r--  arch/arm/kernel/process.c | 49
-rw-r--r--  arch/arm/kernel/setup.c | 3
-rw-r--r--  arch/arm/kernel/signal.c | 56
-rw-r--r--  arch/arm/kernel/signal.h | 12
-rw-r--r--  arch/arm/kernel/smp.c | 10
-rw-r--r--  arch/arm/kernel/smp_tlb.c | 17
-rw-r--r--  arch/arm/kernel/traps.c | 46
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 17
-rw-r--r--  arch/arm/kvm/coproc.c | 26
-rw-r--r--  arch/arm/kvm/coproc.h | 3
-rw-r--r--  arch/arm/kvm/coproc_a15.c | 6
-rw-r--r--  arch/arm/kvm/mmio.c | 3
-rw-r--r--  arch/arm/kvm/mmu.c | 36
-rw-r--r--  arch/arm/mach-at91/at91sam9x5.c | 2
-rw-r--r--  arch/arm/mach-davinci/board-dm355-leopard.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-dm644x-evm.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-dm646x-evm.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-neuros-osd2.c | 1
-rw-r--r--  arch/arm/mach-msm/Kconfig | 3
-rw-r--r--  arch/arm/mach-msm/gpiomux-v1.c | 33
-rw-r--r--  arch/arm/mach-msm/gpiomux.h | 10
-rw-r--r--  arch/arm/mach-omap2/board-n8x0.c | 4
-rw-r--r--  arch/arm/mach-omap2/board-rx51.c | 2
-rw-r--r--  arch/arm/mach-omap2/dss-common.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap_device.c | 18
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.h | 50
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | 6
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_33xx_data.c | 3
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 9
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 5
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_54xx_data.c | 3
-rw-r--r--  arch/arm/mach-omap2/serial.c | 11
-rw-r--r--  arch/arm/mach-omap2/usb-musb.c | 5
-rw-r--r--  arch/arm/mach-shmobile/board-armadillo800eva.c | 3
-rw-r--r--  arch/arm/mach-shmobile/board-bockw.c | 8
-rw-r--r--  arch/arm/mach-shmobile/board-lager.c | 2
-rw-r--r--  arch/arm/mach-sti/headsmp.S | 2
-rw-r--r--  arch/arm/mm/Kconfig | 37
-rw-r--r--  arch/arm/mm/context.c | 3
-rw-r--r--  arch/arm/mm/mmu.c | 57
-rw-r--r--  arch/arm/mm/proc-v7-2level.S | 2
-rw-r--r--  arch/arm/mm/proc-v7-3level.S | 2
-rw-r--r--  arch/arm/mm/proc-v7.S | 11
-rw-r--r--  arch/arm/plat-samsung/init.c | 5
-rw-r--r--  arch/arm/xen/enlighten.c | 3
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 17
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 2
-rw-r--r--  arch/arm64/include/asm/tlb.h | 7
-rw-r--r--  arch/arm64/kernel/perf_event.c | 10
-rw-r--r--  arch/arm64/kvm/hyp.S | 13
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 3
-rw-r--r--  arch/avr32/boards/atngw100/mrmt.c | 1
-rw-r--r--  arch/hexagon/Kconfig | 1
-rw-r--r--  arch/ia64/configs/generic_defconfig | 2
-rw-r--r--  arch/ia64/configs/gensparse_defconfig | 2
-rw-r--r--  arch/ia64/configs/tiger_defconfig | 2
-rw-r--r--  arch/ia64/configs/xen_domu_defconfig | 2
-rw-r--r--  arch/ia64/include/asm/tlb.h | 9
-rw-r--r--  arch/m68k/emu/natfeat.c | 23
-rw-r--r--  arch/m68k/include/asm/div64.h | 9
-rw-r--r--  arch/microblaze/Kconfig | 2
-rw-r--r--  arch/mips/Kconfig | 1
-rw-r--r--  arch/mips/bcm47xx/Kconfig | 1
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 2
-rw-r--r--  arch/mips/include/asm/mach-generic/spaces.h | 4
-rw-r--r--  arch/mips/include/uapi/asm/siginfo.h | 7
-rw-r--r--  arch/mips/kernel/bmips_vec.S | 6
-rw-r--r--  arch/mips/kernel/smp-bmips.c | 22
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 26
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c | 2
-rw-r--r--  arch/mips/pnx833x/common/platform.c | 2
-rw-r--r--  arch/mips/powertv/asic/asic_devices.c | 3
-rw-r--r--  arch/openrisc/Kconfig | 1
-rw-r--r--  arch/parisc/configs/c8000_defconfig | 279
-rw-r--r--  arch/parisc/include/asm/parisc-device.h | 3
-rw-r--r--  arch/parisc/kernel/cache.c | 135
-rw-r--r--  arch/parisc/kernel/inventory.c | 1
-rw-r--r--  arch/parisc/kernel/signal.c | 7
-rw-r--r--  arch/parisc/kernel/signal32.c | 1
-rw-r--r--  arch/parisc/kernel/sys32.h | 36
-rw-r--r--  arch/parisc/kernel/sys_parisc32.c | 2
-rw-r--r--  arch/powerpc/Kconfig | 2
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 2
-rw-r--r--  arch/powerpc/configs/ppc64e_defconfig | 2
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 2
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h | 6
-rw-r--r--  arch/powerpc/include/asm/processor.h | 4
-rw-r--r--  arch/powerpc/include/asm/reg.h | 31
-rw-r--r--  arch/powerpc/include/asm/smp.h | 4
-rw-r--r--  arch/powerpc/include/asm/switch_to.h | 9
-rw-r--r--  arch/powerpc/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/include/uapi/asm/perf_event.h | 18
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 3
-rw-r--r--  arch/powerpc/kernel/eeh.c | 2
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 36
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 5
-rw-r--r--  arch/powerpc/kernel/irq.c | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 10
-rw-r--r--  arch/powerpc/kernel/tm.S | 20
-rw-r--r--  arch/powerpc/kernel/traps.c | 58
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 4
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 5
-rw-r--r--  arch/powerpc/mm/numa.c | 59
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 2
-rw-r--r--  arch/powerpc/perf/power8-pmu.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c | 80
-rw-r--r--  arch/s390/Kconfig | 9
-rw-r--r--  arch/s390/boot/compressed/Makefile | 9
-rw-r--r--  arch/s390/boot/compressed/misc.c | 4
-rw-r--r--  arch/s390/include/asm/bitops.h | 2
-rw-r--r--  arch/s390/include/asm/tlb.h | 8
-rw-r--r--  arch/s390/kernel/perf_event.c | 9
-rw-r--r--  arch/s390/kernel/setup.c | 1
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 21
-rw-r--r--  arch/s390/kvm/priv.c | 4
-rw-r--r--  arch/s390/mm/init.c | 1
-rw-r--r--  arch/s390/oprofile/init.c | 2
-rw-r--r--  arch/score/Kconfig | 2
-rw-r--r--  arch/sh/configs/sh03_defconfig | 2
-rw-r--r--  arch/sh/include/asm/tlb.h | 6
-rw-r--r--  arch/um/include/asm/tlb.h | 6
-rw-r--r--  arch/x86/boot/compressed/eboot.c | 2
-rw-r--r--  arch/x86/include/asm/bootparam_utils.h | 4
-rw-r--r--  arch/x86/include/asm/microcode_amd.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable-2level.h | 48
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h | 3
-rw-r--r--  arch/x86/include/asm/pgtable.h | 30
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 17
-rw-r--r--  arch/x86/include/asm/spinlock.h | 4
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 20
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-severity.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 4
-rw-r--r--  arch/x86/kernel/early-quirks.c | 14
-rw-r--r--  arch/x86/kernel/i387.c | 2
-rw-r--r--  arch/x86/kernel/microcode_amd.c | 36
-rw-r--r--  arch/x86/kernel/microcode_amd_early.c | 27
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 2
-rw-r--r--  arch/x86/mm/mmap.c | 6
-rw-r--r--  arch/x86/platform/ce4100/ce4100.c | 1
-rw-r--r--  arch/x86/xen/setup.c | 22
-rw-r--r--  arch/x86/xen/smp.c | 11
-rw-r--r--  drivers/accessibility/braille/braille_console.c | 9
-rw-r--r--  drivers/acpi/acpi_processor.c | 3
-rw-r--r--  drivers/acpi/battery.c | 2
-rw-r--r--  drivers/acpi/glue.c | 133
-rw-r--r--  drivers/acpi/proc.c | 8
-rw-r--r--  drivers/acpi/video.c | 13
-rw-r--r--  drivers/ata/libata-pmp.c | 12
-rw-r--r--  drivers/ata/pata_imx.c | 1
-rw-r--r--  drivers/ata/sata_fsl.c | 5
-rw-r--r--  drivers/ata/sata_highbank.c | 4
-rw-r--r--  drivers/base/regmap/regcache.c | 3
-rw-r--r--  drivers/block/aoe/aoecmd.c | 17
-rw-r--r--  drivers/bluetooth/ath3k.c | 46
-rw-r--r--  drivers/bluetooth/btusb.c | 18
-rw-r--r--  drivers/char/agp/parisc-agp.c | 6
-rw-r--r--  drivers/char/virtio_console.c | 70
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 64
-rw-r--r--  drivers/clk/zynq/clkc.c | 13
-rw-r--r--  drivers/cpufreq/cpufreq.c | 19
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 20
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 4
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 20
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 11
-rw-r--r--  drivers/cpuidle/governors/menu.c | 106
-rw-r--r--  drivers/dma/pch_dma.c | 1
-rw-r--r--  drivers/dma/pl330.c | 93
-rw-r--r--  drivers/dma/sh/shdma.c | 4
-rw-r--r--  drivers/firewire/core-cdev.c | 3
-rw-r--r--  drivers/firewire/ohci.c | 10
-rw-r--r--  drivers/firmware/dmi_scan.c | 14
-rw-r--r--  drivers/gpio/gpio-msm-v1.c | 1
-rw-r--r--  drivers/gpio/gpio-omap.c | 84
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 55
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_ddc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmiphy.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 90
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 12
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 46
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/mm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/xtensa.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/mc.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/vm.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/base.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 24
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 615
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/atombios_i2c.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/cayman_blit_shaders.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 5239
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.h | 332
-rw-r--r--  drivers/gpu/drm/radeon/ci_smc.c | 262
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 3127
-rw-r--r--  drivers/gpu/drm/radeon/cik_reg.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 785
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 594
-rw-r--r--  drivers/gpu/drm/radeon/clearstate_cayman.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/clearstate_ci.h | 944
-rw-r--r--  drivers/gpu/drm/radeon/clearstate_evergreen.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 278
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 534
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 729
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_shaders.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c | 190
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 98
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 14
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 2645
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.h | 199
-rw-r--r--  drivers/gpu/drm/radeon/kv_smc.c | 207
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 381
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 338
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/ppsmc.h | 57
-rw-r--r--  drivers/gpu/drm/radeon/pptable.h | 682
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 796
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 60
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 785
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 497
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 300
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 150
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 39
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 278
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 1262
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 119
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 188
-rw-r--r--  drivers/gpu/drm/radeon/radeon_blit_common.h | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 97
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ucode.h | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 159
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 217
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dma.c | 101
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 843
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 235
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 180
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 71
-rw-r--r--  drivers/gpu/drm/radeon/smu7.h | 170
-rw-r--r--  drivers/gpu/drm/radeon/smu7_discrete.h | 486
-rw-r--r--  drivers/gpu/drm/radeon/smu7_fusion.h | 300
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 436
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v2_2.c | 165
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v3_1.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v4_2.c | 68
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 58
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 45
-rw-r--r--  drivers/hid/hid-logitech-dj.h | 1
-rw-r--r--  drivers/hid/hid-sony.c | 3
-rw-r--r--  drivers/hid/hidraw.c | 2
-rw-r--r--  drivers/hwmon/adt7470.c | 2
-rw-r--r--  drivers/hwmon/max6697.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-kempld.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-mxs.c | 2
-rw-r--r--  drivers/iio/adc/ti_am335x_adc.c | 30
-rw-r--r--  drivers/iio/industrialio-trigger.c | 34
-rw-r--r--  drivers/iio/light/adjd_s311.c | 3
-rw-r--r--  drivers/infiniband/core/cma.c | 29
-rw-r--r--  drivers/infiniband/core/mad.c | 8
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 10
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 4
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 3
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 1
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 5
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_sdma.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 76
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_netlink.c | 9
-rw-r--r--  drivers/macintosh/windfarm_rm31.c | 18
-rw-r--r--  drivers/md/dm-cache-policy-mq.c | 16
-rw-r--r--  drivers/media/i2c/ml86v7667.c | 4
-rw-r--r--  drivers/media/platform/coda.c | 2
-rw-r--r--  drivers/media/platform/s5p-g2d/g2d.c | 1
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_dec.c | 79
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_enc.c | 46
-rw-r--r--  drivers/media/usb/em28xx/em28xx-i2c.c | 2
-rw-r--r--  drivers/media/usb/hdpvr/hdpvr-core.c | 11
-rw-r--r--  drivers/media/usb/usbtv/Kconfig | 2
-rw-r--r--  drivers/media/usb/usbtv/usbtv.c | 51
-rw-r--r--  drivers/net/arcnet/arcnet.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 8
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 10
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 2
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 1
-rw-r--r--  drivers/net/ethernet/allwinner/Kconfig | 26
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c.h | 3
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 40
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 53
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 90
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 58
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 66
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 107
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 38
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 31
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c | 1
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 135
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 98
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 85
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 62
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 27
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 101
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 29
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 46
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 6
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 49
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/filter.c | 6
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 111
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 3
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 4
-rw-r--r--  drivers/net/irda/via-ircc.c | 6
-rw-r--r--  drivers/net/macvlan.c | 27
-rw-r--r--  drivers/net/macvtap.c | 30
-rw-r--r--  drivers/net/phy/mdio-sun4i.c | 14
-rw-r--r--  drivers/net/phy/realtek.c | 4
-rw-r--r--  drivers/net/tun.c | 6
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 9
-rw-r--r--  drivers/net/usb/hso.c | 15
-rw-r--r--  drivers/net/usb/r8152.c | 126
-rw-r--r--  drivers/net/usb/r815x.c | 62
-rw-r--r--  drivers/net/usb/smsc75xx.c | 12
-rw-r--r--  drivers/net/veth.c | 1
-rw-r--r--  drivers/net/vxlan.c | 59
-rw-r--r--  drivers/net/wireless/ath/ath10k/Kconfig | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/mac80211-ops.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 44
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | 8
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 5
-rw-r--r--  drivers/net/wireless/cw1200/sta.c | 7
-rw-r--r--  drivers/net/wireless/cw1200/txrx.c | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_ioctl.c | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 16
-rw-r--r--  drivers/net/wireless/iwlegacy/common.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 15
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/debugfs.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 65
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 34
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 33
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 15
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/mwifiex/cfp.c | 3
-rw-r--r--  drivers/net/wireless/mwifiex/init.c | 10
-rw-r--r--  drivers/net/wireless/mwifiex/join.c | 6
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 13
-rw-r--r--  drivers/net/wireless/mwifiex/main.h | 1
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.c | 95
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.h | 3
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 18
-rw-r--r--  drivers/net/wireless/rtlwifi/Kconfig | 72
-rw-r--r--  drivers/net/wireless/rtlwifi/Makefile | 10
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 19
-rw-r--r--  drivers/net/wireless/rtlwifi/base.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/debug.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/efuse.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 22
-rw-r--r--  drivers/net/wireless/rtlwifi/ps.c | 16
-rw-r--r--  drivers/net/wireless/rtlwifi/ps.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 9
-rw-r--r--  drivers/net/wireless/zd1201.c | 4
-rw-r--r--  drivers/of/fdt.c | 2
-rw-r--r--  drivers/parisc/iosapic.c | 38
-rw-r--r--  drivers/pci/host/pci-mvebu.c | 27
-rw-r--r--  drivers/pci/hotplug/Kconfig | 5
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c | 9
-rw-r--r--  drivers/pci/pci-acpi.c | 15
-rw-r--r--  drivers/pci/pcie/Kconfig | 5
-rw-r--r--  drivers/pci/setup-bus.c | 69
-rw-r--r--  drivers/pinctrl/pinctrl-sunxi.c | 66
-rw-r--r--  drivers/pinctrl/pinctrl-sunxi.h | 2
-rw-r--r--  drivers/platform/olpc/olpc-ec.c | 2
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 16
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 8
-rw-r--r--  drivers/rapidio/rio.c | 4
-rw-r--r--  drivers/rtc/rtc-stmp3xxx.c | 35
-rw-r--r--  drivers/rtc/rtc-twl.c | 3
-rw-r--r--  drivers/s390/block/dasd.c | 6
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 29
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 8
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 14
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/fnic/fnic.h | 2
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 22
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 20
-rw-r--r--  drivers/scsi/scsi.c | 3
-rw-r--r--  drivers/scsi/virtio_scsi.c | 2
-rw-r--r--  drivers/spi/spi-davinci.c | 2
-rw-r--r--  drivers/staging/comedi/drivers.c | 2
-rw-r--r--  drivers/staging/zcache/zcache-main.c | 6
-rw-r--r--  drivers/tty/serial/8250/8250_gsc.c | 3
-rw-r--r--  drivers/tty/serial/arc_uart.c | 2
-rw-r--r--  drivers/tty/serial/mxs-auart.c | 38
-rw-r--r--  drivers/tty/tty_port.c | 5
-rw-r--r--  drivers/usb/chipidea/Kconfig | 4
-rw-r--r--  drivers/usb/chipidea/bits.h | 4
-rw-r--r--  drivers/usb/class/usbtmc.c | 8
-rw-r--r--  drivers/usb/core/hub.c | 5
-rw-r--r--  drivers/usb/core/quirks.c | 6
-rw-r--r--  drivers/usb/gadget/ether.c | 14
-rw-r--r--  drivers/usb/gadget/f_phonet.c | 2
-rw-r--r--  drivers/usb/gadget/multi.c | 10
-rw-r--r--  drivers/usb/gadget/udc-core.c | 2
-rw-r--r--  drivers/usb/host/ehci-sched.c | 13
-rw-r--r--  drivers/usb/host/ohci-pci.c | 5
-rw-r--r--  drivers/usb/host/xhci-mem.c | 1
-rw-r--r--  drivers/usb/host/xhci.c | 1
-rw-r--r--  drivers/usb/misc/adutux.c | 2
-rw-r--r--  drivers/usb/musb/omap2430.c | 7
-rw-r--r--  drivers/usb/musb/tusb6010.c | 7
-rw-r--r--  drivers/usb/phy/phy-fsl-usb.h | 2
-rw-r--r--  drivers/usb/phy/phy-fsm-usb.c | 2
-rw-r--r--  drivers/usb/serial/Kconfig | 7
-rw-r--r--  drivers/usb/serial/Makefile | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 31
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 34
-rw-r--r--  drivers/usb/serial/keyspan.c | 2
-rw-r--r--  drivers/usb/serial/mos7720.c | 21
-rw-r--r--  drivers/usb/serial/mos7840.c | 150
-rw-r--r--  drivers/usb/serial/suunto.c | 41
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 9
-rw-r--r--  drivers/usb/serial/usb_wwan.c | 20
-rw-r--r--  drivers/usb/wusbcore/wa-xfer.c | 9
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 23
-rw-r--r--  drivers/vfio/vfio.c | 37
-rw-r--r--  drivers/video/aty/atyfb_base.c | 4
-rw-r--r--  drivers/video/mxsfb.c | 26
-rw-r--r--  drivers/video/nuc900fb.c | 3
-rw-r--r--  drivers/video/omap2/displays-new/connector-analog-tv.c | 18
-rw-r--r--  drivers/video/sgivwfb.c | 2
-rw-r--r--  drivers/video/sh7760fb.c | 2
-rw-r--r--  drivers/video/vga16fb.c | 1
-rw-r--r--  drivers/video/xilinxfb.c | 4
-rw-r--r--  drivers/xen/Kconfig | 2
-rw-r--r--  drivers/xen/Makefile | 5
-rw-r--r--  drivers/xen/events.c | 13
-rw-r--r--  drivers/xen/evtchn.c | 21
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_frontend.c | 19
-rw-r--r--  fs/bfs/inode.c | 2
-rw-r--r--  fs/bio.c | 20
-rw-r--r--  fs/btrfs/backref.c | 48
-rw-r--r--  fs/btrfs/ctree.c | 1
-rw-r--r--  fs/btrfs/extent_io.c | 9
-rw-r--r--  fs/btrfs/file.c | 62
-rw-r--r--  fs/btrfs/inode.c | 52
-rw-r--r--  fs/btrfs/transaction.c | 8
-rw-r--r--  fs/btrfs/transaction.h | 2
-rw-r--r--  fs/btrfs/tree-log.c | 5
-rw-r--r--  fs/cifs/cifsencrypt.c | 14
-rw-r--r--  fs/cifs/cifsfs.c | 11
-rw-r--r--  fs/cifs/cifsglob.h | 4
-rw-r--r--  fs/cifs/cifsproto.h | 4
-rw-r--r--  fs/cifs/connect.c | 7
-rw-r--r--  fs/cifs/file.c | 1
-rw-r--r--  fs/cifs/link.c | 84
-rw-r--r--  fs/cifs/readdir.c | 8
-rw-r--r--  fs/cifs/sess.c | 6
-rw-r--r--  fs/cifs/smb1ops.c | 1
-rw-r--r--  fs/cifs/smb2transport.c | 9
-rw-r--r--  fs/dcache.c | 11
-rw-r--r--  fs/debugfs/inode.c | 69
-rw-r--r--  fs/dlm/user.c | 1
-rw-r--r--  fs/efs/inode.c | 2
-rw-r--r--  fs/exec.c | 4
-rw-r--r--  fs/ext4/ext4.h | 1
-rw-r--r--  fs/ext4/ext4_jbd2.c | 8
-rw-r--r--  fs/ext4/extents.c | 2
-rw-r--r--  fs/ext4/file.c | 21
-rw-r--r--  fs/ext4/ialloc.c | 10
-rw-r--r--  fs/ext4/inode.c | 82
-rw-r--r--  fs/ext4/ioctl.c | 6
-rw-r--r--  fs/ext4/super.c | 20
-rw-r--r--  fs/fcntl.c | 4
-rw-r--r--  fs/gfs2/glock.c | 8
-rw-r--r--  fs/gfs2/glops.c | 18
-rw-r--r--  fs/gfs2/inode.c | 6
-rw-r--r--  fs/gfs2/main.c | 2
-rw-r--r--  fs/hugetlbfs/inode.c | 18
-rw-r--r--  fs/lockd/clntlock.c | 13
-rw-r--r--  fs/lockd/clntproc.c | 5
-rw-r--r--  fs/namei.c | 10
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/nfs/inode.c | 11
-rw-r--r--  fs/nfs/nfs4proc.c | 8
-rw-r--r--  fs/nfs/super.c | 4
-rw-r--r--  fs/nfsd/nfs4proc.c | 2
-rw-r--r--  fs/nfsd/nfs4state.c | 2
-rw-r--r--  fs/nfsd/nfs4xdr.c | 5
-rw-r--r--  fs/nilfs2/segbuf.c | 5
-rw-r--r--  fs/ocfs2/aops.c | 2
-rw-r--r--  fs/ocfs2/dir.c | 4
-rw-r--r--  fs/ocfs2/file.c | 6
-rw-r--r--  fs/ocfs2/journal.h | 2
-rw-r--r--  fs/ocfs2/move_extents.c | 2
-rw-r--r--  fs/ocfs2/refcounttree.c | 58
-rw-r--r--  fs/ocfs2/refcounttree.h | 6
-rw-r--r--  fs/open.c | 2
-rw-r--r--  fs/proc/fd.c | 2
-rw-r--r--  fs/proc/generic.c | 2
-rw-r--r--  fs/proc/root.c | 4
-rw-r--r--  fs/proc/task_mmu.c | 31
-rw-r--r--  fs/reiserfs/procfs.c | 99
-rw-r--r--  fs/reiserfs/super.c | 3
-rw-r--r--  include/acpi/acpi_bus.h | 14
-rw-r--r--  include/asm-generic/pgtable.h | 30
-rw-r--r--  include/asm-generic/tlb.h | 2
-rw-r--r--  include/drm/drm_edid.h | 1
-rw-r--r--  include/drm/drm_fixed.h | 14
-rw-r--r--  include/drm/drm_pciids.h | 18
-rw-r--r--  include/linux/dcache.h | 1
-rw-r--r--  include/linux/firewire.h | 1
-rw-r--r--  include/linux/ftrace_event.h | 12
-rw-r--r--  include/linux/iio/trigger.h | 3
-rw-r--r--  include/linux/inetdevice.h | 34
-rw-r--r--  include/linux/ipv6.h | 1
-rw-r--r--  include/linux/kernel.h | 2
-rw-r--r--  include/linux/mfd/ti_am335x_tscadc.h | 16
-rw-r--r--  include/linux/mlx5/device.h | 42
-rw-r--r--  include/linux/mlx5/driver.h | 11
-rw-r--r--  include/linux/mm_types.h | 1
-rw-r--r--  include/linux/mod_devicetable.h | 5
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/regmap.h | 1
-rw-r--r--  include/linux/sched.h | 7
-rw-r--r--  include/linux/skbuff.h | 2
-rw-r--r--  include/linux/spinlock.h | 14
-rw-r--r--  include/linux/sunrpc/sched.h | 1
-rw-r--r--  include/linux/swapops.h | 2
-rw-r--r--  include/linux/syscalls.h | 5
-rw-r--r--  include/linux/tick.h | 6
-rw-r--r--  include/linux/user_namespace.h | 1
-rw-r--r--  include/linux/vmpressure.h | 3
-rw-r--r--  include/linux/wait.h | 57
-rw-r--r--  include/media/v4l2-ctrls.h | 1
-rw-r--r--  include/net/busy_poll.h | 18
-rw-r--r--  include/net/ip6_fib.h | 2
-rw-r--r--  include/net/ip6_route.h | 2
-rw-r--r--  include/net/ip_tunnels.h | 14
-rw-r--r--  include/net/ndisc.h | 2
-rw-r--r--  include/net/nfc/hci.h | 2
-rw-r--r--  include/net/nfc/nfc.h | 4
-rw-r--r--  include/net/sch_generic.h | 9
-rw-r--r--  include/net/sock.h | 2
-rw-r--r--  include/uapi/drm/radeon_drm.h | 2
-rw-r--r--  include/uapi/linux/firewire-cdev.h | 4
-rw-r--r--  include/uapi/linux/ip.h | 34
-rw-r--r--  include/uapi/linux/nfc.h | 6
-rw-r--r--  include/uapi/linux/pkt_sched.h | 10
-rw-r--r--  include/uapi/linux/snmp.h | 2
-rw-r--r--  init/Kconfig | 2
-rw-r--r--  kernel/Makefile | 3
-rw-r--r--  kernel/cgroup.c | 4
-rw-r--r--  kernel/cpuset.c | 20
-rw-r--r--  kernel/fork.c | 6
-rw-r--r--  kernel/freezer.c | 2
-rw-r--r--  kernel/mutex.c | 4
-rw-r--r--  kernel/power/process.c | 11
-rw-r--r--  kernel/power/qos.c | 20
-rw-r--r--  kernel/printk/Makefile | 2
-rw-r--r--  kernel/printk/braille.c | 49
-rw-r--r--  kernel/printk/braille.h | 48
-rw-r--r--  kernel/printk/console_cmdline.h | 14
-rw-r--r--  kernel/printk/printk.c (renamed from kernel/printk.c) | 183
-rw-r--r--  kernel/ptrace.c | 1
-rw-r--r--  kernel/sched/core.c | 96
-rw-r--r--  kernel/sched/cpupri.c | 4
-rw-r--r--  kernel/sched/fair.c | 14
-rw-r--r--  kernel/sysctl.c | 6
-rw-r--r--  kernel/time/sched_clock.c | 2
-rw-r--r--  kernel/time/tick-sched.c | 14
-rw-r--r--  kernel/trace/ftrace.c | 87
-rw-r--r--  kernel/trace/trace.c | 27
-rw-r--r--  kernel/trace/trace_events.c | 200
-rw-r--r--  kernel/trace/trace_events_filter.c | 17
-rw-r--r--  kernel/trace/trace_kprobe.c | 21
-rw-r--r--  kernel/trace/trace_uprobe.c | 51
-rw-r--r--  kernel/user_namespace.c | 17
-rw-r--r--  kernel/wait.c | 3
-rw-r--r--  kernel/workqueue.c | 44
-rw-r--r--  lib/lz4/lz4_compress.c | 4
-rw-r--r--  lib/lz4/lz4_decompress.c | 6
-rw-r--r--  lib/lz4/lz4hc_compress.c | 4
-rw-r--r--  mm/fremap.c | 11
-rw-r--r--  mm/huge_memory.c | 4
-rw-r--r--  mm/hugetlb.c | 2
-rw-r--r--  mm/memcontrol.c | 6
-rw-r--r--  mm/memory.c | 49
-rw-r--r--  mm/mempolicy.c | 6
-rw-r--r--  mm/mmap.c | 6
-rw-r--r--  mm/rmap.c | 14
-rw-r--r--  mm/shmem.c | 11
-rw-r--r--  mm/slub.c | 3
-rw-r--r--  mm/swap.c | 29
-rw-r--r--  mm/swapfile.c | 19
-rw-r--r--  mm/vmpressure.c | 28
-rw-r--r--  mm/zbud.c | 2
-rw-r--r--  net/8021q/vlan_core.c | 7
-rw-r--r--  net/Kconfig | 2
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 2
-rw-r--r--  net/batman-adv/gateway_client.c | 13
-rw-r--r--  net/batman-adv/gateway_client.h | 3
-rw-r--r--  net/batman-adv/soft-interface.c | 9
-rw-r--r--  net/batman-adv/unicast.c | 23
-rw-r--r--  net/bluetooth/hci_core.c | 26
-rw-r--r--  net/bridge/br_device.c | 3
-rw-r--r--  net/bridge/br_fdb.c | 10
-rw-r--r--  net/bridge/br_input.c | 3
-rw-r--r--  net/bridge/br_multicast.c | 46
-rw-r--r--  net/bridge/br_netlink.c | 4
-rw-r--r--  net/bridge/br_private.h | 12
-rw-r--r--  net/bridge/br_sysfs_br.c | 2
-rw-r--r--  net/bridge/br_vlan.c | 4
-rw-r--r--  net/core/flow_dissector.c | 1
-rw-r--r--  net/core/neighbour.c | 39
-rw-r--r--  net/core/rtnetlink.c | 4
-rw-r--r--  net/core/skbuff.c | 5
-rw-r--r--  net/core/sock.c | 6
-rw-r--r--  net/core/sysctl_net_core.c | 8
-rw-r--r--  net/ipv4/devinet.c | 4
-rw-r--r--  net/ipv4/esp4.c | 2
-rw-r--r--  net/ipv4/fib_trie.c | 7
-rw-r--r--  net/ipv4/ip_gre.c | 2
-rw-r--r--  net/ipv4/ip_tunnel_core.c | 4
-rw-r--r--  net/ipv4/proc.c | 2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 6
-rw-r--r--  net/ipv4/tcp.c | 7
-rw-r--r--  net/ipv4/tcp_cubic.c | 12
-rw-r--r--  net/ipv6/addrconf.c | 47
-rw-r--r--  net/ipv6/esp6.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 41
-rw-r--r--  net/ipv6/ip6mr.c | 5
-rw-r--r--  net/ipv6/ndisc.c | 8
-rw-r--r--  net/ipv6/reassembly.c | 5
-rw-r--r--  net/ipv6/route.c | 29
-rw-r--r--  net/key/af_key.c | 4
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/mesh_ps.c | 4
-rw-r--r--  net/mac80211/mlme.c | 54
-rw-r--r--  net/mac80211/pm.c | 7
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 3
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 10
-rw-r--r--  net/mac80211/rx.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 12
-rw-r--r--  net/netfilter/nfnetlink_log.c | 6
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c | 5
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 28
-rw-r--r--  net/netfilter/xt_TCPOPTSTRIP.c | 10
-rw-r--r--  net/netfilter/xt_socket.c | 10
-rw-r--r--  net/netlabel/netlabel_cipso_v4.c | 4
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 104
-rw-r--r--  net/netlabel/netlabel_domainhash.h | 46
-rw-r--r--  net/netlabel/netlabel_kapi.c | 88
-rw-r--r--  net/netlabel/netlabel_mgmt.c | 44
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 2
-rw-r--r--  net/netlink/genetlink.c | 4
-rw-r--r--  net/nfc/core.c | 20
-rw-r--r--  net/nfc/hci/core.c | 8
-rw-r--r--  net/nfc/nci/Kconfig | 1
-rw-r--r--  net/nfc/netlink.c | 12
-rw-r--r--  net/nfc/nfc.h | 6
-rw-r--r--  net/openvswitch/actions.c | 1
-rw-r--r--  net/openvswitch/datapath.c | 3
-rw-r--r--  net/openvswitch/flow.c | 2
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/sched/sch_api.c | 41
-rw-r--r--  net/sched/sch_atm.c | 1
-rw-r--r--  net/sched/sch_cbq.c | 1
-rw-r--r--  net/sched/sch_generic.c | 8
-rw-r--r--  net/sched/sch_htb.c | 15
-rw-r--r--  net/sctp/associola.c | 4
-rw-r--r--  net/sctp/transport.c | 4
-rw-r--r--  net/socket.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_upcall.c | 3
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_xdr.c | 9
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 2
-rw-r--r--  net/sunrpc/clnt.c | 4
-rw-r--r--  net/sunrpc/netns.h | 1
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 48
-rw-r--r--  net/sunrpc/svcsock.c | 4
-rw-r--r--  net/tipc/bearer.c | 9
-rw-r--r--  net/tipc/server.c | 15
-rw-r--r--  net/vmw_vsock/af_vsock.c | 2
-rw-r--r--  net/wireless/core.c | 1
-rw-r--r--  net/wireless/nl80211.c | 39
-rw-r--r--  net/wireless/reg.c | 7
-rw-r--r--  net/wireless/sme.c | 39
-rw-r--r--  security/smack/smack_lsm.c | 24
-rw-r--r--  sound/core/compress_offload.c | 2
-rw-r--r--  sound/pci/hda/hda_auto_parser.c | 2
-rw-r--r--  sound/pci/hda/hda_generic.c | 6
-rw-r--r--  sound/pci/hda/patch_realtek.c | 11
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 1
-rw-r--r--  sound/soc/au1x/ac97c.c | 2
-rw-r--r--  sound/soc/blackfin/bf5xx-ac97.c | 5
-rw-r--r--  sound/soc/blackfin/bf5xx-ac97.h | 1
-rw-r--r--  sound/soc/codecs/cs42l52.c | 5
-rw-r--r--  sound/soc/codecs/sgtl5000.c | 18
-rw-r--r--  sound/soc/codecs/wm0010.c | 24
-rw-r--r--  sound/soc/soc-dapm.c | 12
-rw-r--r--  sound/soc/tegra/tegra30_i2s.c | 2
-rw-r--r--  sound/usb/6fire/comm.c | 38
-rw-r--r--  sound/usb/6fire/comm.h | 2
-rw-r--r--  sound/usb/6fire/midi.c | 16
-rw-r--r--  sound/usb/6fire/midi.h | 6
-rw-r--r--  sound/usb/6fire/pcm.c | 41
-rw-r--r--  sound/usb/6fire/pcm.h | 2
-rw-r--r--  sound/usb/endpoint.c | 13
-rw-r--r--  sound/usb/mixer.c | 1
-rw-r--r--  sound/usb/quirks.c | 6
882 files changed, 28436 insertions, 10847 deletions
diff --git a/.gitignore b/.gitignore
index 3b8b9b33be38..7e9932e55475 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,6 +29,7 @@ modules.builtin
*.bz2
*.lzma
*.xz
+*.lz4
*.lzo
*.patch
*.gcno
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index cbfdf5486639..fe397f90a34f 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -84,7 +84,7 @@ X!Iinclude/linux/kobject.h
<sect1><title>Kernel utility functions</title>
!Iinclude/linux/kernel.h
-!Ekernel/printk.c
+!Ekernel/printk/printk.c
!Ekernel/panic.c
!Ekernel/sys.c
!Ekernel/rcupdate.c
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index 6a8b7158697f..9c92bb879b6d 100644
--- a/Documentation/DocBook/media_api.tmpl
+++ b/Documentation/DocBook/media_api.tmpl
@@ -1,6 +1,6 @@
<?xml version="1.0"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
<!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities;
<!ENTITY media-indices SYSTEM "./media-indices.tmpl">
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
index a1ee681942cc..6113f9275f42 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
@@ -4,7 +4,7 @@
Required properties :
- reg : Offset and length of the register set for the device
- - compatible : Should be "marvell,mv64xxx-i2c"
+ - compatible : Should be "marvell,mv64xxx-i2c" or "allwinner,sun4i-i2c"
- interrupts : The interrupt number
Optional properties :
diff --git a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
index d5a308629c57..30b0581bb1ce 100644
--- a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
+++ b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
@@ -31,9 +31,8 @@ Optional nodes:
Optional sub-node properties:
ti,warm-reset - maintain voltage during warm reset(boolean)
ti,roof-floor - control voltage selection by pin(boolean)
- ti,sleep-mode - mode to adopt in pmic sleep 0 - off, 1 - auto,
+ ti,mode-sleep - mode to adopt in pmic sleep 0 - off, 1 - auto,
2 - eco, 3 - forced pwm
- ti,tstep - slope control 0 - Jump, 1 10mV/us, 2 5mV/us, 3 2.5mV/us
ti,smps-range - OTP has the wrong range set for the hardware so override
0 - low range, 1 - high range.
@@ -59,7 +58,6 @@ pmic {
ti,warm-reset;
ti,roof-floor;
ti,mode-sleep = <0>;
- ti,tstep = <0>;
ti,smps-range = <1>;
};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 15356aca938c..7f9d4f53882c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2953,7 +2953,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
improve throughput, but will also increase the
amount of memory reserved for use by the client.
- swapaccount[=0|1]
+ swapaccount=[0|1]
[KNL] Enable accounting of swap in memory resource
controller if no parameter or 1 is given or disable
it if 0 is given (See Documentation/cgroups/memory.txt)
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 1c15043aaee4..d569f2a424d5 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -52,7 +52,7 @@ Default: 64
busy_read
----------------
-Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL)
+Low latency busy poll timeout for socket reads. (needs CONFIG_NET_RX_BUSY_POLL)
Approximate time in us to busy loop waiting for packets on the device queue.
This sets the default value of the SO_BUSY_POLL socket option.
Can be set or overridden per socket by setting socket option SO_BUSY_POLL,
@@ -63,7 +63,7 @@ Default: 0 (off)
busy_poll
----------------
-Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL)
+Low latency busy poll timeout for poll and select. (needs CONFIG_NET_RX_BUSY_POLL)
Approximate time in us to busy loop waiting for events.
Recommended value depends on the number of sockets you poll on.
For several sockets 50, for several hundreds 100.
diff --git a/MAINTAINERS b/MAINTAINERS
index a26b10e52aea..8197fbd70a3e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -965,6 +965,12 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
+M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-keystone/
+
ARM/LOGICPD PXA270 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1259,7 +1265,6 @@ F: drivers/rtc/rtc-coh901331.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/Ux500 ARM ARCHITECTURE
-M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -1406,7 +1411,7 @@ ATHEROS ATH6KL WIRELESS DRIVER
M: Kalle Valo <kvalo@qca.qualcomm.com>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/ath6kl
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git
+T: git git://github.com/kvalo/ath.git
S: Supported
F: drivers/net/wireless/ath/ath6kl/
@@ -2871,7 +2876,7 @@ F: drivers/media/usb/dvb-usb-v2/dvb_usb*
F: drivers/media/usb/dvb-usb-v2/usb_urb.c
DYNAMIC DEBUG
-M: Jason Baron <jbaron@redhat.com>
+M: Jason Baron <jbaron@akamai.com>
S: Maintained
F: lib/dynamic_debug.c
F: include/linux/dynamic_debug.h
@@ -5576,9 +5581,9 @@ S: Maintained
F: drivers/media/tuners/mxl5007t.*
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M: Andrew Gallatin <gallatin@myri.com>
+M: Hyong-Youb Kim <hykim@myri.com>
L: netdev@vger.kernel.org
-W: http://www.myri.com/scs/download-Myri10GE.html
+W: https://www.myricom.com/support/downloads/myri10ge.html
S: Supported
F: drivers/net/ethernet/myricom/myri10ge/
@@ -5879,7 +5884,7 @@ F: drivers/i2c/busses/i2c-omap.c
F: include/linux/i2c-omap.h
OMAP DEVICE TREE SUPPORT
-M: Benoît Cousson <b-cousson@ti.com>
+M: Benoît Cousson <bcousson@baylibre.com>
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
L: devicetree@vger.kernel.org
@@ -5959,14 +5964,14 @@ S: Maintained
F: drivers/char/hw_random/omap-rng.c
OMAP HWMOD SUPPORT
-M: Benoît Cousson <b-cousson@ti.com>
+M: Benoît Cousson <bcousson@baylibre.com>
M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod.*
OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
-M: Benoît Cousson <b-cousson@ti.com>
+M: Benoît Cousson <bcousson@baylibre.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -6726,6 +6731,14 @@ T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/tuners/qt1010*
+QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
+M: Kalle Valo <kvalo@qca.qualcomm.com>
+L: ath10k@lists.infradead.org
+W: http://wireless.kernel.org/en/users/Drivers/ath10k
+T: git git://github.com/kvalo/ath.git
+S: Supported
+F: drivers/net/wireless/ath/ath10k/
+
QUALCOMM HEXAGON ARCHITECTURE
M: Richard Kuo <rkuo@codeaurora.org>
L: linux-hexagon@vger.kernel.org
@@ -7353,7 +7366,6 @@ F: drivers/net/ethernet/sfc/
SGI GRU DRIVER
M: Dimitri Sivanich <sivanich@sgi.com>
-M: Robin Holt <holt@sgi.com>
S: Maintained
F: drivers/misc/sgi-gru/
@@ -7373,7 +7385,8 @@ S: Maintained for 2.6.
F: Documentation/sgi-visws.txt
SGI XP/XPC/XPNET DRIVER
-M: Robin Holt <holt@sgi.com>
+M: Cliff Whickman <cpw@sgi.com>
+M: Robin Holt <robinmholt@gmail.com>
S: Maintained
F: drivers/misc/sgi-xp/
@@ -8270,7 +8283,7 @@ S: Maintained
F: sound/soc/codecs/twl4030*
TI WILINK WIRELESS DRIVERS
-M: Luciano Coelho <coelho@ti.com>
+M: Luciano Coelho <luca@coelho.fi>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/wl12xx
W: http://wireless.kernel.org/en/users/Drivers/wl1251
@@ -8656,6 +8669,11 @@ T: git git://git.alsa-project.org/alsa-kernel.git
S: Maintained
F: sound/usb/midi.*
+USB NETWORKING DRIVERS
+L: linux-usb@vger.kernel.org
+S: Odd Fixes
+F: drivers/net/usb/
+
USB OHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
diff --git a/Makefile b/Makefile
index 95a8e55feceb..369882e4fc77 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc7
NAME = Linux for Workgroups
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d2ae24b9f4a..1feb169274fe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -407,6 +407,12 @@ config CLONE_BACKWARDS2
help
Architecture has the first two arguments of clone(2) swapped.
+config CLONE_BACKWARDS3
+ bool
+ help
+ Architecture has tls passed as the 3rd argument of clone(2),
+ not the 5th one.
+
config ODD_RT_SIGACTION
bool
help
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 8943c028d4bb..df57611652e5 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -38,6 +38,7 @@
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
#include <asm/thread_info.h> /* For THREAD_SIZE */
+#include <asm/mmu.h>
/* Note on the LD/ST addr modes with addr reg wback
*
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c10475d477..9c548c7cf001 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
ld.a r2,[r0,4]
sub r12,r6,r7
bic r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
and r7,r12,r4
breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
b .Lfound_char ; Likewise this one.
+#else
+ and r12,r12,r4
+ breq r12,0,.Loop ; For speed, we want this branch to be unaligned.
+ lsr_s r12,r12,7
+ bic r2,r7,r6
+ b.d .Lfound_char_b
+ and_s r2,r2,r12
+#endif
; /* We require this code address to be unaligned for speed... */
.Laligned:
ld_s r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
lsr r7,r7,7
bic r2,r7,r6
+.Lfound_char_b:
norm r2,r2
sub_s r0,r0,4
asr_s r2,r2,3
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 37c0f4e978d4..43594d5116ef 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -20,7 +20,6 @@ config ARM
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HARDIRQS_SW_RESEND
- select HAVE_AOUT
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
@@ -218,7 +217,8 @@ config VECTORS_BASE
default DRAM_BASE if REMAP_VECTORS_TO_RAM
default 0x00000000
help
- The base address of exception vectors.
+ The base address of exception vectors. This must be two pages
+ in size.
config ARM_PATCH_PHYS_VIRT
bool "Patch physical to virtual translations at runtime" if EMBEDDED
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index e401a766c0bd..583f4a00ec32 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -804,9 +804,19 @@ config DEBUG_LL_INCLUDE
config DEBUG_UNCOMPRESS
bool
- default y if ARCH_MULTIPLATFORM && DEBUG_LL && \
- !DEBUG_OMAP2PLUS_UART && \
+ depends on ARCH_MULTIPLATFORM
+ default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
!DEBUG_TEGRA_UART
+ help
+ This option influences the normal decompressor output for
+ multiplatform kernels. Normally, multiplatform kernels disable
+ decompressor output because it is not possible to know where to
+ send the decompressor output.
+
+ When this option is set, the selected DEBUG_LL output method
+ will be re-used for normal decompressor output on multiplatform
+ kernels.
+
config UNCOMPRESS_INCLUDE
string
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c0ac0f5e5e5c..6fd2ceae305a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -153,6 +153,7 @@ machine-$(CONFIG_ARCH_DAVINCI) += davinci
machine-$(CONFIG_ARCH_DOVE) += dove
machine-$(CONFIG_ARCH_EBSA110) += ebsa110
machine-$(CONFIG_ARCH_EP93XX) += ep93xx
+machine-$(CONFIG_ARCH_EXYNOS) += exynos
machine-$(CONFIG_ARCH_GEMINI) += gemini
machine-$(CONFIG_ARCH_HIGHBANK) += highbank
machine-$(CONFIG_ARCH_INTEGRATOR) += integrator
@@ -160,15 +161,16 @@ machine-$(CONFIG_ARCH_IOP13XX) += iop13xx
machine-$(CONFIG_ARCH_IOP32X) += iop32x
machine-$(CONFIG_ARCH_IOP33X) += iop33x
machine-$(CONFIG_ARCH_IXP4XX) += ixp4xx
+machine-$(CONFIG_ARCH_KEYSTONE) += keystone
machine-$(CONFIG_ARCH_KIRKWOOD) += kirkwood
machine-$(CONFIG_ARCH_KS8695) += ks8695
machine-$(CONFIG_ARCH_LPC32XX) += lpc32xx
machine-$(CONFIG_ARCH_MMP) += mmp
machine-$(CONFIG_ARCH_MSM) += msm
machine-$(CONFIG_ARCH_MV78XX0) += mv78xx0
+machine-$(CONFIG_ARCH_MVEBU) += mvebu
machine-$(CONFIG_ARCH_MXC) += imx
machine-$(CONFIG_ARCH_MXS) += mxs
-machine-$(CONFIG_ARCH_MVEBU) += mvebu
machine-$(CONFIG_ARCH_NETX) += netx
machine-$(CONFIG_ARCH_NOMADIK) += nomadik
machine-$(CONFIG_ARCH_NSPIRE) += nspire
@@ -176,7 +178,6 @@ machine-$(CONFIG_ARCH_OMAP1) += omap1
machine-$(CONFIG_ARCH_OMAP2PLUS) += omap2
machine-$(CONFIG_ARCH_ORION5X) += orion5x
machine-$(CONFIG_ARCH_PICOXCELL) += picoxcell
-machine-$(CONFIG_ARCH_SIRF) += prima2
machine-$(CONFIG_ARCH_PXA) += pxa
machine-$(CONFIG_ARCH_REALVIEW) += realview
machine-$(CONFIG_ARCH_ROCKCHIP) += rockchip
@@ -186,25 +187,24 @@ machine-$(CONFIG_ARCH_S3C64XX) += s3c64xx
machine-$(CONFIG_ARCH_S5P64X0) += s5p64x0
machine-$(CONFIG_ARCH_S5PC100) += s5pc100
machine-$(CONFIG_ARCH_S5PV210) += s5pv210
-machine-$(CONFIG_ARCH_EXYNOS) += exynos
machine-$(CONFIG_ARCH_SA1100) += sa1100
machine-$(CONFIG_ARCH_SHARK) += shark
machine-$(CONFIG_ARCH_SHMOBILE) += shmobile
+machine-$(CONFIG_ARCH_SIRF) += prima2
+machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
+machine-$(CONFIG_ARCH_STI) += sti
+machine-$(CONFIG_ARCH_SUNXI) += sunxi
machine-$(CONFIG_ARCH_TEGRA) += tegra
machine-$(CONFIG_ARCH_U300) += u300
machine-$(CONFIG_ARCH_U8500) += ux500
machine-$(CONFIG_ARCH_VERSATILE) += versatile
machine-$(CONFIG_ARCH_VEXPRESS) += vexpress
+machine-$(CONFIG_ARCH_VIRT) += virt
machine-$(CONFIG_ARCH_VT8500) += vt8500
machine-$(CONFIG_ARCH_W90X900) += w90x900
+machine-$(CONFIG_ARCH_ZYNQ) += zynq
machine-$(CONFIG_FOOTBRIDGE) += footbridge
-machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
machine-$(CONFIG_PLAT_SPEAR) += spear
-machine-$(CONFIG_ARCH_STI) += sti
-machine-$(CONFIG_ARCH_VIRT) += virt
-machine-$(CONFIG_ARCH_ZYNQ) += zynq
-machine-$(CONFIG_ARCH_SUNXI) += sunxi
-machine-$(CONFIG_ARCH_KEYSTONE) += keystone
# Platform directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d59b70c6a6a0..3d77dbe406f4 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -14,11 +14,11 @@
compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
chosen {
- bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
+ bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
};
memory {
- reg = <0x20000000 0x10000000>;
+ reg = <0x20000000 0x8000000>;
};
clocks {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index b753855b2058..49e3c45818c2 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -94,8 +94,9 @@
usb0: ohci@00600000 {
status = "okay";
- num-ports = <2>;
- atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW
+ num-ports = <3>;
+ atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */
+ &pioD 19 GPIO_ACTIVE_LOW
&pioD 20 GPIO_ACTIVE_LOW
>;
};
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts
index db2060c46540..9c1167b0459b 100644
--- a/arch/arm/boot/dts/msm8960-cdp.dts
+++ b/arch/arm/boot/dts/msm8960-cdp.dts
@@ -26,7 +26,7 @@
cpu-offset = <0x80000>;
};
- msmgpio: gpio@fd510000 {
+ msmgpio: gpio@800000 {
compatible = "qcom,msm-gpio";
gpio-controller;
#gpio-cells = <2>;
@@ -34,7 +34,7 @@
interrupts = <0 32 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
- reg = <0xfd510000 0x4000>;
+ reg = <0x800000 0x4000>;
};
serial@16440000 {
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 08b72678abff..65d7b601651c 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -235,7 +235,7 @@
};
&mmc1 {
- vmmc-supply = <&vmmcsd_fixed>;
+ vmmc-supply = <&ldo9_reg>;
bus-width = <4>;
};
@@ -282,6 +282,7 @@
regulators {
smps123_reg: smps123 {
+ /* VDD_OPP_MPU */
regulator-name = "smps123";
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1500000>;
@@ -290,6 +291,7 @@
};
smps45_reg: smps45 {
+ /* VDD_OPP_MM */
regulator-name = "smps45";
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1310000>;
@@ -298,6 +300,7 @@
};
smps6_reg: smps6 {
+ /* VDD_DDR3 - over VDD_SMPS6 */
regulator-name = "smps6";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
@@ -306,6 +309,7 @@
};
smps7_reg: smps7 {
+ /* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
regulator-name = "smps7";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -314,6 +318,7 @@
};
smps8_reg: smps8 {
+ /* VDD_OPP_CORE */
regulator-name = "smps8";
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1310000>;
@@ -322,15 +327,15 @@
};
smps9_reg: smps9 {
+ /* VDDA_2v1_AUD over VDD_2v1 */
regulator-name = "smps9";
regulator-min-microvolt = <2100000>;
regulator-max-microvolt = <2100000>;
- regulator-always-on;
- regulator-boot-on;
ti,smps-range = <0x80>;
};
smps10_reg: smps10 {
+ /* VBUS_5V_OTG */
regulator-name = "smps10";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@@ -339,38 +344,40 @@
};
ldo1_reg: ldo1 {
+ /* VDDAPHY_CAM: vdda_csiport */
regulator-name = "ldo1";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1800000>;
};
ldo2_reg: ldo2 {
+ /* VCC_2V8_DISP: Does not go anywhere */
regulator-name = "ldo2";
- regulator-min-microvolt = <2900000>;
- regulator-max-microvolt = <2900000>;
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ /* Unused */
+ status = "disabled";
};
ldo3_reg: ldo3 {
+ /* VDDAPHY_MDM: vdda_lli */
regulator-name = "ldo3";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-always-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
regulator-boot-on;
+ /* Only if Modem is used */
+ status = "disabled";
};
ldo4_reg: ldo4 {
+ /* VDDAPHY_DISP: vdda_dsiport/hdmi */
regulator-name = "ldo4";
- regulator-min-microvolt = <2200000>;
- regulator-max-microvolt = <2200000>;
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1800000>;
};
ldo5_reg: ldo5 {
+ /* VDDA_1V8_PHY: usb/sata/hdmi.. */
regulator-name = "ldo5";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -379,38 +386,43 @@
};
ldo6_reg: ldo6 {
+ /* VDDS_1V2_WKUP: hsic/ldo_emu_wkup */
regulator-name = "ldo6";
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
regulator-always-on;
regulator-boot-on;
};
ldo7_reg: ldo7 {
+ /* VDD_VPP: vpp1 */
regulator-name = "ldo7";
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ /* Only for efuse reprogramming! */
+ status = "disabled";
};
ldo8_reg: ldo8 {
+ /* VDD_3v0: Does not go anywhere */
regulator-name = "ldo8";
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
- regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
regulator-boot-on;
+ /* Unused */
+ status = "disabled";
};
ldo9_reg: ldo9 {
+ /* VCC_DV_SDIO: vdds_sdcard */
regulator-name = "ldo9";
regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
+ regulator-max-microvolt = <3000000>;
regulator-boot-on;
};
ldoln_reg: ldoln {
+ /* VDDA_1v8_REF: vdds_osc/mm_l4per.. */
regulator-name = "ldoln";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -419,12 +431,20 @@
};
ldousb_reg: ldousb {
+ /* VDDA_3V_USB: VDDA_USBHS33 */
regulator-name = "ldousb";
regulator-min-microvolt = <3250000>;
regulator-max-microvolt = <3250000>;
regulator-always-on;
regulator-boot-on;
};
+
+ regen3_reg: regen3 {
+ /* REGEN3 controls LDO9 supply to card */
+ regulator-name = "regen3";
+ regulator-always-on;
+ regulator-boot-on;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/stih41x.dtsi b/arch/arm/boot/dts/stih41x.dtsi
index 7321403cab8a..f5b9898d9c6e 100644
--- a/arch/arm/boot/dts/stih41x.dtsi
+++ b/arch/arm/boot/dts/stih41x.dtsi
@@ -6,10 +6,12 @@
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
+ device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
};
cpu@1 {
+ device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
};
diff --git a/arch/arm/boot/dts/tegra20-colibri-512.dtsi b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
index 2fcb3f2ca160..5592be6f2f7a 100644
--- a/arch/arm/boot/dts/tegra20-colibri-512.dtsi
+++ b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
@@ -457,6 +457,7 @@
};
usb-phy@c5004000 {
+ status = "okay";
nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1)
GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index 365760b33a26..40e6fb280333 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -830,6 +830,8 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&gpio 24 0>; /* PD0 */
+ regulator-always-on;
+ regulator-boot-on;
};
};
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index ed4b901b0227..37c93d3c4812 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -412,6 +412,8 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&gpio 170 0>; /* PV2 */
+ regulator-always-on;
+ regulator-boot-on;
};
};
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index ab67c94db280..a3d0ebad78a1 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -588,6 +588,8 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
+ regulator-always-on;
+ regulator-boot-on;
};
vbus3_reg: regulator@3 {
@@ -598,6 +600,8 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
+ regulator-always-on;
+ regulator-boot-on;
};
};
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
deleted file mode 100644
index 92f10cb5c70c..000000000000
--- a/arch/arm/include/asm/a.out-core.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _ASM_A_OUT_CORE_H
-#define _ASM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-#include <linux/elfcore.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
-{
- struct task_struct *tsk = current;
-
- dump->magic = CMAGIC;
- dump->start_code = tsk->mm->start_code;
- dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
-
- dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
- dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
- dump->u_ssize = 0;
-
- memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
-
- if (dump->start_stack < 0x04000000)
- dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
-
- dump->regs = *regs;
- dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_A_OUT_CORE_H */
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 8c25dc4e9851..9672e978d50d 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -89,13 +89,18 @@ extern unsigned int processor_id;
__val; \
})
+/*
+ * The memory clobber prevents gcc 4.5 from reordering the mrc before
+ * any is_smp() tests, which can cause undefined instruction aborts on
+ * ARM1136 r0 due to the missing extended CP15 registers.
+ */
#define read_cpuid_ext(ext_reg) \
({ \
unsigned int __val; \
asm("mrc p15, 0, %0, c0, " ext_reg \
: "=r" (__val) \
: \
- : "cc"); \
+ : "memory"); \
__val; \
})
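
The hunk above swaps the "cc" clobber for a "memory" clobber so the compiler cannot hoist the mrc above a guarding is_smp() test. A stand-alone sketch of the same compiler-barrier effect in plain user-space C follows; the names here are invented for illustration and are not kernel API:

static int cpu_checked;	/* stand-in for the is_smp() guard */

static inline unsigned int read_ext_reg(void)
{
	unsigned int val = 0;
	/*
	 * The "memory" clobber forces all prior loads and stores
	 * (including the read of cpu_checked below) to be emitted
	 * before this asm; a mere "cc" clobber would not stop the
	 * compiler from hoisting the asm above the guard.
	 */
	asm volatile("" : "+r" (val) : : "memory");
	return val;
}

unsigned int safe_read(void)
{
	if (!cpu_checked)	/* must be evaluated before the asm */
		return 0;
	return read_ext_reg();
}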
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 38050b1c4800..56211f2084ef 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -130,4 +130,10 @@ struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
+#ifdef CONFIG_MMU
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+int arch_setup_additional_pages(struct linux_binprm *, int);
+#endif
+
#endif
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index e3d55547e755..6f18da09668b 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,8 +6,11 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
atomic64_t id;
+#else
+ int switch_pending;
#endif
unsigned int vmalloc_seq;
+ unsigned long sigpage;
} mm_context_t;
#ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index b5792b7fd8d3..9b32f76bb0dd 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -56,7 +56,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
* on non-ASID CPUs, the old mm will remain valid until the
* finish_arch_post_lock_switch() call.
*/
- set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+ mm->context.switch_pending = 1;
else
cpu_switch_mm(mm->pgd, mm);
}
@@ -65,9 +65,21 @@ static inline void check_and_switch_context(struct mm_struct *mm,
finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
- if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
- struct mm_struct *mm = current->mm;
- cpu_switch_mm(mm->pgd, mm);
+ struct mm_struct *mm = current->mm;
+
+ if (mm && mm->context.switch_pending) {
+ /*
+ * Preemption must be disabled during cpu_switch_mm() as we
+ * have some stateful cache flush implementations. Check
+ * switch_pending again in case we were preempted and the
+ * switch to this mm was already done.
+ */
+ preempt_disable();
+ if (mm->context.switch_pending) {
+ mm->context.switch_pending = 0;
+ cpu_switch_mm(mm->pgd, mm);
+ }
+ preempt_enable_no_resched();
}
}
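
The finish_arch_post_lock_switch() rework above is a double-checked deferral: test switch_pending once cheaply, then re-test under preempt_disable() in case the switch already completed while we were preemptible. A hedged stand-alone model of that pattern, where preempt_disable()/preempt_enable_no_resched()/do_switch() are stand-ins rather than the real kernel calls:

struct ctx {
	int switch_pending;
};

void preempt_disable(void);
void preempt_enable_no_resched(void);
void do_switch(struct ctx *c);	/* stands in for cpu_switch_mm() */

void finish_post_lock_switch(struct ctx *c)
{
	if (c && c->switch_pending) {
		preempt_disable();
		/*
		 * Re-check: we may have been preempted after the first
		 * test and the switch already done on our behalf.
		 */
		if (c->switch_pending) {
			c->switch_pending = 0;
			do_switch(c);
		}
		preempt_enable_no_resched();
	}
}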
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 6363f3d1d505..4355f0ec44d6 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
+#ifdef CONFIG_KUSER_HELPERS
#define __HAVE_ARCH_GATE_AREA 1
+#endif
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level-types.h>
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 06e7d509eaac..413f3876341c 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -54,7 +54,6 @@ struct thread_struct {
#define start_thread(regs,pc,sp) \
({ \
- unsigned long *stack = (unsigned long *)sp; \
memset(regs->uregs, 0, sizeof(regs->uregs)); \
if (current->personality & ADDR_LIMIT_32BIT) \
regs->ARM_cpsr = USR_MODE; \
@@ -65,9 +64,6 @@ struct thread_struct {
regs->ARM_cpsr |= PSR_ENDSTATE; \
regs->ARM_pc = pc & ~1; /* pc */ \
regs->ARM_sp = sp; /* sp */ \
- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
nommu_start_thread(regs); \
})
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd4..a252c0bfacf5 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
{
return 1 << mpidr_hash.bits;
}
+
+extern int platform_can_cpu_hotplug(void);
+
#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9..b07c09e5a0ac 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
" subs %1, %0, %0, ror #16\n"
" addeq %0, %0, %4\n"
" strexeq %2, %0, [%3]"
- : "=&r" (slock), "=&r" (contended), "=r" (res)
+ : "=&r" (slock), "=&r" (contended), "=&r" (res)
: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
: "cc");
} while (res);
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp;
+ unsigned long contended, res;
- __asm__ __volatile__(
-" ldrex %0, [%1]\n"
-" teq %0, #0\n"
-" strexeq %0, %2, [%1]"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "cc");
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " teq %0, #0\n"
+ " strexeq %1, %3, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock), "r" (0x80000000)
+ : "cc");
+ } while (res);
- if (tmp == 0) {
+ if (!contended) {
smp_mb();
return 1;
} else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp, tmp2 = 1;
+ unsigned long contended, res;
- __asm__ __volatile__(
-" ldrex %0, [%2]\n"
-" adds %0, %0, #1\n"
-" strexpl %1, %0, [%2]\n"
- : "=&r" (tmp), "+r" (tmp2)
- : "r" (&rw->lock)
- : "cc");
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " adds %0, %0, #1\n"
+ " strexpl %1, %0, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock)
+ : "cc");
+ } while (res);
- smp_mb();
- return tmp2 == 0;
+ /* If the lock is negative, then it is already held for write. */
+ if (contended < 0x80000000) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
}
/* read_can_lock - would read_trylock() succeed? */
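
Both trylock conversions above follow the same shape: retry only while the strex itself fails (res != 0), and decide success from the separately captured contended value. The closest portable analogue is a weak compare-and-swap loop; a hedged C11 sketch, illustrative only and with memory ordering simplified:

#include <stdatomic.h>

int write_trylock(atomic_uint *lock)
{
	unsigned int expected = 0;

	/*
	 * A weak CAS may fail spuriously, like a failing strex: retry
	 * that case, but bail out on genuine contention.
	 */
	while (!atomic_compare_exchange_weak(lock, &expected, 0x80000000u)) {
		if (expected != 0)
			return 0;	/* already held */
		/* spurious failure: expected is still 0, try again */
	}
	return 1;			/* acquired */
}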
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 214d4158089a..2b8114fcba09 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -156,7 +156,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
-#define TIF_SWITCH_MM 22 /* deferred switch_mm */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e721..0baf7f0d9394 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@ struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
+ unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
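
The new tlb_gather_mmu() signature encodes a full-mm teardown as the range (0, ~0UL); the expression !(start | (end + 1)) is true for exactly that pair. A small self-checking illustration:

#include <assert.h>

static int is_fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	assert(is_fullmm(0, ~0UL));		/* whole address space */
	assert(!is_fullmm(0x8000, 0x10000));	/* bounded unmap */
	assert(!is_fullmm(0, 0x10000));		/* starts at 0 but bounded */
	return 0;
}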
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index fdbb9e369745..f467e9b3f8d5 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -443,7 +443,18 @@ static inline void local_flush_bp_all(void)
isb();
}
+#include <asm/cputype.h>
#ifdef CONFIG_ARM_ERRATA_798181
+static inline int erratum_a15_798181(void)
+{
+ unsigned int midr = read_cpuid_id();
+
+ /* Cortex-A15 r0p0..r3p2 affected */
+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+ return 0;
+ return 1;
+}
+
static inline void dummy_flush_tlb_a15_erratum(void)
{
/*
@@ -453,6 +464,11 @@ static inline void dummy_flush_tlb_a15_erratum(void)
dsb();
}
#else
+static inline int erratum_a15_798181(void)
+{
+ return 0;
+}
+
static inline void dummy_flush_tlb_a15_erratum(void)
{
}
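
In erratum_a15_798181() above, the mask 0xff0ffff0 keeps the implementer, architecture and part-number fields of the MIDR while ignoring the variant (bits 23:20) and revision (bits 3:0); the second comparison then caps the variant/revision at r3p2. A hedged decoding of the check, with the constants taken from the hunk above and the test values invented:

#include <stdio.h>

static int affected_by_798181(unsigned int midr)
{
	/* Cortex-A15 r0p0..r3p2 affected */
	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
		return 0;
	return 1;
}

int main(void)
{
	printf("A15 r2p2: %d\n", affected_by_798181(0x412fc0f2)); /* -> 1 */
	printf("A15 r4p0: %d\n", affected_by_798181(0x414fc0f0)); /* -> 0 */
	printf("A9:       %d\n", affected_by_798181(0x410fc090)); /* -> 0 */
	return 0;
}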
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 50af92bac737..4371f45c5784 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -29,6 +29,7 @@
#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
#ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
#ifdef CONFIG_ARM_VIRT_EXT
/*
@@ -41,10 +42,21 @@
*/
extern int __boot_cpu_mode;
+static inline void sync_boot_mode(void)
+{
+ /*
+ * As secondaries write to __boot_cpu_mode with caches disabled, we
+ * must flush the corresponding cache entries to ensure the visibility
+ * of their writes.
+ */
+ sync_cache_r(&__boot_cpu_mode);
+}
+
void __hyp_set_vectors(unsigned long phys_vector_base);
unsigned long __hyp_get_vectors(void);
#else
#define __boot_cpu_mode (SVC_MODE)
+#define sync_boot_mode()
#endif
#ifndef ZIMAGE
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 47bcb2d254af..18d76fd5a2af 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,7 +1,6 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += a.out.h
header-y += byteorder.h
header-y += fcntl.h
header-y += hwcap.h
diff --git a/arch/arm/include/uapi/asm/a.out.h b/arch/arm/include/uapi/asm/a.out.h
deleted file mode 100644
index 083894b2e3bc..000000000000
--- a/arch/arm/include/uapi/asm/a.out.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ARM_A_OUT_H__
-#define __ARM_A_OUT_H__
-
-#include <linux/personality.h>
-#include <linux/types.h>
-
-struct exec
-{
- __u32 a_info; /* Use macros N_MAGIC, etc for access */
- __u32 a_text; /* length of text, in bytes */
- __u32 a_data; /* length of data, in bytes */
- __u32 a_bss; /* length of uninitialized data area for file, in bytes */
- __u32 a_syms; /* length of symbol table data in file, in bytes */
- __u32 a_entry; /* start address */
- __u32 a_trsize; /* length of relocation info for text, in bytes */
- __u32 a_drsize; /* length of relocation info for data, in bytes */
-};
-
-/*
- * This is always the same
- */
-#define N_TXTADDR(a) (0x00008000)
-
-#define N_TRSIZE(a) ((a).a_trsize)
-#define N_DRSIZE(a) ((a).a_drsize)
-#define N_SYMSIZE(a) ((a).a_syms)
-
-#define M_ARM 103
-
-#ifndef LIBRARY_START_TEXT
-#define LIBRARY_START_TEXT (0x00c00000)
-#endif
-
-#endif /* __A_OUT_GNU_H__ */
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a39cfc2a1f90..9cbe70c8b0ef 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
.endm
.macro kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+ !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
@@ -742,6 +743,18 @@ ENDPROC(__switch_to)
#endif
.endm
+ .macro kuser_pad, sym, size
+ .if (. - \sym) & 3
+ .rept 4 - (. - \sym) & 3
+ .byte 0
+ .endr
+ .endif
+ .rept (\size - (. - \sym)) / 4
+ .word 0xe7fddef1
+ .endr
+ .endm
+
+#ifdef CONFIG_KUSER_HELPERS
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -832,18 +845,13 @@ kuser_cmpxchg64_fixup:
#error "incoherent kernel configuration"
#endif
- /* pad to next slot */
- .rept (16 - (. - __kuser_cmpxchg64)/4)
- .word 0
- .endr
-
- .align 5
+ kuser_pad __kuser_cmpxchg64, 64
__kuser_memory_barrier: @ 0xffff0fa0
smp_dmb arm
usr_ret lr
- .align 5
+ kuser_pad __kuser_memory_barrier, 32
__kuser_cmpxchg: @ 0xffff0fc0
@@ -916,13 +924,14 @@ kuser_cmpxchg32_fixup:
#endif
- .align 5
+ kuser_pad __kuser_cmpxchg, 32
__kuser_get_tls: @ 0xffff0fe0
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
usr_ret lr
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
- .rep 4
+ kuser_pad __kuser_get_tls, 16
+ .rep 3
.word 0 @ 0xffff0ff0 software TLS value, then
.endr @ pad up to __kuser_helper_version
@@ -932,14 +941,16 @@ __kuser_helper_version: @ 0xffff0ffc
.globl __kuser_helper_end
__kuser_helper_end:
+#endif
+
THUMB( .thumb )
/*
* Vector stubs.
*
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's. Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's. Note that this code must not exceed
+ * a page size.
*
* Common stub entry macro:
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -986,8 +997,17 @@ ENDPROC(vector_\name)
1:
.endm
- .globl __stubs_start
+ .section .stubs, "ax", %progbits
__stubs_start:
+ @ This must be the first word
+ .word vector_swi
+
+vector_rst:
+ ARM( swi SYS_ERROR0 )
+ THUMB( svc #0 )
+ THUMB( nop )
+ b vector_und
+
/*
* Interrupt dispatcher
*/
@@ -1082,6 +1102,16 @@ __stubs_start:
.align 5
/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+ b vector_addrexcptn
+
+/*=============================================================================
* Undefined FIQs
*-----------------------------------------------------------------------------
* Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1094,45 +1124,19 @@ __stubs_start:
vector_fiq:
subs pc, lr, #4
-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
-
-vector_addrexcptn:
- b vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
- .align 5
-
-.LCvswi:
- .word vector_swi
-
- .globl __stubs_end
-__stubs_end:
-
- .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
+ .globl vector_fiq_offset
+ .equ vector_fiq_offset, vector_fiq
- .globl __vectors_start
+ .section .vectors, "ax", %progbits
__vectors_start:
- ARM( swi SYS_ERROR0 )
- THUMB( svc #0 )
- THUMB( nop )
- W(b) vector_und + stubs_offset
- W(ldr) pc, .LCvswi + stubs_offset
- W(b) vector_pabt + stubs_offset
- W(b) vector_dabt + stubs_offset
- W(b) vector_addrexcptn + stubs_offset
- W(b) vector_irq + stubs_offset
- W(b) vector_fiq + stubs_offset
-
- .globl __vectors_end
-__vectors_end:
+ W(b) vector_rst
+ W(b) vector_und
+ W(ldr) pc, __vectors_start + 0x1000
+ W(b) vector_pabt
+ W(b) vector_dabt
+ W(b) vector_addrexcptn
+ W(b) vector_irq
+ W(b) vector_fiq
.data
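
The kuser_pad macro introduced above first rounds the helper slot up to a 4-byte boundary with zero bytes, then fills the remainder of the slot with the 0xe7fddef1 poison word (undefined in both ARM and Thumb). A hedged C model of the padding arithmetic; the 45-byte example length below is made up:

#include <stdio.h>

/* bytes of zero padding to reach 4-byte alignment */
static unsigned align_bytes(unsigned used)
{
	return (4 - (used & 3)) & 3;
}

/* number of 0xe7fddef1 words to fill the rest of a `size`-byte slot */
static unsigned poison_words(unsigned used, unsigned size)
{
	return (size - (used + align_bytes(used))) / 4;
}

int main(void)
{
	printf("%u zero bytes, %u poison words\n",
	       align_bytes(45), poison_words(45, 64));
	return 0;
}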
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index e00621f1403f..52b26432c9a9 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -49,7 +49,7 @@ __irq_entry:
mov r1, sp
stmdb sp!, {lr}
@ routine called with r0 = irq number, r1 = struct pt_regs *
- bl nvic_do_IRQ
+ bl nvic_handle_irq
pop {lr}
@
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 2adda11f712f..918875d96d5d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@
#include <asm/irq.h>
#include <asm/traps.h>
+#define FIQ_OFFSET ({ \
+ extern void *vector_fiq_offset; \
+ (unsigned)&vector_fiq_offset; \
+ })
+
static unsigned long no_fiq_insn;
/* Default reacquire function
@@ -79,14 +84,14 @@ int show_fiq_list(struct seq_file *p, int prec)
void set_fiq_handler(void *start, unsigned int length)
{
-#if defined(CONFIG_CPU_USE_DOMAINS)
- memcpy((void *)0xffff001c, start, length);
-#else
- memcpy(vectors_page + 0x1c, start, length);
-#endif
- flush_icache_range(0xffff001c, 0xffff001c + length);
- if (!vectors_high())
- flush_icache_range(0x1c, 0x1c + length);
+ void *base = vectors_page;
+ unsigned offset = FIQ_OFFSET;
+
+ memcpy(base + offset, start, length);
+ if (!cache_is_vipt_nonaliasing())
+ flush_icache_range((unsigned long)base + offset,
+ (unsigned long)base + offset + length);
+ flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
}
int claim_fiq(struct fiq_handler *f)
@@ -144,6 +149,7 @@ EXPORT_SYMBOL(disable_fiq);
void __init init_FIQ(int start)
{
- no_fiq_insn = *(unsigned long *)0xffff001c;
+ unsigned offset = FIQ_OFFSET;
+ no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
fiq_start = start;
}
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index b361de143756..14235ba64a90 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -87,6 +87,7 @@ ENTRY(stext)
ENDPROC(stext)
#ifdef CONFIG_SMP
+ .text
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 9cf6063020ae..2c7cc1e03473 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -343,6 +343,7 @@ __turn_mmu_on_loc:
.long __turn_mmu_on_end
#if defined(CONFIG_SMP)
+ .text
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 4910232c4833..797b1a6a4906 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -56,8 +56,8 @@ ENTRY(__boot_cpu_mode)
ldr \reg3, [\reg2]
ldr \reg1, [\reg2, \reg3]
cmp \mode, \reg1 @ matches primary CPU boot mode?
- orrne r7, r7, #BOOT_CPU_MODE_MISMATCH
- strne r7, [r5, r6] @ record what happened and give up
+ orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
+ strne \reg1, [\reg2, \reg3] @ record what happened and give up
.endm
#else /* ZIMAGE */
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf..57221e349a7c 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
+#include <asm/smp_plat.h>
#include <asm/system_misc.h>
extern const unsigned char relocate_new_kernel[];
@@ -39,6 +40,14 @@ int machine_kexec_prepare(struct kimage *image)
int i, err;
/*
+ * Validate that if the current HW supports SMP, then the SW supports
+ * and implements CPU hotplug for the current HW. If not, we won't be
+ * able to kexec reliably, so fail the prepare operation.
+ */
+ if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+ return -EINVAL;
+
+ /*
* No segment at default ATAGs address. try to locate
* a dtb using magic.
*/
@@ -73,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
crash_save_cpu(&regs, smp_processor_id());
flush_cache_all();
+ set_cpu_online(smp_processor_id(), false);
atomic_dec(&waiting_for_crash_ipi);
while (1)
cpu_relax();
@@ -134,10 +144,13 @@ void machine_kexec(struct kimage *image)
unsigned long reboot_code_buffer_phys;
void *reboot_code_buffer;
- if (num_online_cpus() > 1) {
- pr_err("kexec: error: multiple CPUs still online\n");
- return;
- }
+ /*
+ * This can only happen if machine_shutdown() failed to disable some
+ * CPU, and that can only happen if the checks in
+ * machine_kexec_prepare() were not correct. If this fails, we can't
+ * reliably kexec anyway, so BUG_ON is appropriate.
+ */
+ BUG_ON(num_online_cpus() > 1);
page_list = image->head & PAGE_MASK;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533f..e186ee1e63f6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
+ if (is_software_event(event))
+ return 1;
+
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index d3ca4f6915af..94f6b05f9e24 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -197,6 +197,7 @@ void machine_shutdown(void)
*/
void machine_halt(void)
{
+ local_irq_disable();
smp_send_stop();
local_irq_disable();
@@ -211,6 +212,7 @@ void machine_halt(void)
*/
void machine_power_off(void)
{
+ local_irq_disable();
smp_send_stop();
if (pm_power_off)
@@ -230,6 +232,7 @@ void machine_power_off(void)
*/
void machine_restart(char *cmd)
{
+ local_irq_disable();
smp_send_stop();
arm_pm_restart(reboot_mode, cmd);
@@ -426,10 +429,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
}
#ifdef CONFIG_MMU
+#ifdef CONFIG_KUSER_HELPERS
/*
* The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Insert it into the
- * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers. Insert it into the gate_vma so that it is visible
+ * through ptrace and /proc/<pid>/mem.
*/
static struct vm_area_struct gate_vma = {
.vm_start = 0xffff0000,
@@ -458,9 +462,48 @@ int in_gate_area_no_mm(unsigned long addr)
{
return in_gate_area(NULL, addr);
}
+#define is_gate_vma(vma) ((vma) == &gate_vma)
+#else
+#define is_gate_vma(vma) 0
+#endif
const char *arch_vma_name(struct vm_area_struct *vma)
{
- return (vma == &gate_vma) ? "[vectors]" : NULL;
+ return is_gate_vma(vma) ? "[vectors]" :
+ (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+ "[sigpage]" : NULL;
+}
+
+static struct page *signal_page;
+extern struct page *get_signal_page(void);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr;
+ int ret;
+
+ if (!signal_page)
+ signal_page = get_signal_page();
+ if (!signal_page)
+ return -ENOMEM;
+
+ down_write(&mm->mmap_sem);
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
+ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &signal_page);
+
+ if (ret == 0)
+ mm->context.sigpage = addr;
+
+ up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
}
#endif
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 63af9a7ae512..afc2489ee13b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -836,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
+ sync_boot_mode();
+
if (is_hyp_mode_available()) {
pr_info("CPU: All CPU(s) started in HYP mode.\n");
pr_info("CPU: Virtualization extensions available.\n");
@@ -971,6 +973,7 @@ static const char *hwcap_str[] = {
"vfpv4",
"idiva",
"idivt",
+ "vfpd32",
"lpae",
NULL
};
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 1c16c35c271a..ab3304225272 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
+#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
@@ -15,12 +16,11 @@
#include <asm/elf.h>
#include <asm/cacheflush.h>
+#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
-#include "signal.h"
-
/*
* For ARM syscalls, we encode the syscall number into the instruction.
*/
@@ -40,11 +40,13 @@
#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-const unsigned long sigreturn_codes[7] = {
+static const unsigned long sigreturn_codes[7] = {
MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
+static unsigned long signal_return_offset;
+
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
@@ -400,14 +402,20 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
__put_user(sigreturn_codes[idx+1], rc+1))
return 1;
- if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
+#ifdef CONFIG_MMU
+ if (cpsr & MODE32_BIT) {
+ struct mm_struct *mm = current->mm;
+
/*
- * 32-bit code can use the new high-page
- * signal return code support except when the MPU has
- * protected the vectors page from PL0
+ * 32-bit code can use the signal return page
+ * except when the MPU has protected the vectors
+ * page from PL0
*/
- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
- } else {
+ retcode = mm->context.sigpage + signal_return_offset +
+ (idx << 2) + thumb;
+ } else
+#endif
+ {
/*
* Ensure that the instruction cache sees
* the return code written onto the stack.
@@ -608,3 +616,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
} while (thread_flags & _TIF_WORK_MASK);
return 0;
}
+
+struct page *get_signal_page(void)
+{
+ unsigned long ptr;
+ unsigned offset;
+ struct page *page;
+ void *addr;
+
+ page = alloc_pages(GFP_KERNEL, 0);
+
+ if (!page)
+ return NULL;
+
+ addr = page_address(page);
+
+ /* Give the signal return code some randomness */
+ offset = 0x200 + (get_random_int() & 0x7fc);
+ signal_return_offset = offset;
+
+ /*
+ * Copy signal return handlers into the vector page, and
+ * set sigreturn to be a pointer to these.
+ */
+ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+
+ ptr = (unsigned long)addr + offset;
+ flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+
+ return page;
+}
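
In get_signal_page() above, masking get_random_int() with 0x7fc keeps the random part word-aligned and below 0x800, so the sigreturn code lands at a word-aligned offset in [0x200, 0x9fc] and the seven-word table always fits in the page. A self-checking sketch, where rand32() stands in for get_random_int() and 7 * 4 assumes the seven 32-bit sigreturn words:

#include <assert.h>
#include <stdlib.h>

static unsigned int rand32(void)
{
	return (unsigned int)rand();	/* stand-in for get_random_int() */
}

int main(void)
{
	for (int i = 0; i < 100000; i++) {
		unsigned int off = 0x200 + (rand32() & 0x7fc);

		assert((off & 3) == 0);			/* word aligned */
		assert(off >= 0x200 && off <= 0x9fc);	/* randomised range */
		assert(off + 7 * 4 <= 4096);		/* table fits in page */
	}
	return 0;
}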
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
deleted file mode 100644
index 5ff067b7c752..000000000000
--- a/arch/arm/kernel/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * linux/arch/arm/kernel/signal.h
- *
- * Copyright (C) 2005-2009 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
-
-extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9a..2dc19349eb19 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
return -ENOSYS;
}
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ if (smp_ops.cpu_kill)
+ return 1;
+#endif
+
+ return 0;
+}
+
#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index a98b62dca2fa..c2edfff573c2 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -70,23 +70,6 @@ static inline void ipi_flush_bp_all(void *ignored)
local_flush_bp_all();
}
-#ifdef CONFIG_ARM_ERRATA_798181
-static int erratum_a15_798181(void)
-{
- unsigned int midr = read_cpuid_id();
-
- /* Cortex-A15 r0p0..r3p2 affected */
- if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
- return 0;
- return 1;
-}
-#else
-static int erratum_a15_798181(void)
-{
- return 0;
-}
-#endif
-
static void ipi_flush_tlb_a15_erratum(void *arg)
{
dmb();
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cab094c234ee..ab517fcce21b 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -35,8 +35,6 @@
#include <asm/tls.h>
#include <asm/system_misc.h>
-#include "signal.h"
-
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
void *vectors_page;
@@ -800,15 +798,26 @@ void __init trap_init(void)
return;
}
-static void __init kuser_get_tls_init(unsigned long vectors)
+#ifdef CONFIG_KUSER_HELPERS
+static void __init kuser_init(void *vectors)
{
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+
+ memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
/*
* vectors + 0xfe0 = __kuser_get_tls
* vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
*/
if (tls_emu || has_tls_reg)
- memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+ memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
+#else
+static void __init kuser_init(void *vectors)
+{
+}
+#endif
void __init early_trap_init(void *vectors_base)
{
@@ -816,33 +825,30 @@ void __init early_trap_init(void *vectors_base)
unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
- extern char __kuser_helper_start[], __kuser_helper_end[];
- int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned i;
vectors_page = vectors_base;
/*
+ * Poison the vectors page with an undefined instruction. This
+ * instruction is chosen to be undefined for both ARM and Thumb
+ * ISAs. The Thumb version is an undefined instruction with a
+ * branch back to the undefined instruction.
+ */
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+ ((u32 *)vectors_base)[i] = 0xe7fddef1;
+
+ /*
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
- memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
- memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+ memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
- /*
- * Do processor specific fixups for the kuser helpers
- */
- kuser_get_tls_init(vectors);
-
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
- sigreturn_codes, sizeof(sigreturn_codes));
+ kuser_init(vectors_base);
- flush_icache_range(vectors, vectors + PAGE_SIZE);
+ flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
#else /* ifndef CONFIG_CPU_V7M */
/*
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index fa25e4e425f6..7bcee5c9b604 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -148,6 +148,23 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
#endif
+ /*
+ * The vectors and stubs are relocatable code, and the
+ * only thing that matters is their relative offsets
+ */
+ __vectors_start = .;
+ .vectors 0 : AT(__vectors_start) {
+ *(.vectors)
+ }
+ . = __vectors_start + SIZEOF(.vectors);
+ __vectors_end = .;
+
+ __stubs_start = .;
+ .stubs 0x1000 : AT(__stubs_start) {
+ *(.stubs)
+ }
+ . = __stubs_start + SIZEOF(.stubs);
+ __stubs_end = .;
INIT_TEXT_SECTION(8)
.exit.text : {
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4a5199070430..db9cf692d4dd 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
#define access_pmintenclr pm_fake
/* Architected CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
*/
static const struct coproc_reg cp15_regs[] = {
/* CSSELR: swapped by interrupt.S. */
@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = {
NULL, reset_unknown, c0_CSSELR },
/* TTBR0/TTBR1: swapped by interrupt.S. */
- { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
- { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+ { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+ { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
/* TTBCR: swapped by interrupt.S. */
{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
@@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = {
NULL, reset_unknown, c6_IFAR },
/* PAR swapped by interrupt.S */
- { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+ { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
/*
* DC{C,I,CI}SW operations:
@@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params)
| KVM_REG_ARM_OPC1_MASK))
return false;
params->is_64bit = true;
- params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+ /* CRm to CRn: see cp15_to_index for details */
+ params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
>> KVM_REG_ARM_CRM_SHIFT);
params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
>> KVM_REG_ARM_OPC1_SHIFT);
params->Op2 = 0;
- params->CRn = 0;
+ params->CRm = 0;
return true;
default:
return false;
@@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg)
if (reg->is_64) {
val |= KVM_REG_SIZE_U64;
val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
- val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+ /*
+ * CRn always denotes the primary coproc. reg. nr. for the
+ * in-kernel representation, but the user space API uses the
+ * CRm for the encoding, because it is modelled after the
+ * MRRC/MCRR instructions: see the ARM ARM rev. c page
+ * B3-1445
+ */
+ val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
} else {
val |= KVM_REG_SIZE_U32;
val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index b7301d3e4799..0461d5c8d3de 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
return -1;
if (i1->CRn != i2->CRn)
return i1->CRn - i2->CRn;
+ if (i1->is_64 != i2->is_64)
+ return i2->is_64 - i1->is_64;
if (i1->CRm != i2->CRm)
return i1->CRm - i2->CRm;
if (i1->Op1 != i2->Op1)
@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
#define CRn(_x) .CRn = _x
#define CRm(_x) .CRm = _x
+#define CRm64(_x) .CRn = _x, .CRm = 0
#define Op1(_x) .Op1 = _x
#define Op2(_x) .Op2 = _x
#define is64 .is_64 = true
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index 685063a6d0cf..cf93472b9dd6 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
/*
* A15-specific CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
*/
static const struct coproc_reg a15_regs[] = {
/* MPIDR: we use VMPIDR for guest access. */
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index b8e06b7a2833..0c25d9487d53 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_exit_mmio *mmio)
{
- unsigned long rt, len;
+ unsigned long rt;
+ int len;
bool is_write, sign_extend;
if (kvm_vcpu_dabt_isextabt(vcpu)) {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index ca6bea4859b4..0988d9e04dd4 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
return p;
}
+static bool page_empty(void *ptr)
+{
+ struct page *ptr_page = virt_to_page(ptr);
+ return page_count(ptr_page) == 1;
+}
+
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
pmd_t *pmd_table = pmd_offset(pud, 0);
@@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
put_page(virt_to_page(pmd));
}
-static bool pmd_empty(pmd_t *pmd)
-{
- struct page *pmd_page = virt_to_page(pmd);
- return page_count(pmd_page) == 1;
-}
-
static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
if (pte_present(*pte)) {
@@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
}
}
-static bool pte_empty(pte_t *pte)
-{
- struct page *pte_page = virt_to_page(pte);
- return page_count(pte_page) == 1;
-}
-
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
unsigned long long start, u64 size)
{
@@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
pmd_t *pmd;
pte_t *pte;
unsigned long long addr = start, end = start + size;
- u64 range;
+ u64 next;
while (addr < end) {
pgd = pgdp + pgd_index(addr);
pud = pud_offset(pgd, addr);
if (pud_none(*pud)) {
- addr += PUD_SIZE;
+ addr = pud_addr_end(addr, end);
continue;
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
- addr += PMD_SIZE;
+ addr = pmd_addr_end(addr, end);
continue;
}
pte = pte_offset_kernel(pmd, addr);
clear_pte_entry(kvm, pte, addr);
- range = PAGE_SIZE;
+ next = addr + PAGE_SIZE;
/* If we emptied the pte, walk back up the ladder */
- if (pte_empty(pte)) {
+ if (page_empty(pte)) {
clear_pmd_entry(kvm, pmd, addr);
- range = PMD_SIZE;
- if (pmd_empty(pmd)) {
+ next = pmd_addr_end(addr, end);
+ if (page_empty(pmd) && !page_empty(pud)) {
clear_pud_entry(kvm, pud, addr);
- range = PUD_SIZE;
+ next = pud_addr_end(addr, end);
}
}
- addr += range;
+ addr = next;
}
}
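
The unmap_range() rework above steps with pud_addr_end()/pmd_addr_end() instead of fixed PUD_SIZE/PMD_SIZE strides, so a hole in the tables never advances past the caller's end. A hedged model of those helpers, with the end-of-address-space wrap the kernel versions also handle omitted, and SIZE standing in for PMD_SIZE/PUD_SIZE:

#include <stdio.h>

static unsigned long addr_end(unsigned long addr, unsigned long end,
			      unsigned long size)
{
	unsigned long boundary = (addr + size) & ~(size - 1);

	return boundary < end ? boundary : end;
}

int main(void)
{
	/* with a 2 MiB stride, 0x1ff000 advances to the 0x200000 boundary */
	printf("%#lx\n", addr_end(0x1ff000, 0x400000, 0x200000));
	/* but never past the caller's end */
	printf("%#lx\n", addr_end(0x1ff000, 0x1ff800, 0x200000));
	return 0;
}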
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 2abee6626aac..916e5a142917 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -227,6 +227,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk),
CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk),
CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk),
+ CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk),
CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk),
CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk),
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index dff4ddc5ef81..139e42da25f0 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW_SYNDROME,
+ .ecc_bits = 4,
.bbt_options = NAND_BBT_USE_FLASH,
};
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index a33686a6fbb2..fa4bfaf952d8 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
.parts = davinci_evm_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.bbt_options = NAND_BBT_USE_FLASH,
.timing = &davinci_evm_nandflash_timing,
};
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index fbb8e5ab1dc1..0c005e876cac 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.options = 0,
};
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 2bc112adf565..808233b60e3d 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
.parts = davinci_ntosd2_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.bbt_options = NAND_BBT_USE_FLASH,
};
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 614e41e7881b..905efc8cac79 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -121,8 +121,7 @@ config MSM_SMD
bool
config MSM_GPIOMUX
- depends on !(ARCH_MSM8X60 || ARCH_MSM8960)
- bool "MSM V1 TLMM GPIOMUX architecture"
+ bool
help
Support for MSM V1 TLMM GPIOMUX architecture.
diff --git a/arch/arm/mach-msm/gpiomux-v1.c b/arch/arm/mach-msm/gpiomux-v1.c
deleted file mode 100644
index 27de2abd7144..000000000000
--- a/arch/arm/mach-msm/gpiomux-v1.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/kernel.h>
-#include "gpiomux.h"
-#include "proc_comm.h"
-
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val)
-{
- unsigned tlmm_config = (val & ~GPIOMUX_CTL_MASK) |
- ((gpio & 0x3ff) << 4);
- unsigned tlmm_disable = 0;
- int rc;
-
- rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
- &tlmm_config, &tlmm_disable);
- if (rc)
- pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n",
- __func__, rc, tlmm_config, tlmm_disable);
-}
diff --git a/arch/arm/mach-msm/gpiomux.h b/arch/arm/mach-msm/gpiomux.h
index 8e82f41a8923..4410d7766f93 100644
--- a/arch/arm/mach-msm/gpiomux.h
+++ b/arch/arm/mach-msm/gpiomux.h
@@ -73,16 +73,6 @@ extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS];
int msm_gpiomux_write(unsigned gpio,
gpiomux_config_t active,
gpiomux_config_t suspended);
-
-/* Architecture-internal function for use by the framework only.
- * This function can assume the following:
- * - the gpio value has passed a bounds-check
- * - the gpiomux spinlock has been obtained
- *
- * This function is not for public consumption. External users
- * should use msm_gpiomux_write.
- */
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val);
#else
static inline int msm_gpiomux_write(unsigned gpio,
gpiomux_config_t active,
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index f6eeb87e4e95..827d15009a86 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -122,11 +122,7 @@ static struct musb_hdrc_config musb_config = {
};
static struct musb_hdrc_platform_data tusb_data = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
.mode = MUSB_OTG,
-#else
- .mode = MUSB_HOST,
-#endif
.set_power = tusb_set_power,
.min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */
.power = 100, /* Max 100 mA VBUS for host mode */
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index d2ea68ea678a..7735105561d8 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -85,7 +85,7 @@ static struct omap_board_mux board_mux[] __initdata = {
static struct omap_musb_board_data musb_board_data = {
.interface_type = MUSB_INTERFACE_ULPI,
- .mode = MUSB_PERIPHERAL,
+ .mode = MUSB_OTG,
.power = 0,
};
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index 393aeefaebb0..043e5705f2a6 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -42,7 +42,7 @@
/* Using generic display panel */
static struct tfp410_platform_data omap4_dvi_panel = {
- .i2c_bus_num = 3,
+ .i2c_bus_num = 2,
.power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO,
};
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 5cc92874be7e..f99f68e1e85b 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -129,6 +129,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
const char *oh_name;
int oh_cnt, i, ret = 0;
+ bool device_active = false;
oh_cnt = of_property_count_strings(node, "ti,hwmods");
if (oh_cnt <= 0) {
@@ -152,6 +153,8 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
goto odbfd_exit1;
}
hwmods[i] = oh;
+ if (oh->flags & HWMOD_INIT_NO_IDLE)
+ device_active = true;
}
od = omap_device_alloc(pdev, hwmods, oh_cnt);
@@ -172,6 +175,11 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
pdev->dev.pm_domain = &omap_device_pm_domain;
+ if (device_active) {
+ omap_device_enable(pdev);
+ pm_runtime_set_active(&pdev->dev);
+ }
+
odbfd_exit1:
kfree(hwmods);
odbfd_exit:
@@ -842,6 +850,7 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_device *od = to_omap_device(pdev);
+ int i;
if (!od)
return 0;
@@ -850,6 +859,15 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
* If omap_device state is enabled, but has no driver bound,
* idle it.
*/
+
+ /*
+ * Some devices (like memory controllers) are always kept
+ * enabled, and should not be idled even with no drivers.
+ */
+ for (i = 0; i < od->hwmods_cnt; i++)
+ if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
+ return 0;
+
if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
dev_warn(dev, "%s: enabled but no driver. Idling\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 7341eff63f56..7f4db12b1459 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2386,7 +2386,7 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
if (np)
- va_start = of_iomap(np, 0);
+ va_start = of_iomap(np, oh->mpu_rt_idx);
} else {
va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index aab33fd814c0..e1482a9b3bc2 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -95,6 +95,54 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3;
#define MODULEMODE_HWCTRL 1
#define MODULEMODE_SWCTRL 2
+#define DEBUG_OMAP2UART1_FLAGS 0
+#define DEBUG_OMAP2UART2_FLAGS 0
+#define DEBUG_OMAP2UART3_FLAGS 0
+#define DEBUG_OMAP3UART3_FLAGS 0
+#define DEBUG_OMAP3UART4_FLAGS 0
+#define DEBUG_OMAP4UART3_FLAGS 0
+#define DEBUG_OMAP4UART4_FLAGS 0
+#define DEBUG_TI81XXUART1_FLAGS 0
+#define DEBUG_TI81XXUART2_FLAGS 0
+#define DEBUG_TI81XXUART3_FLAGS 0
+#define DEBUG_AM33XXUART1_FLAGS 0
+
+#define DEBUG_OMAPUART_FLAGS (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET)
+
+#if defined(CONFIG_DEBUG_OMAP2UART1)
+#undef DEBUG_OMAP2UART1_FLAGS
+#define DEBUG_OMAP2UART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART2)
+#undef DEBUG_OMAP2UART2_FLAGS
+#define DEBUG_OMAP2UART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART3)
+#undef DEBUG_OMAP2UART3_FLAGS
+#define DEBUG_OMAP2UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART3)
+#undef DEBUG_OMAP3UART3_FLAGS
+#define DEBUG_OMAP3UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART4)
+#undef DEBUG_OMAP3UART4_FLAGS
+#define DEBUG_OMAP3UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART3)
+#undef DEBUG_OMAP4UART3_FLAGS
+#define DEBUG_OMAP4UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART4)
+#undef DEBUG_OMAP4UART4_FLAGS
+#define DEBUG_OMAP4UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART1)
+#undef DEBUG_TI81XXUART1_FLAGS
+#define DEBUG_TI81XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART2)
+#undef DEBUG_TI81XXUART2_FLAGS
+#define DEBUG_TI81XXUART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART3)
+#undef DEBUG_TI81XXUART3_FLAGS
+#define DEBUG_TI81XXUART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_AM33XXUART1)
+#undef DEBUG_AM33XXUART1_FLAGS
+#define DEBUG_AM33XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#endif
/**
* struct omap_hwmod_mux_info - hwmod specific mux configuration
@@ -568,6 +616,7 @@ struct omap_hwmod_link {
* @voltdm: pointer to voltage domain (filled in at runtime)
* @dev_attr: arbitrary device attributes that can be passed to the driver
* @_sysc_cache: internal-use hwmod flags
+ * @mpu_rt_idx: index of device address space for register target (for DT boot)
* @_mpu_rt_va: cached register target start address (internal use)
* @_mpu_port: cached MPU register target slave (internal use)
* @opt_clks_cnt: number of @opt_clks
@@ -617,6 +666,7 @@ struct omap_hwmod {
struct list_head node;
struct omap_hwmod_ocp_if *_mpu_port;
u16 flags;
+ u8 mpu_rt_idx;
u8 response_lat;
u8 rst_lines_cnt;
u8 opt_clks_cnt;
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index d05fc7b54567..56cebb05509e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -512,7 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = {
.mpu_irqs = omap2_uart1_mpu_irqs,
.sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@@ -532,7 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = {
.mpu_irqs = omap2_uart2_mpu_irqs,
.sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@@ -552,7 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = {
.mpu_irqs = omap2_uart3_mpu_irqs,
.sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP2UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 28bbd56346a9..eb2f3b93b51c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -562,6 +562,7 @@ static struct omap_hwmod am33xx_cpgmac0_hwmod = {
.clkdm_name = "cpsw_125mhz_clkdm",
.flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
.main_clk = "cpsw_125mhz_gclk",
+ .mpu_rt_idx = 1,
.prcm = {
.omap4 = {
.clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET,
@@ -1512,7 +1513,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
.name = "uart1",
.class = &uart_class,
.clkdm_name = "l4_wkup_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.main_clk = "dpll_per_m2_div4_wkupdm_ck",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index f7a3df2fb579..0c3a427da544 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -490,7 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
.mpu_irqs = omap2_uart1_mpu_irqs,
.sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@@ -509,7 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
.mpu_irqs = omap2_uart2_mpu_irqs,
.sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@@ -528,7 +528,8 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
.mpu_irqs = omap2_uart3_mpu_irqs,
.sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS |
+ HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = OMAP3430_PER_MOD,
@@ -558,7 +559,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
.mpu_irqs = uart4_mpu_irqs,
.sdma_reqs = uart4_sdma_reqs,
.main_clk = "uart4_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = OMAP3430_PER_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index d04b5e60fdbe..9c3b504477d7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2858,8 +2858,7 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
.name = "uart3",
.class = &omap44xx_uart_hwmod_class,
.clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
- HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
@@ -2875,7 +2874,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
.name = "uart4",
.class = &omap44xx_uart_hwmod_class,
.clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index f37ae96b70a1..3c70f5c1860f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -1375,7 +1375,7 @@ static struct omap_hwmod omap54xx_uart3_hwmod = {
.name = "uart3",
.class = &omap54xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .flags = DEBUG_OMAP4UART3_FLAGS,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
@@ -1391,6 +1391,7 @@ static struct omap_hwmod omap54xx_uart4_hwmod = {
.name = "uart4",
.class = &omap54xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
+ .flags = DEBUG_OMAP4UART4_FLAGS,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 3a674de6cb63..a388f8c1bcb3 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -208,17 +208,6 @@ static int __init omap_serial_early_init(void)
pr_info("%s used as console in debug mode: uart%d clocks will not be gated",
uart_name, uart->num);
}
-
- /*
- * omap-uart can be used for earlyprintk logs
- * So if omap-uart is used as console then prevent
- * uart reset and idle to get logs from omap-uart
- * until uart console driver is available to take
- * care for console messages.
- * Idling or resetting omap-uart while printing logs
- * early boot logs can stall the boot-up.
- */
- oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
}
} while (1);
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 8c4de2708cf2..bc897231bd10 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -38,11 +38,8 @@ static struct musb_hdrc_config musb_config = {
};
static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
.mode = MUSB_OTG,
-#else
- .mode = MUSB_HOST,
-#endif
+
/* .clock is set dynamically */
.config = &musb_config,
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index e115f6742107..c5be60d85e4b 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -1162,9 +1162,6 @@ static void __init eva_init(void)
gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */
gpio_request_one(202, GPIOF_OUT_INIT_LOW, NULL); /* LCD0_LED_CONT */
- /* Touchscreen */
- gpio_request_one(166, GPIOF_OUT_INIT_HIGH, NULL); /* TP_RST_B */
-
/* GETHER */
gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index d5554646916c..3354a85c90f7 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -167,7 +167,13 @@ static const struct pinctrl_map bockw_pinctrl_map[] = {
"usb1", "usb1"),
/* SDHI0 */
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
- "sdhi0", "sdhi0"),
+ "sdhi0_data4", "sdhi0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+ "sdhi0_ctrl", "sdhi0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+ "sdhi0_cd", "sdhi0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+ "sdhi0_wp", "sdhi0"),
};
#define FPGA 0x18200000
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index d73e21d3ea8a..8d6bd5c5efb9 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -59,7 +59,7 @@ static __initdata struct gpio_led_platform_data lager_leds_pdata = {
#define GPIO_KEY(c, g, d, ...) \
{ .code = c, .gpio = g, .desc = d, .active_low = 1 }
-static __initdata struct gpio_keys_button gpio_buttons[] = {
+static struct gpio_keys_button gpio_buttons[] = {
GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"),
GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"),
GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"),
diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S
index 78ebc7559f53..4c09bae86edf 100644
--- a/arch/arm/mach-sti/headsmp.S
+++ b/arch/arm/mach-sti/headsmp.S
@@ -16,8 +16,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
- __INIT
-
/*
* ST specific entry point for secondary CPUs. This provides
* a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 6cacdc8dd654..cd2c88e7a8f7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -421,24 +421,28 @@ config CPU_32v3
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v4
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v4T
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v5
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v6
bool
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE
config TLS_REG_EMUL
bool
+ select NEED_KUSER_HELPERS
help
An SMP system using a pre-ARMv6 processor (there are apparently
a few prototypes like that in existence) and therefore access to
@@ -783,11 +788,43 @@ config TLS_REG_EMUL
config NEEDS_SYSCALL_FOR_CMPXCHG
bool
+ select NEED_KUSER_HELPERS
help
SMP on a pre-ARMv6 processor? Well OK then.
Forget about fast user space cmpxchg support.
It is just not possible.
+config NEED_KUSER_HELPERS
+ bool
+
+config KUSER_HELPERS
+ bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+ default y
+ help
+ Warning: disabling this option may break user programs.
+
+ Provide kuser helpers in the vector page. The kernel provides
+	  helper code to userspace in read-only form at a fixed location
+ in the high vector page to allow userspace to be independent of
+ the CPU type fitted to the system. This permits binaries to be
+ run on ARMv4 through to ARMv7 without modification.
+
+ See Documentation/arm/kernel_user_helpers.txt for details.
+
+	  However, the fixed-address nature of these helpers can be used
+	  by ROP (return oriented programming) authors when creating
+ exploits.
+
+ If all of the binaries and libraries which run on your platform
+ are built specifically for your platform, and make no use of
+ these helpers, then you can turn this option off to hinder
+ such exploits. However, in that case, if a binary or library
+ relying on those helpers is run, it will receive a SIGILL signal,
+ which will terminate the program.
+
+ Say N here only if you are absolutely certain that you do not
+ need these helpers; otherwise, the safe option is to say Y.
+
config DMA_CACHE_RWFO
bool "Enable read/write for ownership DMA cache maintenance"
depends on CPU_V6K && SMP
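
The help text above hinges on the fixed-address property these helpers trade
on. As a concrete illustration, userspace reaches a helper through a
hard-coded vector-page address; the sketch below calls __kuser_get_tls at
0xffff0fe0 as documented in Documentation/arm/kernel_user_helpers.txt. The
guessable address is exactly what ROP chains exploit, and with
CONFIG_KUSER_HELPERS=n the call raises SIGILL, as the help text warns.

    /* Minimal userspace sketch, assuming the documented kuser ABI. */
    typedef unsigned int (*kuser_get_tls_t)(void);
    #define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)

    unsigned int read_tls(void)
    {
    	/* Jumps into the vector page; SIGILL if KUSER_HELPERS=n. */
    	return __kuser_get_tls();
    }
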
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b55b1015724b..4a0544492f10 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -245,7 +245,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
local_flush_bp_all();
local_flush_tlb_all();
- dummy_flush_tlb_a15_erratum();
+ if (erratum_a15_798181())
+ dummy_flush_tlb_a15_erratum();
}
atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f56617a2392..53cdbd39ec8e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -989,6 +989,7 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
void __init sanity_check_meminfo(void)
{
+ phys_addr_t memblock_limit = 0;
int i, j, highmem = 0;
phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
@@ -1052,9 +1053,32 @@ void __init sanity_check_meminfo(void)
bank->size = size_limit;
}
#endif
- if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
- arm_lowmem_limit = bank->start + bank->size;
+ if (!bank->highmem) {
+ phys_addr_t bank_end = bank->start + bank->size;
+ if (bank_end > arm_lowmem_limit)
+ arm_lowmem_limit = bank_end;
+
+ /*
+ * Find the first non-section-aligned page, and point
+ * memblock_limit at it. This relies on rounding the
+ * limit down to be section-aligned, which happens at
+ * the end of this function.
+ *
+ * With this algorithm, the start or end of almost any
+ * bank can be non-section-aligned. The only exception
+		 * is that the start of bank 0 must be section-
+ * aligned, since otherwise memory would need to be
+ * allocated when mapping the start of bank 0, which
+ * occurs before any free memory is mapped.
+ */
+ if (!memblock_limit) {
+ if (!IS_ALIGNED(bank->start, SECTION_SIZE))
+ memblock_limit = bank->start;
+ else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
+ memblock_limit = bank_end;
+ }
+ }
j++;
}
#ifdef CONFIG_HIGHMEM
@@ -1079,7 +1103,18 @@ void __init sanity_check_meminfo(void)
#endif
meminfo.nr_banks = j;
high_memory = __va(arm_lowmem_limit - 1) + 1;
- memblock_set_current_limit(arm_lowmem_limit);
+
+ /*
+ * Round the memblock limit down to a section size. This
+ * helps to ensure that we will allocate memory from the
+ * last full section, which should be mapped.
+ */
+ if (memblock_limit)
+ memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+ if (!memblock_limit)
+ memblock_limit = arm_lowmem_limit;
+
+ memblock_set_current_limit(memblock_limit);
}
static inline void prepare_page_table(void)
@@ -1160,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
/*
* Allocate the vector page early.
*/
- vectors = early_alloc(PAGE_SIZE);
+ vectors = early_alloc(PAGE_SIZE * 2);
early_trap_init(vectors);
@@ -1205,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
map.pfn = __phys_to_pfn(virt_to_phys(vectors));
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
+#ifdef CONFIG_KUSER_HELPERS
map.type = MT_HIGH_VECTORS;
+#else
+ map.type = MT_LOW_VECTORS;
+#endif
create_mapping(&map);
if (!vectors_high()) {
map.virtual = 0;
+ map.length = PAGE_SIZE * 2;
map.type = MT_LOW_VECTORS;
create_mapping(&map);
}
+ /* Now create a kernel read-only mapping */
+ map.pfn += 1;
+ map.virtual = 0xffff0000 + PAGE_SIZE;
+ map.length = PAGE_SIZE;
+ map.type = MT_LOW_VECTORS;
+ create_mapping(&map);
+
/*
* Ask the machine support to map in the statically mapped devices.
*/
@@ -1276,8 +1323,6 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
- memblock_set_current_limit(arm_lowmem_limit);
-
build_mem_type_table();
prepare_page_table();
map_lowmem();
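
The two mmu.c hunks above cooperate: sanity_check_meminfo() records the first
non-section-aligned bank boundary it meets, and the rounding at the end pins
memblock's allocation limit to a section boundary, so allocations made before
free memory is fully mapped always land inside a completely mapped section
(which is also why the now-redundant memblock_set_current_limit() call in
paging_init() is dropped). A standalone sketch of the computation, assuming a
plain bank array in place of the kernel's meminfo walk and lowmem-only banks:

    /* Sketch only: b[] stands in for the meminfo iteration. */
    struct bank { unsigned long start, size; };

    unsigned long sketch_memblock_limit(const struct bank *b, int n,
    				    unsigned long lowmem_limit)
    {
    	unsigned long limit = 0;
    	int i;

    	for (i = 0; i < n && !limit; i++) {
    		unsigned long end = b[i].start + b[i].size;

    		if (b[i].start & (SECTION_SIZE - 1))
    			limit = b[i].start;	/* unaligned bank start */
    		else if (end & (SECTION_SIZE - 1))
    			limit = end;		/* unaligned bank end */
    	}

    	/* Round down so the limit covers only fully mapped sections. */
    	if (limit)
    		limit &= ~(SECTION_SIZE - 1);
    	return limit ? limit : lowmem_limit;
    }
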
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index f64afb9f1bd5..bdd3be4be77a 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
ARM( str r3, [r0, #2048]! )
THUMB( add r0, r0, #2048 )
THUMB( str r3, [r0] )
- ALT_SMP(mov pc,lr)
+ ALT_SMP(W(nop))
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
mov pc, lr
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index c36ac69488c8..01a719e18bb0 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext)
tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
orreq r2, #L_PTE_RDONLY
1: strd r2, r3, [r0]
- ALT_SMP(mov pc, lr)
+ ALT_SMP(W(nop))
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
mov pc, lr
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 5c6d5a3050ea..73398bcf9bd8 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle)
ENDPROC(cpu_v7_do_idle)
ENTRY(cpu_v7_dcache_clean_area)
- ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW
- ALT_UP(W(nop))
- dcache_line_size r2, r3
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
+ ALT_UP_B(1f)
+ mov pc, lr
+1: dcache_line_size r2, r3
+2: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, r2
subs r1, r1, r2
- bhi 1b
+ bhi 2b
dsb
mov pc, lr
ENDPROC(cpu_v7_dcache_clean_area)
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c
index 3e5c4619caa5..50a3ea0037db 100644
--- a/arch/arm/plat-samsung/init.c
+++ b/arch/arm/plat-samsung/init.c
@@ -55,12 +55,13 @@ void __init s3c_init_cpu(unsigned long idcode,
printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
- if (cpu->map_io == NULL || cpu->init == NULL) {
+ if (cpu->init == NULL) {
printk(KERN_ERR "CPU %s support not enabled\n", cpu->name);
panic("Unsupported Samsung CPU");
}
- cpu->map_io();
+ if (cpu->map_io)
+ cpu->map_io();
}
/* s3c24xx_init_clocks
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f71c37edca26..8a6295c86209 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -170,9 +170,10 @@ static void __init xen_percpu_init(void *unused)
per_cpu(xen_vcpu, cpu) = vcpup;
enable_percpu_irq(xen_events_irq, 0);
+ put_cpu();
}
-static void xen_restart(char str, const char *cmd)
+static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
{
struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
int rc;
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index c92de4163eba..b25763bc0ec4 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -42,14 +42,15 @@
#define TPIDR_EL1 18 /* Thread ID, Privileged */
#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
+#define PAR_EL1 21 /* Physical Address Register */
/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2 21 /* Domain Access Control Register */
-#define IFSR32_EL2 22 /* Instruction Fault Status Register */
-#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */
-#define TEECR32_EL1 25 /* ThumbEE Configuration Register */
-#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */
-#define NR_SYS_REGS 27
+#define DACR32_EL2 22 /* Domain Access Control Register */
+#define IFSR32_EL2 23 /* Instruction Fault Status Register */
+#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */
+#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */
+#define TEECR32_EL1 26 /* ThumbEE Configuration Register */
+#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */
+#define NR_SYS_REGS 28
/* 32bit mapping */
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -69,6 +70,8 @@
#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
+#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
+#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 644d73956864..0859a4ddd1e7 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -129,7 +129,7 @@ struct kvm_vcpu_arch {
struct kvm_mmu_memory_cache mmu_page_cache;
/* Target CPU and feature flags */
- u32 target;
+ int target;
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
/* Detect first run of a vcpu */
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 46b3beb4b773..717031a762c2 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -35,6 +35,7 @@ struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
+ unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
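
tlb_gather_mmu() now derives the old fullmm flag from the range itself: a
whole-mm teardown is requested as start = 0, end = -1, which
!(start | (end + 1)) maps to 1, while any bounded range maps to 0 because
either start or end + 1 is non-zero. A quick self-contained check of the
encoding, relying on unsigned wrap-around:

    /* Sketch verifying the fullmm encoding used above. */
    #include <assert.h>

    static int is_fullmm(unsigned long start, unsigned long end)
    {
    	return !(start | (end + 1));
    }

    int main(void)
    {
    	assert(is_fullmm(0, ~0UL));		/* (0, -1): whole mm */
    	assert(!is_fullmm(0x1000, 0x2000));	/* bounded range */
    	assert(!is_fullmm(0, 0x2000));		/* starts at 0, still partial */
    	return 0;
    }
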
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 9ba33c40cdf8..12e6ccb88691 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
@@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events,
struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;
+ if (is_software_event(event))
+ return 1;
+
if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
return 1;
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index ff985e3d8b72..1ac0bbbdddb2 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -214,6 +214,7 @@ __kvm_hyp_code_start:
mrs x21, tpidr_el1
mrs x22, amair_el1
mrs x23, cntkctl_el1
+ mrs x24, par_el1
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
@@ -225,6 +226,7 @@ __kvm_hyp_code_start:
stp x18, x19, [x3, #112]
stp x20, x21, [x3, #128]
stp x22, x23, [x3, #144]
+ str x24, [x3, #160]
.endm
.macro restore_sysregs
@@ -243,6 +245,7 @@ __kvm_hyp_code_start:
ldp x18, x19, [x3, #112]
ldp x20, x21, [x3, #128]
ldp x22, x23, [x3, #144]
+ ldr x24, [x3, #160]
msr vmpidr_el2, x4
msr csselr_el1, x5
@@ -264,6 +267,7 @@ __kvm_hyp_code_start:
msr tpidr_el1, x21
msr amair_el1, x22
msr cntkctl_el1, x23
+ msr par_el1, x24
.endm
.macro skip_32bit_state tmp, target
@@ -600,6 +604,8 @@ END(__kvm_vcpu_run)
// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
+ dsb ishst
+
kern_hyp_va x0
ldr x2, [x0, #KVM_VTTBR]
msr vttbr_el2, x2
@@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
ENDPROC(__kvm_tlb_flush_vmid_ipa)
ENTRY(__kvm_flush_vm_context)
+ dsb ishst
tlbi alle1is
ic ialluis
dsb sy
@@ -753,6 +760,10 @@ el1_trap:
*/
tbnz x1, #7, 1f // S1PTW is set
+ /* Preserve PAR_EL1 */
+ mrs x3, par_el1
+ push x3, xzr
+
/*
* Permission fault, HPFAR_EL2 is invalid.
* Resolve the IPA the hard way using the guest VA.
@@ -766,6 +777,8 @@ el1_trap:
/* Read result */
mrs x3, par_el1
+ pop x0, xzr // Restore PAR_EL1 from the stack
+ msr par_el1, x0
tbnz x3, #0, 3f // Bail out if we failed the translation
ubfx x3, x3, #12, #36 // Extract IPA
lsl x3, x3, #4 // and present it like HPFAR
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 94923609753b..02e9d09e1d80 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* FAR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
NULL, reset_unknown, FAR_EL1 },
+ /* PAR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
+ NULL, reset_unknown, PAR_EL1 },
/* PMINTENSET_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index f91431963452..7de083d19b7e 100644
--- a/arch/avr32/boards/atngw100/mrmt.c
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -150,7 +150,6 @@ static struct ac97c_platform_data __initdata ac97c0_data = {
static struct platform_device rmt_ts_device = {
.name = "ucb1400_ts",
.id = -1,
- }
};
#endif
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 33a97929d055..77d442ab28c8 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -158,6 +158,7 @@ source "kernel/Kconfig.hz"
endmenu
source "init/Kconfig"
+source "kernel/Kconfig.freezer"
source "drivers/Kconfig"
source "fs/Kconfig"
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 7913695b2fcb..efbd2929aeb7 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -31,7 +31,7 @@ CONFIG_ACPI_FAN=m
CONFIG_ACPI_DOCK=y
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index f8e913365423..f64980dd20c3 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -25,7 +25,7 @@ CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index a5a9e02e60a0..0f4e9e41f130 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -31,7 +31,7 @@ CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig
index 37b9b422caad..b025acfde5c1 100644
--- a/arch/ia64/configs/xen_domu_defconfig
+++ b/arch/ia64/configs/xen_domu_defconfig
@@ -32,7 +32,7 @@ CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de01954..bc5efc7c3f3f 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -22,7 +22,7 @@
* unmapping a portion of the virtual address space, these hooks are called according to
* the following template:
*
- * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM
+ * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
* {
* for each vma that needs a shootdown do {
* tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@ struct mmu_gather {
unsigned int max;
unsigned char fullmm; /* non-zero means full mm flush */
unsigned char need_flush; /* really unmapped some PTEs? */
+ unsigned long start, end;
unsigned long start_addr;
unsigned long end_addr;
struct page **pages;
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
tlb->nr = 0;
- tlb->fullmm = full_mm_flush;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->start_addr = ~0UL;
}
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
index 2291a7d69d49..fa277aecfb78 100644
--- a/arch/m68k/emu/natfeat.c
+++ b/arch/m68k/emu/natfeat.c
@@ -18,9 +18,11 @@
#include <asm/machdep.h>
#include <asm/natfeat.h>
+extern long nf_get_id2(const char *feature_name);
+
asm("\n"
-" .global nf_get_id,nf_call\n"
-"nf_get_id:\n"
+" .global nf_get_id2,nf_call\n"
+"nf_get_id2:\n"
" .short 0x7300\n"
" rts\n"
"nf_call:\n"
@@ -29,12 +31,25 @@ asm("\n"
"1: moveq.l #0,%d0\n"
" rts\n"
" .section __ex_table,\"a\"\n"
-" .long nf_get_id,1b\n"
+" .long nf_get_id2,1b\n"
" .long nf_call,1b\n"
" .previous");
-EXPORT_SYMBOL_GPL(nf_get_id);
EXPORT_SYMBOL_GPL(nf_call);
+long nf_get_id(const char *feature_name)
+{
+ /* feature_name may be in vmalloc()ed memory, so make a copy */
+ char name_copy[32];
+ size_t n;
+
+ n = strlcpy(name_copy, feature_name, sizeof(name_copy));
+ if (n >= sizeof(name_copy))
+ return 0;
+
+ return nf_get_id2(name_copy);
+}
+EXPORT_SYMBOL_GPL(nf_get_id);
+
void nfprint(const char *fmt, ...)
{
static char buf[256];
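
The rename to nf_get_id2 hides the raw trap behind a C wrapper that first
copies the name onto the kernel stack: a feature name living in vmalloc()ed
memory (a module parameter, for instance) is outside the 1:1
virtual-to-physical mapping the emulator's lookup depends on, whereas a
stack buffer is inside it. Restating the wrapper as a sketch with the
strlcpy() contract spelled out:

    /* Sketch of the copy-before-trap pattern used above. */
    long nf_get_id_sketch(const char *feature_name)
    {
    	char name_copy[32];	/* kernel stack: 1:1 mapped */

    	/*
    	 * strlcpy() returns the length of the string it tried to
    	 * create; a return value >= the buffer size means the name
    	 * was truncated and could never match, so report "not found".
    	 */
    	if (strlcpy(name_copy, feature_name, sizeof(name_copy)) >=
    	    sizeof(name_copy))
    		return 0;

    	return nf_get_id2(name_copy);
    }
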
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index 444ea8a09e9f..ef881cfbbca9 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -15,16 +15,17 @@
unsigned long long n64; \
} __n; \
unsigned long __rem, __upper; \
+ unsigned long __base = (base); \
\
__n.n64 = (n); \
if ((__upper = __n.n32[0])) { \
asm ("divul.l %2,%1:%0" \
- : "=d" (__n.n32[0]), "=d" (__upper) \
- : "d" (base), "0" (__n.n32[0])); \
+ : "=d" (__n.n32[0]), "=d" (__upper) \
+ : "d" (__base), "0" (__n.n32[0])); \
} \
asm ("divu.l %2,%1:%0" \
- : "=d" (__n.n32[1]), "=d" (__rem) \
- : "d" (base), "1" (__upper), "0" (__n.n32[1])); \
+ : "=d" (__n.n32[1]), "=d" (__rem) \
+ : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \
(n) = __n.n64; \
__rem; \
})
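
The new __base temporary does two jobs: it truncates the divisor to the 32
bits divu.l actually divides by, honouring do_div()'s documented 32-bit-base
contract even when a caller passes a wider expression, and it evaluates the
macro argument exactly once rather than once per asm statement. A sketch of
the truncation effect with a deliberately over-wide divisor:

    /* Sketch: a 64-bit base argument is truncated by the __base copy. */
    void do_div_demo(void)
    {
    	unsigned long long n = 0x100000000ULL;	  /* 2^32 */
    	unsigned long long wide = 0x100000003ULL; /* over-wide divisor */

    	/*
    	 * On 32-bit m68k, the asm sees (unsigned long)wide == 3,
    	 * so n becomes 1431655765 and the remainder is 1.
    	 */
    	do_div(n, wide);
    }
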
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d22a4ecffff4..4fab52294d98 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -28,7 +28,7 @@ config MICROBLAZE
select GENERIC_CLOCKEVENTS
select GENERIC_IDLE_POLL_SETUP
select MODULES_USE_ELF_RELA
- select CLONE_BACKWARDS
+ select CLONE_BACKWARDS3
config SWAP
def_bool n
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c3abed332301..e12764c2a9d0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -114,6 +114,7 @@ config BCM47XX
select FW_CFE
select HW_HAS_PCI
select IRQ_CPU
+ select SYS_HAS_CPU_MIPS32_R1
select NO_EXCEPT_FILL
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index ba611927749b..2b8b118398c4 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -2,7 +2,6 @@ if BCM47XX
config BCM47XX_SSB
bool "SSB Support for Broadcom BCM47XX"
- select SYS_HAS_CPU_MIPS32_R1
select SSB
select SSB_DRIVER_MIPS
select SSB_DRIVER_EXTIF
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 1dc086087a72..fa44f3ec5302 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -17,6 +17,8 @@
#define current_cpu_type() current_cpu_data.cputype
#endif
+#define boot_cpu_type() cpu_data[0].cputype
+
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
* This is true for all known MIPS systems.
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 5b2f2e68e57f..9488fa5f8866 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -25,8 +25,12 @@
#else
#define CAC_BASE _AC(0x80000000, UL)
#endif
+#ifndef IO_BASE
#define IO_BASE _AC(0xa0000000, UL)
+#endif
+#ifndef UNCAC_BASE
#define UNCAC_BASE _AC(0xa0000000, UL)
+#endif
#ifndef MAP_BASE
#ifdef CONFIG_KVM_GUEST
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index b7a23064841f..88e292b7719e 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -25,11 +25,12 @@ struct siginfo;
/*
* Careful to keep union _sifields from shifting ...
*/
-#if __SIZEOF_LONG__ == 4
+#if _MIPS_SZLONG == 32
#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
-#endif
-#if __SIZEOF_LONG__ == 8
+#elif _MIPS_SZLONG == 64
#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+#else
+#error _MIPS_SZLONG neither 32 nor 64
#endif
#include <asm-generic/siginfo.h>
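
__ARCH_SI_PREAMBLE_SIZE sizes the header in front of siginfo's _sifields
union so the union begins at the same ABI-defined offset everywhere: three
ints when long is 32-bit, four when it is 64-bit, the extra int absorbing
the padding an 8-byte-aligned union would otherwise create. Testing
_MIPS_SZLONG instead of __SIZEOF_LONG__ ties the choice to the MIPS ABI
macro that every supported compiler defines (__SIZEOF_LONG__ only appeared
in newer GCC), and the new #error refuses any unexpected ABI outright. A
structural sketch of the idea:

    /* Sketch: why a 64-bit-long ABI needs a 4-int preamble. */
    struct siginfo_sketch {
    	int si_signo, si_errno, si_code;
    #if _MIPS_SZLONG == 64
    	int __pad;		/* the fourth preamble int */
    #endif
    	union {
    		long ptr_sized;	/* forces 8-byte alignment on 64-bit */
    		int pid;
    	} _sifields;
    };
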
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index f739aedcb509..bd79c4f9bff4 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -54,7 +54,11 @@ LEAF(bmips_smp_movevec)
/* set up CPU1 CBR; move BASE to 0xa000_0000 */
li k0, 0xff400000
mtc0 k0, $22, 6
- li k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_1
+ /* set up relocation vector address based on thread ID */
+ mfc0 k1, $22, 3
+ srl k1, 16
+ andi k1, 0x8000
+ or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
or k0, k1
li k1, 0xa0080000
sw k1, 0(k0)
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index c0bb4d59076a..126da74d4c55 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -66,6 +66,8 @@ static void __init bmips_smp_setup(void)
int i, cpu = 1, boot_cpu = 0;
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+ int cpu_hw_intr;
+
/* arbitration priority */
clear_c0_brcm_cmt_ctrl(0x30);
@@ -79,15 +81,13 @@ static void __init bmips_smp_setup(void)
* MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread
* MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
* MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
- *
- * If booting from TP1, leave the existing CMT interrupt routing
- * such that TP0 responds to SW1 and TP1 responds to SW0.
*/
if (boot_cpu == 0)
- change_c0_brcm_cmt_intr(0xf8018000,
- (0x02 << 27) | (0x03 << 15));
+ cpu_hw_intr = 0x02;
else
- change_c0_brcm_cmt_intr(0xf8018000, (0x1d << 27));
+ cpu_hw_intr = 0x1d;
+
+ change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15));
/* single core, 2 threads (2 pipelines) */
max_cpus = 2;
@@ -202,9 +202,15 @@ static void bmips_init_secondary(void)
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
void __iomem *cbr = BMIPS_GET_CBR();
unsigned long old_vec;
+ unsigned long relo_vector;
+ int boot_cpu;
+
+ boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
+ relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
+ BMIPS_RELO_VECTOR_CONTROL_1;
- old_vec = __raw_readl(cbr + BMIPS_RELO_VECTOR_CONTROL_1);
- __raw_writel(old_vec & ~0x20000000, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+ old_vec = __raw_readl(cbr + relo_vector);
+ __raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
#elif defined(CONFIG_CPU_BMIPS5000)
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index e773659ccf9f..46048d24328c 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
dec_insn.next_pc_inc;
return 1;
break;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case ldc2_op: /* This is bbit032 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case swc2_op: /* This is bbit1 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case sdc2_op: /* This is bbit132 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+#endif
case cop0_op:
case cop1_op:
case cop2_op:
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index e4b1140cdae0..3a2b6e9f25cf 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -166,7 +166,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
reg.control[i] |= M_PERFCTL_USER;
if (ctr[i].exl)
reg.control[i] |= M_PERFCTL_EXL;
- if (current_cpu_type() == CPU_XLR)
+ if (boot_cpu_type() == CPU_XLR)
reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
reg.counter[i] = 0x80000000 - ctr[i].count;
}
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index d22dc0d6f289..2b7e837dc2e2 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -206,11 +206,13 @@ static struct resource pnx833x_ethernet_resources[] = {
.end = PNX8335_IP3902_PORTS_END,
.flags = IORESOURCE_MEM,
},
+#ifdef CONFIG_SOC_PNX8335
[1] = {
.start = PNX8335_PIC_ETHERNET_INT,
.end = PNX8335_PIC_ETHERNET_INT,
.flags = IORESOURCE_IRQ,
},
+#endif
};
static struct platform_device pnx833x_ethernet_device = {
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
index 9f64c2387808..0238af1ba503 100644
--- a/arch/mips/powertv/asic/asic_devices.c
+++ b/arch/mips/powertv/asic/asic_devices.c
@@ -529,8 +529,7 @@ EXPORT_SYMBOL(asic_resource_get);
*/
void platform_release_memory(void *ptr, int size)
{
- free_reserved_area((unsigned long)ptr, (unsigned long)(ptr + size),
- -1, NULL);
+ free_reserved_area(ptr, ptr + size, -1, NULL);
}
EXPORT_SYMBOL(platform_release_memory);
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 99dbab1c59ac..d60bf98fa5cf 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -55,6 +55,7 @@ config GENERIC_CSUM
source "init/Kconfig"
+source "kernel/Kconfig.freezer"
menu "Processor type and features"
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
new file mode 100644
index 000000000000..f11006361297
--- /dev/null
+++ b/arch/parisc/configs/c8000_defconfig
@@ -0,0 +1,279 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_PA8X00=y
+CONFIG_MLONGCALLS=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_PREEMPT=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_IOMMU_CCIO=y
+CONFIG_PCI=y
+CONFIG_PCI_LBA=y
+# CONFIG_SUPERIO is not set
+# CONFIG_CHASSIS_LCD_LED is not set
+# CONFIG_PDC_CHASSIS is not set
+# CONFIG_PDC_CHASSIS_WARN is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+# CONFIG_IPV6 is not set
+CONFIG_IP_DCCP=m
+# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_TIPC=m
+CONFIG_LLC2=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_PARPORT_PC_FIFO=y
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_SX8=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_WCACHE=y
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_PLATFORM=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_SIIMAGE=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
+CONFIG_FUSION_SAS=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=m
+CONFIG_TUN=y
+CONFIG_E1000=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_HIL_OLD is not set
+# CONFIG_KEYBOARD_HIL is not set
+CONFIG_MOUSE_PS2=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_CM109=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_PARKBD=m
+CONFIG_SERIO_GSCPS2=m
+# CONFIG_HP_SDC is not set
+CONFIG_SERIO_PCIPS2=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_RUNTIME_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_MUX is not set
+CONFIG_SERIAL_JSM=m
+CONFIG_PRINTER=y
+CONFIG_HW_RANDOM=y
+CONFIG_RAW_DRIVER=m
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_SSB=m
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_AGP=y
+CONFIG_AGP_PARISC=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_FOREIGN_ENDIAN=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+# CONFIG_FB_STI is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_STI_CONSOLE is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_AD1889=m
+# CONFIG_SND_USB is not set
+# CONFIG_SND_GSC is not set
+CONFIG_HID_A4TECH=m
+CONFIG_HID_APPLE=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+CONFIG_HID_EZKEY=m
+CONFIG_HID_KYE=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_NTRIG=m
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_HID_ZEROPLUS=m
+CONFIG_USB_HID=m
+CONFIG_USB=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=m
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_NFS_FS=m
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SLAB=y
+CONFIG_DEBUG_SLAB_LEAK=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_PROVE_RCU_DELAY=y
+CONFIG_DEBUG_BLOCK_EXT_DEVT=y
+CONFIG_LATENCYTOP=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_KEYS=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FONTS=y
diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h
index 9afdad6c2ffb..eaf4dc1c7294 100644
--- a/arch/parisc/include/asm/parisc-device.h
+++ b/arch/parisc/include/asm/parisc-device.h
@@ -23,6 +23,7 @@ struct parisc_device {
/* generic info returned from pdc_pat_cell_module() */
unsigned long mod_info; /* PAT specific - Misc Module info */
unsigned long pmod_loc; /* physical Module location */
+ unsigned long mod0;
#endif
u64 dma_mask; /* DMA mask for I/O */
struct device dev;
@@ -61,4 +62,6 @@ parisc_get_drvdata(struct parisc_device *d)
extern struct bus_type parisc_bus_type;
+int iosapic_serial_irq(struct parisc_device *dev);
+
#endif /*_ASM_PARISC_PARISC_DEVICE_H_*/
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 2e65aa54bd10..c035673209f7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -71,18 +71,27 @@ flush_cache_all_local(void)
}
EXPORT_SYMBOL(flush_cache_all_local);
+/* Virtual address of pfn. */
+#define pfn_va(pfn) __va(PFN_PHYS(pfn))
+
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
- struct page *page = pte_page(*ptep);
+ unsigned long pfn = pte_pfn(*ptep);
+ struct page *page;
- if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
- test_bit(PG_dcache_dirty, &page->flags)) {
+ /* We don't have pte special. As a result, we can be called with
+ an invalid pfn and we don't need to flush the kernel dcache page.
+	   This occurs with the FireGL card in the C8000.  */
+ if (!pfn_valid(pfn))
+ return;
- flush_kernel_dcache_page(page);
+ page = pfn_to_page(pfn);
+ if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
clear_bit(PG_dcache_dirty, &page->flags);
} else if (parisc_requires_coherency())
- flush_kernel_dcache_page(page);
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
}
void
@@ -495,44 +504,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
void flush_cache_mm(struct mm_struct *mm)
{
+ struct vm_area_struct *vma;
+ pgd_t *pgd;
+
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
- if (mm_total_size(mm) < parisc_cache_flush_threshold) {
- struct vm_area_struct *vma;
-
- if (mm->context == mfsp(3)) {
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- flush_user_dcache_range_asm(vma->vm_start,
- vma->vm_end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(
- vma->vm_start, vma->vm_end);
- }
- } else {
- pgd_t *pgd = mm->pgd;
-
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- unsigned long addr;
-
- for (addr = vma->vm_start; addr < vma->vm_end;
- addr += PAGE_SIZE) {
- pte_t *ptep = get_ptep(pgd, addr);
- if (ptep != NULL) {
- pte_t pte = *ptep;
- __flush_cache_page(vma, addr,
- page_to_phys(pte_page(pte)));
- }
- }
- }
+ if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ flush_cache_all();
+ return;
+ }
+
+ if (mm->context == mfsp(3)) {
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+ if ((vma->vm_flags & VM_EXEC) == 0)
+ continue;
+ flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
}
return;
}
-#ifdef CONFIG_SMP
- flush_cache_all();
-#else
- flush_cache_all_local();
-#endif
+ pgd = mm->pgd;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end;
+ addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (!pfn_valid(pfn))
+ continue;
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ }
+ }
}
void
@@ -556,33 +563,32 @@ flush_user_icache_range(unsigned long start, unsigned long end)
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
+ unsigned long addr;
+ pgd_t *pgd;
+
BUG_ON(!vma->vm_mm->context);
- if ((end - start) < parisc_cache_flush_threshold) {
- if (vma->vm_mm->context == mfsp(3)) {
- flush_user_dcache_range_asm(start, end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(start, end);
- } else {
- unsigned long addr;
- pgd_t *pgd = vma->vm_mm->pgd;
-
- for (addr = start & PAGE_MASK; addr < end;
- addr += PAGE_SIZE) {
- pte_t *ptep = get_ptep(pgd, addr);
- if (ptep != NULL) {
- pte_t pte = *ptep;
- flush_cache_page(vma,
- addr, pte_pfn(pte));
- }
- }
- }
- } else {
-#ifdef CONFIG_SMP
+ if ((end - start) >= parisc_cache_flush_threshold) {
flush_cache_all();
-#else
- flush_cache_all_local();
-#endif
+ return;
+ }
+
+ if (vma->vm_mm->context == mfsp(3)) {
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
+ return;
+ }
+
+ pgd = vma->vm_mm->pgd;
+ for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (pfn_valid(pfn))
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
@@ -591,9 +597,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
{
BUG_ON(!vma->vm_mm->context);
- flush_tlb_page(vma, vmaddr);
- __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
-
+ if (pfn_valid(pfn)) {
+ flush_tlb_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
}
#ifdef CONFIG_PARISC_TMPALIAS
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 3295ef4a185d..f0b6722fc706 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -211,6 +211,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
/* REVISIT: who is the consumer of this? not sure yet... */
dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
dev->pmod_loc = pa_pdc_cell->mod_location;
+ dev->mod0 = pa_pdc_cell->mod[0];
register_parisc_device(dev); /* advertise device */
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 940188d1942c..07349b002687 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -56,13 +56,6 @@
#define A(__x) ((unsigned long)(__x))
/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-#ifdef CONFIG_64BIT
-#include "sys32.h"
-#endif
-
-/*
* Do a signal return - restore sigcontext.
*/
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 33eca1b04926..6c6a271a6140 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -34,7 +34,6 @@
#include <asm/uaccess.h>
#include "signal32.h"
-#include "sys32.h"
#define DEBUG_COMPAT_SIG 0
#define DEBUG_COMPAT_SIG_LEVEL 2
diff --git a/arch/parisc/kernel/sys32.h b/arch/parisc/kernel/sys32.h
deleted file mode 100644
index 60dd470f39f8..000000000000
--- a/arch/parisc/kernel/sys32.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2002 Richard Hirst <rhirst at parisc-linux.org>
- * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
- * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _PARISC64_KERNEL_SYS32_H
-#define _PARISC64_KERNEL_SYS32_H
-
-#include <linux/compat.h>
-
-/* Call a kernel syscall which will use kernel space instead of user
- * space for its copy_to/from_user.
- */
-#define KERNEL_SYSCALL(ret, syscall, args...) \
-{ \
- mm_segment_t old_fs = get_fs(); \
- set_fs(KERNEL_DS); \
- ret = syscall(args); \
- set_fs (old_fs); \
-}
-
-#endif
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index a134ff4da12e..bb9f3b64de55 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -42,8 +42,6 @@
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
-#include "sys32.h"
-
#undef DEBUG
#ifdef DEBUG
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3bf72cd2c8fc..dbd9d3c991e8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -566,7 +566,7 @@ config SCHED_SMT
config PPC_DENORMALISATION
bool "PowerPC denormalisation exception handling"
depends on PPC_BOOK3S_64
- default "n"
+ default "y" if PPC_POWERNV
---help---
Add support for handling denormalisation of single precision
values. Useful for bare metal only. If unsure say Y here.
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index c86fcb92358e..0e8cfd09da2f 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -58,7 +58,7 @@ CONFIG_SCHED_SMT=y
CONFIG_PPC_DENORMALISATION=y
CONFIG_PCCARD=y
CONFIG_ELECTRA_CF=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
CONFIG_PACKET=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 4b20f76172e2..0085dc4642c5 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -32,7 +32,7 @@ CONFIG_IRQ_ALL_CPUS=y
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_PCI_MSI=y
CONFIG_PCCARD=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index bea8587c3af5..1d4b9763895d 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -53,7 +53,7 @@ CONFIG_PPC_64K_PAGES=y
CONFIG_PPC_SUBPAGE_PROT=y
CONFIG_SCHED_SMT=y
CONFIG_PPC_DENORMALISATION=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
CONFIG_PACKET=y
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 2dd7bfc459be..8b2492644754 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <asm/hw_irq.h>
#include <linux/device.h>
+#include <uapi/asm/perf_event.h>
#define MAX_HWEVENTS 8
#define MAX_EVENT_ALTERNATIVES 8
@@ -69,11 +70,6 @@ struct power_pmu {
#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */
-/*
- * We use the event config bit 63 as a flag to request EBB.
- */
-#define EVENT_CONFIG_EBB_SHIFT 63
-
extern int register_power_pmu(struct power_pmu *);
struct pt_regs;
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 47a35b08b963..e378cccfca55 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -247,6 +247,10 @@ struct thread_struct {
unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */
struct pt_regs ckpt_regs; /* Checkpointed registers */
+ unsigned long tm_tar;
+ unsigned long tm_ppr;
+ unsigned long tm_dscr;
+
/*
* Transactional FP and VSX 0-31 register set.
* NOTE: the sense of these is the opposite of the integer ckpt_regs!
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a6840e4e24f7..99222e27f173 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -254,19 +254,28 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_TAR_LG 8 /* Enable Target Address Register */
+#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
+#define FSCR_TM_LG 5 /* Enable Transactional Memory */
+#define FSCR_PM_LG 4 /* Enable prob/priv access to PMU SPRs */
+#define FSCR_BHRB_LG 3 /* Enable Branch History Rolling Buffer*/
+#define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
+#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
-#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
-#define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
-#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
+#define FSCR_TAR __MASK(FSCR_TAR_LG)
+#define FSCR_EBB __MASK(FSCR_EBB_LG)
+#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
-#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
-#define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
-#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */
-#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
-#define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/
-#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
-#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */
-#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */
+#define HFSCR_TAR __MASK(FSCR_TAR_LG)
+#define HFSCR_EBB __MASK(FSCR_EBB_LG)
+#define HFSCR_TM __MASK(FSCR_TM_LG)
+#define HFSCR_PM __MASK(FSCR_PM_LG)
+#define HFSCR_BHRB __MASK(FSCR_BHRB_LG)
+#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
+#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
+#define HFSCR_FP __MASK(FSCR_FP_LG)
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabebcdca..48cfc858abd6 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -145,6 +145,10 @@ extern void __cpu_die(unsigned int cpu);
#define smp_setup_cpu_maps()
static inline void inhibit_secondary_onlining(void) {}
static inline void uninhibit_secondary_onlining(void) {}
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+ return cpumask_of(cpu);
+}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 49a13e0ef234..294c2cedcf7a 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *,
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void save_tar(struct thread_struct *prev)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ prev->tar = mfspr(SPRN_TAR);
+}
+#else
+static inline void save_tar(struct thread_struct *prev) {}
+#endif
extern void giveup_fpu(struct task_struct *);
extern void load_up_fpu(void);
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index 5182c8622b54..48be855ef37b 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -20,6 +20,7 @@ header-y += mman.h
header-y += msgbuf.h
header-y += nvram.h
header-y += param.h
+header-y += perf_event.h
header-y += poll.h
header-y += posix_types.h
header-y += ps3fb.h
diff --git a/arch/powerpc/include/uapi/asm/perf_event.h b/arch/powerpc/include/uapi/asm/perf_event.h
new file mode 100644
index 000000000000..80a4d40cf5bc
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/perf_event.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2013 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ */
+
+#ifndef _UAPI_ASM_POWERPC_PERF_EVENT_H
+#define _UAPI_ASM_POWERPC_PERF_EVENT_H
+
+/*
+ * We use bit 63 of perf_event_attr.config as a flag to request EBB.
+ */
+#define PERF_EVENT_CONFIG_EBB_SHIFT 63
+
+#endif /* _UAPI_ASM_POWERPC_PERF_EVENT_H */
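Exporting PERF_EVENT_CONFIG_EBB_SHIFT through uapi lets userspace request an EBB event the same way the kernel decodes it in core-book3s.c further down. A hypothetical userspace fragment (the raw PMU event value and helper name are illustrative, not from this patch):

#include <stdint.h>
#include <string.h>
#include <linux/perf_event.h>

#define PERF_EVENT_CONFIG_EBB_SHIFT 63	/* from the newly exported header */

/* Build an attr that asks a PPMU_EBB-capable PMU for an EBB event. */
static void init_ebb_attr(struct perf_event_attr *attr, uint64_t raw_event)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = PERF_TYPE_RAW;
	attr->size = sizeof(*attr);
	attr->config = raw_event | (1ULL << PERF_EVENT_CONFIG_EBB_SHIFT);
}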
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c7e8afc2ead0..8207459efe56 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -138,6 +138,9 @@ int main(void)
DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
+ DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
+ DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
+ DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
transact_vr[0]));
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index ea9414c8088d..55593ee2d5aa 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1061,7 +1061,7 @@ static const struct file_operations proc_eeh_operations = {
static int __init eeh_init_proc(void)
{
- if (machine_is(pseries))
+ if (machine_is(pseries) || machine_is(powernv))
proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
return 0;
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ab15b8d057ad..2bd0b885b0fe 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
- /*
- * Back up the TAR across context switches. Note that the TAR is not
- * available for use in the kernel. (To provide this, the TAR should
- * be backed up/restored on exception entry/exit instead, and be in
- * pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
- */
- mfspr r0,SPRN_TAR
- std r0,THREAD_TAR(r3)
-
/* Event based branch registers */
mfspr r0, SPRN_BESCR
std r0, THREAD_BESCR(r3)
@@ -584,9 +575,34 @@ BEGIN_FTR_SECTION
ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
cmpwi r6,0
+ li r8, FSCR_DSCR
bne 1f
ld r0,0(r7)
-1: cmpd r0,r25
+ b 3f
+1:
+ BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r6, SPRN_FSCR
+ or r6, r6, r8
+ mtspr SPRN_FSCR, r6
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r6, SPRN_HFSCR
+ or r6, r6, r8
+ mtspr SPRN_HFSCR, r6
+ END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+ b 4f
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+3:
+ BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r6, SPRN_FSCR
+ andc r6, r6, r8
+ mtspr SPRN_FSCR, r6
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r6, SPRN_HFSCR
+ andc r6, r6, r8
+ mtspr SPRN_HFSCR, r6
+ END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+4: cmpd r0,r25
beq 2f
mtspr SPRN_DSCR,r0
2:
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 4e00d223b2e3..902ca3c6b4b6 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -848,7 +848,7 @@ hv_facility_unavailable_relon_trampoline:
. = 0x4f80
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXGEN)
- b facility_unavailable_relon_hv
+ b hv_facility_unavailable_relon_hv
STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
@@ -1175,6 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
b .ret_from_except
STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
+ STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
.align 7
.globl __end_handlers
@@ -1188,7 +1189,7 @@ __end_handlers:
STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
- STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable)
+ STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 2e51cde616d2..c69440cef7af 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -362,7 +362,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
seq_printf(p, " Spurious interrupts\n");
- seq_printf(p, "%*s: ", prec, "CNT");
+ seq_printf(p, "%*s: ", prec, "PMI");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
seq_printf(p, " Performance monitoring interrupts\n");
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index c517dbe705fd..8083be20fe5e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
struct ppc64_tlb_batch *batch;
#endif
+ /* Back up the TAR across context switches.
+ * Note that the TAR is not available for use in the kernel. (To
+ * provide this, the TAR should be backed up/restored on exception
+ * entry/exit instead, and be in pt_regs. FIXME, this should be in
+ * pt_regs anyway (for debug).)
+ * Save the TAR here before we do treclaim/trecheckpoint as these
+ * will change the TAR.
+ */
+ save_tar(&prev->thread);
+
__switch_to_tm(prev);
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 51be8fb24803..0554d1f6d70d 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -233,6 +233,16 @@ dont_backup_fp:
std r5, _CCR(r7)
std r6, _XER(r7)
+
+ /* ******************** TAR, PPR, DSCR ********** */
+ mfspr r3, SPRN_TAR
+ mfspr r4, SPRN_PPR
+ mfspr r5, SPRN_DSCR
+
+ std r3, THREAD_TM_TAR(r12)
+ std r4, THREAD_TM_PPR(r12)
+ std r5, THREAD_TM_DSCR(r12)
+
/* MSR and flags: We don't change CRs, and we don't need to alter
* MSR.
*/
@@ -347,6 +357,16 @@ dont_restore_fp:
mtmsr r6 /* FP/Vec off again! */
restore_gprs:
+
+ /* ******************** TAR, PPR, DSCR ********** */
+ ld r4, THREAD_TM_TAR(r3)
+ ld r5, THREAD_TM_PPR(r3)
+ ld r6, THREAD_TM_DSCR(r3)
+
+ mtspr SPRN_TAR, r4
+ mtspr SPRN_PPR, r5
+ mtspr SPRN_DSCR, r6
+
/* ******************** CR,LR,CCR,MSR ********** */
ld r3, _CTR(r7)
ld r4, _LINK(r7)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bf33c22e38a4..e435bc089ea3 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -44,9 +44,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
-#ifdef CONFIG_PPC32
#include <asm/reg.h>
-#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
@@ -1296,43 +1294,54 @@ void vsx_unavailable_exception(struct pt_regs *regs)
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
+#ifdef CONFIG_PPC64
void facility_unavailable_exception(struct pt_regs *regs)
{
static char *facility_strings[] = {
- "FPU",
- "VMX/VSX",
- "DSCR",
- "PMU SPRs",
- "BHRB",
- "TM",
- "AT",
- "EBB",
- "TAR",
+ [FSCR_FP_LG] = "FPU",
+ [FSCR_VECVSX_LG] = "VMX/VSX",
+ [FSCR_DSCR_LG] = "DSCR",
+ [FSCR_PM_LG] = "PMU SPRs",
+ [FSCR_BHRB_LG] = "BHRB",
+ [FSCR_TM_LG] = "TM",
+ [FSCR_EBB_LG] = "EBB",
+ [FSCR_TAR_LG] = "TAR",
};
- char *facility, *prefix;
+ char *facility = "unknown";
u64 value;
+ u8 status;
+ bool hv;
- if (regs->trap == 0xf60) {
- value = mfspr(SPRN_FSCR);
- prefix = "";
- } else {
+ hv = (regs->trap == 0xf80);
+ if (hv)
value = mfspr(SPRN_HFSCR);
- prefix = "Hypervisor ";
+ else
+ value = mfspr(SPRN_FSCR);
+
+ status = value >> 56;
+ if (status == FSCR_DSCR_LG) {
+ /* The user is accessing the DSCR.  Set the inherit bit and
+ * allow the user to set it directly in future by setting the
+ * H/FSCR DSCR bit.
+ */
+ current->thread.dscr_inherit = 1;
+ if (hv)
+ mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
+ else
+ mtspr(SPRN_FSCR, value | FSCR_DSCR);
+ return;
}
- value = value >> 56;
+ if ((status < ARRAY_SIZE(facility_strings)) &&
+ facility_strings[status])
+ facility = facility_strings[status];
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
- if (value < ARRAY_SIZE(facility_strings))
- facility = facility_strings[value];
- else
- facility = "unknown";
-
pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
- prefix, facility, regs->nip, regs->msr);
+ hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
@@ -1341,6 +1350,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
die("Unexpected facility unavailable exception", regs, SIGABRT);
}
+#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
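The rewritten handler keys everything off the interruption-cause field in the top byte of the (H)FSCR, using the *_LG bit numbers as designated initializers. A compressed, self-contained model of that decode path (only two table entries shown; the rest follow the same pattern):

#define FSCR_DSCR_LG	2
#define FSCR_TAR_LG	8
#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const char *decode_facility(unsigned long long fscr)
{
	static const char * const facility_strings[] = {
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_TAR_LG]  = "TAR",
		/* ... remaining FSCR_*_LG entries ... */
	};
	unsigned char status = fscr >> 56;	/* IC field, bits 63:56 */

	if (status < ARRAY_SIZE(facility_strings) && facility_strings[status])
		return facility_strings[status];
	return "unknown";
}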
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2efa9dde741a..7629cd3eb91a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1809,7 +1809,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
rma_size <<= PAGE_SHIFT;
rmls = lpcr_rmls(rma_size);
err = -EINVAL;
- if (rmls < 0) {
+ if ((long)rmls < 0) {
pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
goto out_srcu;
}
@@ -1874,7 +1874,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
/* Allocate the guest's logical partition ID */
lpid = kvmppc_alloc_lpid();
- if (lpid < 0)
+ if ((long)lpid < 0)
return -ENOMEM;
kvm->arch.lpid = lpid;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 19498a567a81..c6e13d9a9e15 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1047,11 +1047,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
if (err)
goto free_shadow_vcpu;
+ err = -ENOMEM;
p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
- /* the real shared page fills the last 4k of our page */
- vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
if (!p)
goto uninit_vcpu;
+ /* the real shared page fills the last 4k of our page */
+ vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
#ifdef CONFIG_PPC_BOOK3S_64
/* default to book3s_64 (970fx) */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 08397217e8ac..5850798826cd 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -27,6 +27,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
@@ -1318,7 +1319,8 @@ static int update_cpu_associativity_changes_mask(void)
}
}
if (changed) {
- cpumask_set_cpu(cpu, changes);
+ cpumask_or(changes, changes, cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
}
}
@@ -1426,7 +1428,7 @@ static int update_cpu_topology(void *data)
if (!data)
return -EINVAL;
- cpu = get_cpu();
+ cpu = smp_processor_id();
for (update = data; update; update = update->next) {
if (cpu != update->cpu)
@@ -1446,12 +1448,12 @@ static int update_cpu_topology(void *data)
*/
int arch_update_cpu_topology(void)
{
- unsigned int cpu, changed = 0;
+ unsigned int cpu, sibling, changed = 0;
struct topology_update_data *updates, *ud;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
cpumask_t updated_cpus;
struct device *dev;
- int weight, i = 0;
+ int weight, new_nid, i = 0;
weight = cpumask_weight(&cpu_associativity_changes_mask);
if (!weight)
@@ -1464,19 +1466,46 @@ int arch_update_cpu_topology(void)
cpumask_clear(&updated_cpus);
for_each_cpu(cpu, &cpu_associativity_changes_mask) {
- ud = &updates[i++];
- ud->cpu = cpu;
- vphn_get_associativity(cpu, associativity);
- ud->new_nid = associativity_to_nid(associativity);
-
- if (ud->new_nid < 0 || !node_online(ud->new_nid))
- ud->new_nid = first_online_node;
+ /*
+ * If the siblings aren't flagged for changes, the updates list
+ * will be too short.  Skip this update for now and flag the
+ * siblings so they are handled on the next pass.
+ */
+ if (!cpumask_subset(cpu_sibling_mask(cpu),
+ &cpu_associativity_changes_mask)) {
+ pr_info("Sibling bits not set for associativity "
+ "change, cpu%d\n", cpu);
+ cpumask_or(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask,
+ cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ continue;
+ }
- ud->old_nid = numa_cpu_lookup_table[cpu];
- cpumask_set_cpu(cpu, &updated_cpus);
+ /* Use associativity from first thread for all siblings */
+ vphn_get_associativity(cpu, associativity);
+ new_nid = associativity_to_nid(associativity);
+ if (new_nid < 0 || !node_online(new_nid))
+ new_nid = first_online_node;
+
+ if (new_nid == numa_cpu_lookup_table[cpu]) {
+ cpumask_andnot(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask,
+ cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ continue;
+ }
- if (i < weight)
- ud->next = &updates[i];
+ for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+ ud = &updates[i++];
+ ud->cpu = sibling;
+ ud->new_nid = new_nid;
+ ud->old_nid = numa_cpu_lookup_table[sibling];
+ cpumask_set_cpu(sibling, &updated_cpus);
+ if (i < weight)
+ ud->next = &updates[i];
+ }
+ cpu = cpu_last_thread_sibling(cpu);
}
stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
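The reworked loop handles one thread group per iteration: it emits an update for every sibling, then advances cpu to the last thread of the core so the outer iterator resumes at the next core. A toy model of that pattern with the thread-group geometry hard-coded (first_sibling()/last_sibling() stand in for cpu_sibling_mask()/cpu_last_thread_sibling()):

#include <stdio.h>

#define NR_CPUS			8
#define THREADS_PER_CORE	4

static int first_sibling(int cpu) { return cpu & ~(THREADS_PER_CORE - 1); }
static int last_sibling(int cpu)  { return first_sibling(cpu) + THREADS_PER_CORE - 1; }

int main(void)
{
	int changed[NR_CPUS] = { 1, 1, 1, 1, 0, 0, 0, 0 };
	int cpu, sibling;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!changed[cpu])
			continue;
		/* Emit one update per sibling, then jump past the core. */
		for (sibling = first_sibling(cpu); sibling <= last_sibling(cpu); sibling++)
			printf("update cpu%d\n", sibling);
		cpu = last_sibling(cpu);
	}
	return 0;
}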
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 24a45f91c65f..eeae308cf982 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -484,7 +484,7 @@ static bool is_ebb_event(struct perf_event *event)
* use bit 63 of the event code for something else if they wish.
*/
return (ppmu->flags & PPMU_EBB) &&
- ((event->attr.config >> EVENT_CONFIG_EBB_SHIFT) & 1);
+ ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
}
static int ebb_event_check(struct perf_event *event)
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 7466374d2787..2ee4a707f0df 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -118,7 +118,7 @@
(EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
(EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
- (EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT) | \
+ (EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT) | \
EVENT_PSEL_MASK)
/* MMCRA IFM bits - POWER8 */
@@ -233,10 +233,10 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
- ebb = (event >> EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
+ ebb = (event >> PERF_EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
/* Clear the EBB bit in the event, so event checks work below */
- event &= ~(EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT);
+ event &= ~(EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT);
if (pmc) {
if (pmc > 6)
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 9f8671a44551..6a5f2b1f32ca 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -569,35 +569,6 @@ error:
return ret;
}
-static int unzip_oops(char *oops_buf, char *big_buf)
-{
- struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
- u64 timestamp = oops_hdr->timestamp;
- char *big_oops_data = NULL;
- char *oops_data_buf = NULL;
- size_t big_oops_data_sz;
- int unzipped_len;
-
- big_oops_data = big_buf + sizeof(struct oops_log_info);
- big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info);
- oops_data_buf = oops_buf + sizeof(struct oops_log_info);
-
- unzipped_len = nvram_decompress(oops_data_buf, big_oops_data,
- oops_hdr->report_length,
- big_oops_data_sz);
-
- if (unzipped_len < 0) {
- pr_err("nvram: decompression failed; returned %d\n",
- unzipped_len);
- return -1;
- }
- oops_hdr = (struct oops_log_info *)big_buf;
- oops_hdr->version = OOPS_HDR_VERSION;
- oops_hdr->report_length = (u16) unzipped_len;
- oops_hdr->timestamp = timestamp;
- return 0;
-}
-
static int nvram_pstore_open(struct pstore_info *psi)
{
/* Reset the iterator to start reading partitions again */
@@ -685,10 +656,9 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
unsigned int err_type, id_no, size = 0;
struct nvram_os_partition *part = NULL;
char *buff = NULL, *big_buff = NULL;
- int rc, sig = 0;
+ int sig = 0;
loff_t p;
-read_partition:
read_type++;
switch (nvram_type_ids[read_type]) {
@@ -749,30 +719,46 @@ read_partition:
*id = id_no;
if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
+ int length, unzipped_len;
+ size_t hdr_size;
+
oops_hdr = (struct oops_log_info *)buff;
- *buf = buff + sizeof(*oops_hdr);
+ if (oops_hdr->version < OOPS_HDR_VERSION) {
+ /* Old format oops header had 2-byte record size */
+ hdr_size = sizeof(u16);
+ length = oops_hdr->version;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ } else {
+ hdr_size = sizeof(*oops_hdr);
+ length = oops_hdr->report_length;
+ time->tv_sec = oops_hdr->timestamp;
+ time->tv_nsec = 0;
+ }
+ *buf = kmalloc(length, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, buff + hdr_size, length);
+ kfree(buff);
if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) {
big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL);
if (!big_buff)
return -ENOMEM;
- rc = unzip_oops(buff, big_buff);
+ unzipped_len = nvram_decompress(*buf, big_buff,
+ length, big_oops_buf_sz);
- if (rc != 0) {
- kfree(buff);
+ if (unzipped_len < 0) {
+ pr_err("nvram: decompression failed, returned "
+ "rc %d\n", unzipped_len);
kfree(big_buff);
- goto read_partition;
+ } else {
+ *buf = big_buff;
+ length = unzipped_len;
}
-
- oops_hdr = (struct oops_log_info *)big_buff;
- *buf = big_buff + sizeof(*oops_hdr);
- kfree(buff);
}
-
- time->tv_sec = oops_hdr->timestamp;
- time->tv_nsec = 0;
- return oops_hdr->report_length;
+ return length;
}
*buf = buff;
@@ -816,6 +802,7 @@ static int nvram_pstore_init(void)
static void __init nvram_init_oops_partition(int rtas_partition_exists)
{
int rc;
+ size_t size;
rc = pseries_nvram_init_os_partition(&oops_log_partition);
if (rc != 0) {
@@ -844,8 +831,9 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
big_oops_buf_sz = (oops_data_sz * 100) / 45;
big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
if (big_oops_buf) {
- stream.workspace = kmalloc(zlib_deflate_workspacesize(
- WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
+ size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
+ zlib_inflate_workspacesize());
+ stream.workspace = kmalloc(size, GFP_KERNEL);
if (!stream.workspace) {
pr_err("nvram: No memory for compression workspace; "
"skipping compression of %s partition data\n",
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 22f75b504f7f..8a4cae78f03c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -118,6 +118,7 @@ config S390
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZ4
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
select HAVE_KERNEL_XZ
@@ -227,11 +228,12 @@ config MARCH_Z196
not work on older machines.
config MARCH_ZEC12
- bool "IBM zEC12"
+ bool "IBM zBC12 and zEC12"
select HAVE_MARCH_ZEC12_FEATURES if 64BIT
help
- Select this to enable optimizations for IBM zEC12 (2827 series). The
- kernel will be slightly faster but will not work on older machines.
+ Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
+ 2827 series). The kernel will be slightly faster but will not work on
+ older machines.
endchoice
@@ -709,6 +711,7 @@ config S390_GUEST
def_bool y
prompt "s390 support for virtio devices"
depends on 64BIT
+ select TTY
select VIRTUALIZATION
select VIRTIO
select VIRTIO_CONSOLE
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 3ad8f61c9985..866ecbe670e4 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -6,9 +6,9 @@
BITS := $(if $(CONFIG_64BIT),64,31)
-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
- vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \
- sizes.h head$(BITS).o
+targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
+targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
+targets += misc.o piggy.o sizes.h head$(BITS).o
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -48,6 +48,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
+suffix-$(CONFIG_KERNEL_LZ4) := lz4
suffix-$(CONFIG_KERNEL_LZMA) := lzma
suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_XZ) := xz
@@ -56,6 +57,8 @@ $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
$(call if_changed,gzip)
$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
$(call if_changed,bzip2)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
+ $(call if_changed,lz4)
$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
$(call if_changed,lzma)
$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index c4c6a1cf221b..57cbaff1f397 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -47,6 +47,10 @@ static unsigned long free_mem_end_ptr;
#include "../../../../lib/decompress_bunzip2.c"
#endif
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 4d8604e311f3..7d4676758733 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr,
size -= offset;
p = addr + offset / BITS_PER_LONG;
if (bit) {
- set = __flo_word(0, *p & (~0UL << bit));
+ set = __flo_word(0, *p & (~0UL >> bit));
if (set >= size)
return size + offset;
if (set < BITS_PER_LONG)
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b75d7d686684..6d6d92b4ea11 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -32,6 +32,7 @@ struct mmu_gather {
struct mm_struct *mm;
struct mmu_table_batch *batch;
unsigned int fullmm;
+ unsigned long start, end;
};
struct mmu_table_batch {
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
static inline void tlb_gather_mmu(struct mmu_gather *tlb,
struct mm_struct *mm,
- unsigned int full_mm_flush)
+ unsigned long start,
+ unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
tlb->batch = NULL;
if (tlb->fullmm)
__tlb_flush_mm(mm);
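The new tlb_gather_mmu() signature (here and in the sh and um variants below) encodes the old full_mm_flush flag in the range itself: a full-mm teardown passes start = 0 and end = -1, and !(start | (end + 1)) recovers the flag without an extra parameter. A standalone check of that identity:

#include <assert.h>

static int is_fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	assert(is_fullmm(0, -1UL));		/* whole address space */
	assert(!is_fullmm(0, 0x10000));		/* partial range */
	return 0;
}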
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index a6fc037671b1..500aa1029bcb 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -52,12 +52,13 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
static bool is_in_guest(struct pt_regs *regs)
{
- unsigned long ip = instruction_pointer(regs);
-
if (user_mode(regs))
return false;
-
- return ip == (unsigned long) &sie_exit;
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+ return instruction_pointer(regs) == (unsigned long) &sie_exit;
+#else
+ return false;
+#endif
}
static unsigned long guest_is_user_mode(struct pt_regs *regs)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 497451ec5e26..aeed8a61fa0d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -994,6 +994,7 @@ static void __init setup_hwcaps(void)
strcpy(elf_platform, "z196");
break;
case 0x2827:
+ case 0x2828:
strcpy(elf_platform, "zEC12");
break;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ba694d2ba51e..34c1c9a90be2 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -702,14 +702,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
vcpu->arch.sie_block->icptcode = 0;
- preempt_disable();
- kvm_guest_enter();
- preempt_enable();
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
trace_kvm_s390_sie_enter(vcpu,
atomic_read(&vcpu->arch.sie_block->cpuflags));
+
+ /*
+ * As PF_VCPU will be used in the fault handler, there must be
+ * no uaccess between guest_enter and guest_exit.
+ */
+ preempt_disable();
+ kvm_guest_enter();
+ preempt_enable();
rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+ kvm_guest_exit();
+
+ VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+ vcpu->arch.sie_block->icptcode);
+ trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
+
if (rc > 0)
rc = 0;
if (rc < 0) {
@@ -721,10 +732,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
}
- VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
- vcpu->arch.sie_block->icptcode);
- trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
- kvm_guest_exit();
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
return rc;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 0da3e6eb6be6..4cdc54e63ebc 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
+#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
@@ -532,8 +533,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* Only provide non-quiescing support if the host supports it */
- if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
- S390_lowcore.stfl_fac_list & 0x00020000)
+ if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* No support for conditional-SSKE */
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ce36ea80e4f9..ad446b0c55b6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -69,6 +69,7 @@ static void __init setup_zero_pages(void)
order = 2;
break;
case 0x2827: /* zEC12 */
+ case 0x2828: /* zBC12 */
default:
order = 5;
break;
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index ffeb17ce7f31..930783d2c99b 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -440,7 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
switch (id.machine) {
case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
- case 0x2827: ops->cpu_type = "s390/zEC12"; break;
+ case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
default: return -ENODEV;
}
}
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index c8def8bc9020..5fc237581caf 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT
source "init/Kconfig"
+source "kernel/Kconfig.freezer"
+
config MMU
def_bool y
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 2051821724c6..0cf4097b71e8 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -22,7 +22,7 @@ CONFIG_PREEMPT=y
CONFIG_CMDLINE_OVERWRITE=y
CONFIG_CMDLINE="console=ttySC1,115200 mem=64M root=/dev/nfs"
CONFIG_PCI=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index e61d43d9f689..362192ed12fe 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
init_tlb_gather(tlb);
}
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 4febacd1a8a1..29b0301c18aa 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
init_tlb_gather(tlb);
}
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index d606463aa6d6..b7388a425f09 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -225,7 +225,7 @@ static void low_free(unsigned long size, unsigned long addr)
unsigned long nr_pages;
nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- efi_call_phys2(sys_table->boottime->free_pages, addr, size);
+ efi_call_phys2(sys_table->boottime->free_pages, addr, nr_pages);
}
static void find_bits(unsigned long mask, u8 *pos, u8 *size)
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 653668d140f9..4a8cb8d7cbd5 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params)
*/
if (boot_params->sentinel) {
/* fields in boot_params are left uninitialized, clear them */
- memset(&boot_params->olpc_ofw_header, 0,
+ memset(&boot_params->ext_ramdisk_image, 0,
(char *)&boot_params->efi_info -
- (char *)&boot_params->olpc_ofw_header);
+ (char *)&boot_params->ext_ramdisk_image);
memset(&boot_params->kbd_status, 0,
(char *)&boot_params->hdr -
(char *)&boot_params->kbd_status);
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 50e5c58ced23..4c019179a57d 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
extern int apply_microcode_amd(int cpu);
-extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size);
+extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
#ifdef CONFIG_MICROCODE_AMD_EARLY
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index f2b489cf1602..3bf2dd0cf61f 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
+#ifdef CONFIG_MEM_SOFT_DIRTY
+
+/*
+ * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and
+ * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset
+ * into this range.
+ */
+#define PTE_FILE_MAX_BITS 28
+#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
+#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
+#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
+#define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1)
+#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
+#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
+#define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
+
+#define pte_to_pgoff(pte) \
+ ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \
+ & ((1U << PTE_FILE_BITS1) - 1))) \
+ + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \
+ & ((1U << PTE_FILE_BITS2) - 1)) \
+ << (PTE_FILE_BITS1)) \
+ + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \
+ & ((1U << PTE_FILE_BITS3) - 1)) \
+ << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
+ + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \
+ << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
+
+#define pgoff_to_pte(off) \
+ ((pte_t) { .pte_low = \
+ ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
+ + ((((off) >> PTE_FILE_BITS1) \
+ & ((1U << PTE_FILE_BITS2) - 1)) \
+ << PTE_FILE_SHIFT2) \
+ + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
+ & ((1U << PTE_FILE_BITS3) - 1)) \
+ << PTE_FILE_SHIFT3) \
+ + ((((off) >> \
+ (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \
+ << PTE_FILE_SHIFT4) \
+ + _PAGE_FILE })
+
+#else /* CONFIG_MEM_SOFT_DIRTY */
+
/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
- * split up the 29 bits of offset into this range:
+ * split up the 29 bits of offset into this range.
*/
#define PTE_FILE_MAX_BITS 29
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
<< PTE_FILE_SHIFT3) \
+ _PAGE_FILE })
+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
/* Encode and de-code a swap entry */
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
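With CONFIG_MEM_SOFT_DIRTY the pgoff is now split into four chunks that dodge the present (0), file (6), protnone (8) and soft-dirty (11) bits, costing one offset bit (28 instead of 29). A simplified userspace model of the round trip, with the shift/width arithmetic copied from the macros above:

#include <assert.h>
#include <stdint.h>

#define SHIFT1	1	/* above _PAGE_BIT_PRESENT (0) */
#define SHIFT2	7	/* above _PAGE_BIT_FILE (6) */
#define SHIFT3	9	/* above _PAGE_BIT_PROTNONE (8) */
#define SHIFT4	12	/* above _PAGE_BIT_SOFT_DIRTY (11) */
#define BITS1	(SHIFT2 - SHIFT1 - 1)	/* 5 */
#define BITS2	(SHIFT3 - SHIFT2 - 1)	/* 1 */
#define BITS3	(SHIFT4 - SHIFT3 - 1)	/* 2 */

static uint32_t pgoff_to_pte_low(uint32_t off)
{
	return ((off & ((1u << BITS1) - 1)) << SHIFT1)
	     | (((off >> BITS1) & ((1u << BITS2) - 1)) << SHIFT2)
	     | (((off >> (BITS1 + BITS2)) & ((1u << BITS3) - 1)) << SHIFT3)
	     | ((off >> (BITS1 + BITS2 + BITS3)) << SHIFT4);
}

static uint32_t pte_low_to_pgoff(uint32_t pte)
{
	return ((pte >> SHIFT1) & ((1u << BITS1) - 1))
	     | (((pte >> SHIFT2) & ((1u << BITS2) - 1)) << BITS1)
	     | (((pte >> SHIFT3) & ((1u << BITS3) - 1)) << (BITS1 + BITS2))
	     | ((pte >> SHIFT4) << (BITS1 + BITS2 + BITS3));
}

int main(void)
{
	uint32_t off = 0xABCDEF;	/* any value below 1 << 28 */
	uint32_t reserved = (1u << 0) | (1u << 6) | (1u << 8) | (1u << 11);

	assert(pte_low_to_pgoff(pgoff_to_pte_low(off)) == off);
	/* present/file/protnone/soft-dirty bits stay clear */
	assert((pgoff_to_pte_low(off) & reserved) == 0);
	return 0;
}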
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 4cc9f2b7cdc3..81bb91b49a88 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.
+ *
+ * For soft-dirty tracking, bit 11 is taken from
+ * the low part of the pte as well.
*/
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) \
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7dc305a46058..1c00631164c2 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -314,6 +314,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
/*
* Mask out unsupported bits in a present pgprot. Non-present pgprots
* can use those bits for other purposes, so leave them be.
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index c98ac63aae48..f4843e031131 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -61,12 +61,27 @@
* they do not conflict with each other.
*/
+#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN
+
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0))
#endif
+/*
+ * Tracking the soft dirty bit when a page goes to swap is tricky.
+ * We need a bit which can be stored in the pte _and_ does not
+ * conflict with the swap entry format.  On x86 bits 6 and 7 are
+ * *not* involved in swap entry computation, but bit 6 is used for
+ * nonlinear file mapping, so we borrow bit 7 for soft dirty tracking.
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
+#else
+#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
+#endif
+
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
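Since a swapped-out pte is never present, the PSE bit (7) carries no meaning there, which is what makes it safe to reuse as the swap-side soft-dirty flag; the pte_swp_*soft_dirty() helpers added to pgtable.h above simply set, test and clear it. A toy lifecycle, with a plain integer standing in for pteval_t:

#include <assert.h>

#define _PAGE_SWP_SOFT_DIRTY	(1UL << 7)	/* == _PAGE_PSE on x86 */

int main(void)
{
	unsigned long swp_pte = 0x1234UL;	/* some swap entry bits */

	swp_pte |= _PAGE_SWP_SOFT_DIRTY;	/* pte_swp_mksoft_dirty() */
	assert(swp_pte & _PAGE_SWP_SOFT_DIRTY);	/* pte_swp_soft_dirty() */
	swp_pte &= ~_PAGE_SWP_SOFT_DIRTY;	/* pte_swp_clear_soft_dirty() */
	assert(!(swp_pte & _PAGE_SWP_SOFT_DIRTY));
	return 0;
}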
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 33692eaabab5..e3ddd7db723f 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
-/* The {read|write|spin}_lock() on x86 are full memory barriers. */
-static inline void smp_mb__after_lock(void) { }
-#define ARCH_HAS_SMP_MB_AFTER_LOCK
-
#endif /* _ASM_X86_SPINLOCK_H */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f654ecefea5b..08a089043ccf 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
static const int amd_erratum_383[];
static const int amd_erratum_400[];
-static bool cpu_has_amd_erratum(const int *erratum);
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
static void init_amd(struct cpuinfo_x86 *c)
{
@@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c)
value &= ~(1ULL << 24);
wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
- if (cpu_has_amd_erratum(amd_erratum_383))
+ if (cpu_has_amd_erratum(c, amd_erratum_383))
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
- if (cpu_has_amd_erratum(amd_erratum_400))
+ if (cpu_has_amd_erratum(c, amd_erratum_400))
set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
@@ -878,23 +878,13 @@ static const int amd_erratum_400[] =
static const int amd_erratum_383[] =
AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-static bool cpu_has_amd_erratum(const int *erratum)
+
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
- struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
int osvw_id = *erratum++;
u32 range;
u32 ms;
- /*
- * If called early enough that current_cpu_data hasn't been initialized
- * yet, fall back to boot_cpu_data.
- */
- if (cpu->x86 == 0)
- cpu = &boot_cpu_data;
-
- if (cpu->x86_vendor != X86_VENDOR_AMD)
- return false;
-
if (osvw_id >= 0 && osvw_id < 65536 &&
cpu_has(cpu, X86_FEATURE_OSVW)) {
u64 osvw_len;
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index e2703520d120..c370e1c4468b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -111,8 +111,8 @@ static struct severity {
#ifdef CONFIG_MEMORY_FAILURE
MCESEV(
KEEP, "Action required but unaffected thread is continuable",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR),
- MCGMASK(MCG_STATUS_RIPV, MCG_STATUS_RIPV)
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR, MCI_UC_SAR|MCI_ADDR),
+ MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, MCG_STATUS_RIPV)
),
MCESEV(
AR, "Action required: data load error in a user process",
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index fbc9210b45bc..a45d8d4ace10 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2270,6 +2270,7 @@ __init int intel_pmu_init(void)
case 70:
case 71:
case 63:
+ case 69:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index cad791dbde95..1fb6c72717bd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = {
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
- INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
- INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
+ INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
+ INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
{ /* end: all zeroes */ },
};
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 94ab6b90dd3f..63bdb29b2549 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -196,15 +196,23 @@ static void __init ati_bugs_contd(int num, int slot, int func)
static void __init intel_remapping_check(int num, int slot, int func)
{
u8 revision;
+ u16 device;
+ device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
/*
- * Revision 0x13 of this chipset supports irq remapping
- * but has an erratum that breaks its behavior, flag it as such
+ * Revision 0x13 of all device ids that trigger this quirk has
+ * a problem draining interrupts when irq remapping is enabled,
+ * and should be flagged as broken.  Additionally, revisions 0x12
+ * and 0x22 of device id 0x3405 have this problem.
*/
if (revision == 0x13)
set_irq_remapping_broken();
+ else if ((device == 0x3405) &&
+ ((revision == 0x12) ||
+ (revision == 0x22)))
+ set_irq_remapping_broken();
}
@@ -239,6 +247,8 @@ static struct chipset early_qrk[] __initdata = {
PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
{ PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
+ { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
+ PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{}
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 202d24f0f7e7..5d576ab34403 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -116,7 +116,7 @@ static void mxcsr_feature_mask_init(void)
if (cpu_has_fxsr) {
memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
- asm volatile("fxsave %0" : : "m" (fx_scratch));
+ asm volatile("fxsave %0" : "+m" (fx_scratch));
mask = fx_scratch.mxcsr_mask;
if (mask == 0)
mask = 0x0000ffbf;
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 47ebb1dbfbcb..7123b5df479d 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
return 0;
}
-static unsigned int verify_patch_size(int cpu, u32 patch_size,
+static unsigned int verify_patch_size(u8 family, u32 patch_size,
unsigned int size)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 max_size;
#define F1XH_MPB_MAX_SIZE 2048
@@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458
- switch (c->x86) {
+ switch (family) {
case 0x14:
max_size = F14H_MPB_MAX_SIZE;
break;
@@ -220,12 +219,13 @@ int apply_microcode_amd(int cpu)
return 0;
}
- if (__apply_microcode_amd(mc_amd))
+ if (__apply_microcode_amd(mc_amd)) {
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
cpu, mc_amd->hdr.patch_id);
- else
- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
- mc_amd->hdr.patch_id);
+ return -1;
+ }
+ pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
+ mc_amd->hdr.patch_id);
uci->cpu_sig.rev = mc_amd->hdr.patch_id;
c->microcode = mc_amd->hdr.patch_id;
@@ -276,9 +276,8 @@ static void cleanup(void)
* driver cannot continue functioning normally. In such cases, we tear
* down everything we've used up so far and exit.
*/
-static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
+static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_header_amd *mc_hdr;
struct ucode_patch *patch;
unsigned int patch_size, crnt_size, ret;
@@ -298,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
/* check if patch is for the current family */
proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
- if (proc_fam != c->x86)
+ if (proc_fam != family)
return crnt_size;
if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
@@ -307,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
return crnt_size;
}
- ret = verify_patch_size(cpu, patch_size, leftover);
+ ret = verify_patch_size(family, patch_size, leftover);
if (!ret) {
pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
return crnt_size;
@@ -338,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
return crnt_size;
}
-static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size)
+static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+ size_t size)
{
enum ucode_state ret = UCODE_ERROR;
unsigned int leftover;
@@ -361,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
}
while (leftover) {
- crnt_size = verify_and_add_patch(cpu, fw, leftover);
+ crnt_size = verify_and_add_patch(family, fw, leftover);
if (crnt_size < 0)
return ret;
@@ -372,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
return UCODE_OK;
}
-enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
+enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
enum ucode_state ret;
/* free old equiv table */
free_equiv_cpu_table();
- ret = __load_microcode_amd(cpu, data, size);
+ ret = __load_microcode_amd(family, data, size);
if (ret != UCODE_OK)
cleanup();
#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
/* save BSP's matching patch for early load */
- if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
- struct ucode_patch *p = find_patch(cpu);
+ if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
+ struct ucode_patch *p = find_patch(smp_processor_id());
if (p) {
memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
@@ -440,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
goto fw_release;
}
- ret = load_microcode_amd(cpu, fw->data, fw->size);
+ ret = load_microcode_amd(c->x86, fw->data, fw->size);
fw_release:
release_firmware(fw);
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c
index 1d14ffee5749..6073104ccaa3 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/microcode_amd_early.c
@@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
uci->cpu_sig.sig = cpuid_eax(0x00000001);
}
#else
-static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
- struct ucode_cpu_info *uci)
+void load_ucode_amd_ap(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
u32 rev, eax;
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
eax = cpuid_eax(0x00000001);
- uci->cpu_sig.sig = eax;
uci->cpu_sig.rev = rev;
- c->microcode = rev;
- c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
-}
-
-void load_ucode_amd_ap(void)
-{
- unsigned int cpu = smp_processor_id();
-
- collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu);
+ uci->cpu_sig.sig = eax;
if (cpu && !ucode_loaded) {
void *ucode;
@@ -265,8 +257,10 @@ void load_ucode_amd_ap(void)
return;
ucode = (void *)(initrd_start + ucode_offset);
- if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK)
+ eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+ if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
return;
+
ucode_loaded = true;
}
@@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void)
{
enum ucode_state ret;
void *ucode;
+ u32 eax;
+
#ifdef CONFIG_X86_32
unsigned int bsp = boot_cpu_data.cpu_index;
struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
@@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void)
return 0;
ucode = (void *)(initrd_start + ucode_offset);
- ret = load_microcode_amd(0, ucode, ucode_size);
+ eax = cpuid_eax(0x00000001);
+ eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+
+ ret = load_microcode_amd(eax, ucode, ucode_size);
if (ret != UCODE_OK)
return -EINVAL;
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index dbded5aedb81..30277e27431a 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
*begin = new_begin;
}
} else {
- *begin = TASK_UNMAPPED_BASE;
+ *begin = current->mm->mmap_legacy_base;
*end = TASK_SIZE;
}
}
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 62c29a5bfe26..25e7e1372bb2 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -112,11 +112,13 @@ static unsigned long mmap_legacy_base(void)
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ mm->mmap_legacy_base = mmap_legacy_base();
+ mm->mmap_base = mmap_base();
+
if (mmap_is_legacy()) {
- mm->mmap_base = mmap_legacy_base();
+ mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
- mm->mmap_base = mmap_base();
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index 643b8b5eee86..8244f5ec2f4c 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/reboot.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
#include <linux/reboot.h>
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 056d11faef21..8f3eea6b80c5 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
e820_add_region(start, end - start, type);
}
+void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+ struct e820entry *entry;
+ unsigned int i;
+
+ for (i = 0, entry = list; i < map_size; i++, entry++) {
+ if (entry->type == E820_UNUSABLE)
+ entry->type = E820_RAM;
+ }
+}
+
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void)
}
BUG_ON(rc);
+ /*
+ * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+ * regions, so if we're using the machine memory map leave the
+ * region as RAM as it is in the pseudo-physical map.
+ *
+ * UNUSABLE regions in domUs are not handled and will need
+ * a patch in the future.
+ */
+ if (xen_initial_domain())
+ xen_ignore_unusable(map, memmap.nr_entries);
+
/* Make sure the Xen-supplied memory map is well-ordered. */
sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index ca92754eb846..b81c88e51daa 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc;
- rc = native_cpu_up(cpu, tidle);
- WARN_ON (xen_smp_intr_init(cpu));
+ /*
+ * xen_smp_intr_init() needs to run before native_cpu_up()
+ * so that IPI vectors are set up on the booting CPU before
+ * it is marked online in native_cpu_up().
+ */
+ rc = xen_smp_intr_init(cpu);
+ WARN_ON(rc);
+ if (!rc)
+ rc = native_cpu_up(cpu, tidle);
return rc;
}
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index d21167bfc865..dc34a5b8bcee 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -359,6 +359,9 @@ int braille_register_console(struct console *console, int index,
char *console_options, char *braille_options)
{
int ret;
+
+ if (!(console->flags & CON_BRL))
+ return 0;
if (!console_options)
/* Only support VisioBraille for now */
console_options = "57600o8";
@@ -374,15 +377,17 @@ int braille_register_console(struct console *console, int index,
braille_co = console;
register_keyboard_notifier(&keyboard_notifier_block);
register_vt_notifier(&vt_notifier_block);
- return 0;
+ return 1;
}
int braille_unregister_console(struct console *console)
{
if (braille_co != console)
return -EINVAL;
+ if (!(console->flags & CON_BRL))
+ return 0;
unregister_keyboard_notifier(&keyboard_notifier_block);
unregister_vt_notifier(&vt_notifier_block);
braille_co = NULL;
- return 0;
+ return 1;
}
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index fd6c51cc3acb..5a74a9c1e42c 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -451,7 +451,6 @@ static void acpi_processor_remove(struct acpi_device *device)
/* Clean up. */
per_cpu(processor_device_array, pr->id) = NULL;
per_cpu(processors, pr->id) = NULL;
- try_offline_node(cpu_to_node(pr->id));
/* Remove the CPU. */
get_online_cpus();
@@ -459,6 +458,8 @@ static void acpi_processor_remove(struct acpi_device *device)
acpi_unmap_lsapic(pr->id);
put_online_cpus();
+ try_offline_node(cpu_to_node(pr->id));
+
out:
free_cpumask_var(pr->throttling.shared_cpu_map);
kfree(pr);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 082b4dd252a8..d405fbad406a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -117,6 +117,7 @@ struct acpi_battery {
struct acpi_device *device;
struct notifier_block pm_nb;
unsigned long update_time;
+ int revision;
int rate_now;
int capacity_now;
int voltage_now;
@@ -359,6 +360,7 @@ static struct acpi_offsets info_offsets[] = {
};
static struct acpi_offsets extended_info_offsets[] = {
+ {offsetof(struct acpi_battery, revision), 0},
{offsetof(struct acpi_battery, power_unit), 0},
{offsetof(struct acpi_battery, design_capacity), 0},
{offsetof(struct acpi_battery, full_charge_capacity), 0},
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index f68095756fb7..408f6b2a5fa8 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -31,6 +31,7 @@ static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
#define PHYSICAL_NODE_STRING "physical_node"
+#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10)
int register_acpi_bus_type(struct acpi_bus_type *type)
{
@@ -78,41 +79,108 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
return ret;
}
-static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
- void *addr_p, void **ret_p)
+static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_p)
{
- unsigned long long addr, sta;
- acpi_status status;
+ struct acpi_device *adev = NULL;
- status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
- if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
+ acpi_bus_get_device(handle, &adev);
+ if (adev) {
*ret_p = handle;
- status = acpi_bus_get_status_handle(handle, &sta);
- if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED))
- return AE_CTRL_TERMINATE;
+ return AE_CTRL_TERMINATE;
}
return AE_OK;
}
-acpi_handle acpi_get_child(acpi_handle parent, u64 address)
+static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
{
- void *ret = NULL;
+ unsigned long long sta;
+ acpi_status status;
+
+ status = acpi_bus_get_status_handle(handle, &sta);
+ if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
+ return false;
+
+ if (is_bridge) {
+ void *test = NULL;
+
+ /* Check if this object has at least one child device. */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_dev_present, NULL, NULL, &test);
+ return !!test;
+ }
+ return true;
+}
+
+struct find_child_context {
+ u64 addr;
+ bool is_bridge;
+ acpi_handle ret;
+ bool ret_checked;
+};
+
+static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
+ void *data, void **not_used)
+{
+ struct find_child_context *context = data;
+ unsigned long long addr;
+ acpi_status status;
- if (!parent)
- return NULL;
+ status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
+ if (ACPI_FAILURE(status) || addr != context->addr)
+ return AE_OK;
- acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
- do_acpi_find_child, &address, &ret);
- return (acpi_handle)ret;
+ if (!context->ret) {
+ /* This is the first matching object. Save its handle. */
+ context->ret = handle;
+ return AE_OK;
+ }
+ /*
+ * There is more than one matching object with the same _ADR value.
+ * That really is unexpected, so we are kind of beyond the scope of the
+ * spec here. We have to choose which one to return, though.
+ *
+ * First, check if the previously found object is good enough and return
+ * its handle if so. Second, check the same for the object that we've
+ * just found.
+ */
+ if (!context->ret_checked) {
+ if (acpi_extra_checks_passed(context->ret, context->is_bridge))
+ return AE_CTRL_TERMINATE;
+ else
+ context->ret_checked = true;
+ }
+ if (acpi_extra_checks_passed(handle, context->is_bridge)) {
+ context->ret = handle;
+ return AE_CTRL_TERMINATE;
+ }
+ return AE_OK;
}
-EXPORT_SYMBOL(acpi_get_child);
+
+acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
+{
+ if (parent) {
+ struct find_child_context context = {
+ .addr = addr,
+ .is_bridge = is_bridge,
+ };
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
+ NULL, &context, NULL);
+ return context.ret;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_find_child);
int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
acpi_status status;
struct acpi_device_physical_node *physical_node, *pn;
- char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+ char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
+ struct list_head *physnode_list;
+ unsigned int node_id;
int retval = -EINVAL;
if (ACPI_HANDLE(dev)) {
@@ -139,25 +207,27 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
mutex_lock(&acpi_dev->physical_node_lock);
- /* Sanity check. */
- list_for_each_entry(pn, &acpi_dev->physical_node_list, node)
+ /*
+ * Keep the list sorted by node_id so that the IDs of removed nodes can
+ * be recycled easily.
+ */
+ physnode_list = &acpi_dev->physical_node_list;
+ node_id = 0;
+ list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
+ /* Sanity check. */
if (pn->dev == dev) {
dev_warn(dev, "Already associated with ACPI node\n");
goto err_free;
}
-
- /* allocate physical node id according to physical_node_id_bitmap */
- physical_node->node_id =
- find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
- ACPI_MAX_PHYSICAL_NODE);
- if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
- retval = -ENOSPC;
- goto err_free;
+ if (pn->node_id == node_id) {
+ physnode_list = &pn->node;
+ node_id++;
+ }
}
- set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
+ physical_node->node_id = node_id;
physical_node->dev = dev;
- list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
+ list_add(&physical_node->node, physnode_list);
acpi_dev->physical_node_count++;
mutex_unlock(&acpi_dev->physical_node_lock);
@@ -208,7 +278,7 @@ int acpi_unbind_one(struct device *dev)
mutex_lock(&acpi_dev->physical_node_lock);
list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
- char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+ char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
entry = list_entry(node, struct acpi_device_physical_node,
node);
@@ -216,7 +286,6 @@ int acpi_unbind_one(struct device *dev)
continue;
list_del(node);
- clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
acpi_dev->physical_node_count--;
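
[Editor's sketch] The glue.c hunk above replaces the fixed-size physical_node_id_bitmap with a list kept sorted by node_id: a new binding takes the first unused id found while walking the list, so ids freed by acpi_unbind_one() are recycled and the old ACPI_MAX_PHYSICAL_NODE cap disappears. A minimal standalone C sketch of the same first-gap scan; the names (struct node, first_free_id) are illustrative, not the kernel's:

#include <stdio.h>

struct node { unsigned int id; struct node *next; };

/* Walk a list sorted by id and return the smallest id not in use.
 * Mirrors the loop in acpi_bind_one(): while the ids are dense
 * (0, 1, 2, ...) keep advancing; the first gap is the new id. */
static unsigned int first_free_id(const struct node *head)
{
	unsigned int id = 0;

	for (; head; head = head->next)
		if (head->id == id)
			id++;	/* id is taken, try the next one */
	return id;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 1, &c }, a = { 0, &b };

	printf("%u\n", first_free_id(&a));	/* prints 2 */
	return 0;
}
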
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index aa1227a7e3f2..04a13784dd20 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
dev->pnp.bus_id,
(u32) dev->wakeup.sleep_state);
+ mutex_lock(&dev->physical_node_lock);
+
if (!dev->physical_node_count) {
seq_printf(seq, "%c%-8s\n",
dev->wakeup.flags.run_wake ? '*' : ' ',
@@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
put_device(ldev);
}
}
+
+ mutex_unlock(&dev->physical_node_lock);
}
mutex_unlock(&acpi_device_lock);
return 0;
@@ -347,12 +351,16 @@ static void physical_device_enable_wakeup(struct acpi_device *adev)
{
struct acpi_device_physical_node *entry;
+ mutex_lock(&adev->physical_node_lock);
+
list_for_each_entry(entry,
&adev->physical_node_list, node)
if (entry->dev && device_can_wakeup(entry->dev)) {
bool enable = !device_may_wakeup(entry->dev);
device_set_wakeup_enable(entry->dev, enable);
}
+
+ mutex_unlock(&adev->physical_node_lock);
}
static ssize_t
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 0ec434d2586d..3270d3c8ba4e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -689,7 +689,7 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
* Some systems always report current brightness level as maximum
* through _BQC, we need to test another value for them.
*/
- test_level = current_level == max_level ? br->levels[2] : max_level;
+ test_level = current_level == max_level ? br->levels[3] : max_level;
result = acpi_video_device_lcd_set_level(device, test_level);
if (result)
@@ -908,9 +908,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
device->cap._DDC = 1;
}
- if (acpi_video_init_brightness(device))
- return;
-
if (acpi_video_backlight_support()) {
struct backlight_properties props;
struct pci_dev *pdev;
@@ -920,6 +917,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
static int count = 0;
char *name;
+ result = acpi_video_init_brightness(device);
+ if (result)
+ return;
name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
@@ -979,11 +979,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (result)
printk(KERN_ERR PREFIX "Create sysfs link\n");
- } else {
- /* Remove the brightness object. */
- kfree(device->brightness->levels);
- kfree(device->brightness);
- device->brightness = NULL;
}
}
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 1c41722bb7e2..20fd337a5731 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
/* Disable sending Early R_OK.
* With "cached read" HDD testing and multiple ports busy on a SATA
- * host controller, 3726 PMP will very rarely drop a deferred
+ * host controller, 3x26 PMP will very rarely drop a deferred
* R_OK that was intended for the host. Symptom will be all
* 5 drives under test will timeout, get reset, and recover.
*/
- if (vendor == 0x1095 && devid == 0x3726) {
+ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
u32 reg;
err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
if (err_mask) {
rc = -EIO;
- reason = "failed to read Sil3726 Private Register";
+ reason = "failed to read Sil3x26 Private Register";
goto fail;
}
reg &= ~0x1;
err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
if (err_mask) {
rc = -EIO;
- reason = "failed to write Sil3726 Private Register";
+ reason = "failed to write Sil3x26 Private Register";
goto fail;
}
}
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
u16 devid = sata_pmp_gscr_devid(gscr);
struct ata_link *link;
- if (vendor == 0x1095 && devid == 0x3726) {
- /* sil3726 quirks */
+ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+ /* sil3x26 quirks */
ata_for_each_link(link, ap, EDGE) {
/* link reports offline after LPM */
link->flags |= ATA_LFLAG_NO_LPM;
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 4ec7c04b3f82..26386f0b89a8 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -237,6 +237,7 @@ static const struct of_device_id imx_pata_dt_ids[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 19720a0a4a65..851bd3f43ac6 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
{
struct sata_fsl_host_priv *host_priv = host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
+ unsigned long flags;
if (count > ICC_MAX_INT_COUNT_THRESHOLD)
count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
(count > ICC_MIN_INT_COUNT_THRESHOLD))
ticks = ICC_SAFE_INT_TICKS;
- spin_lock(&host->lock);
+ spin_lock_irqsave(&host->lock, flags);
iowrite32((count << 24 | ticks), hcr_base + ICC);
intr_coalescing_count = count;
intr_coalescing_ticks = ticks;
- spin_unlock(&host->lock);
+ spin_unlock_irqrestore(&host->lock, flags);
DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
intr_coalescing_count, intr_coalescing_ticks);
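
[Editor's sketch] The sata_fsl hunk swaps plain spin_lock() for spin_lock_irqsave() because host->lock is also taken from the controller's interrupt handler; taking it with interrupts enabled on the same CPU can deadlock. A hedged kernel-style sketch of the idiom (demo_lock and demo_update are illustrative names):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_lock);

/* Called from process context while an IRQ handler may also take
 * demo_lock: disable local interrupts for the critical section and
 * restore the previous interrupt state afterwards. */
static void demo_update(u32 *reg, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	*reg = val;
	spin_unlock_irqrestore(&demo_lock, flags);
}
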
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index d047d92a456f..e9a4f46d962e 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -86,11 +86,11 @@ struct ecx_plat_data {
#define SGPIO_SIGNALS 3
#define ECX_ACTIVITY_BITS 0x300000
-#define ECX_ACTIVITY_SHIFT 2
+#define ECX_ACTIVITY_SHIFT 0
#define ECX_LOCATE_BITS 0x80000
#define ECX_LOCATE_SHIFT 1
#define ECX_FAULT_BITS 0x400000
-#define ECX_FAULT_SHIFT 0
+#define ECX_FAULT_SHIFT 2
static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
u32 shift)
{
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index e69102696533..3455f833e473 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -719,7 +719,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
}
}
- return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+ return regcache_sync_block_raw_flush(map, &data, base, regtmp +
+ map->reg_stride);
}
int regcache_sync_block(struct regmap *map, void *block,
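
[Editor's sketch] The regcache fix is an off-by-one: regcache_sync_block_raw_flush() treats its last argument as an exclusive end, so the final flush must pass regtmp + map->reg_stride or the last register in the block is silently dropped. A tiny standalone sketch of the half-open convention (flush_range is a hypothetical name):

#include <stdio.h>

/* Flush registers in [base, end): end is exclusive, matching the
 * convention the patch restores. */
static void flush_range(unsigned int base, unsigned int end,
			unsigned int stride)
{
	unsigned int reg;

	for (reg = base; reg < end; reg += stride)
		printf("flush reg 0x%x\n", reg);
}

int main(void)
{
	/* Last register written was 0x8 with stride 4: pass 0x8 + 4 so
	 * register 0x8 itself is included in the flush. */
	flush_range(0x0, 0x8 + 4, 4);
	return 0;
}
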
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 99cb944a002d..4d45dba7fb8f 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -906,16 +906,10 @@ bio_pageinc(struct bio *bio)
int i;
bio_for_each_segment(bv, bio, i) {
- page = bv->bv_page;
/* Non-zero page count for non-head members of
- * compound pages is no longer allowed by the kernel,
- * but this has never been seen here.
+ * compound pages is no longer allowed by the kernel.
*/
- if (unlikely(PageCompound(page)))
- if (compound_trans_head(page) != page) {
- pr_crit("page tail used for block I/O\n");
- BUG();
- }
+ page = compound_trans_head(bv->bv_page);
atomic_inc(&page->_count);
}
}
@@ -924,10 +918,13 @@ static void
bio_pagedec(struct bio *bio)
{
struct bio_vec *bv;
+ struct page *page;
int i;
- bio_for_each_segment(bv, bio, i)
- atomic_dec(&bv->bv_page->_count);
+ bio_for_each_segment(bv, bio, i) {
+ page = compound_trans_head(bv->bv_page);
+ atomic_dec(&page->_count);
+ }
}
static void
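
[Editor's sketch] The aoe change makes the get and put sides symmetric: both bio_pageinc() and bio_pagedec() now resolve each bio_vec page to its compound head before touching _count, instead of BUG()ing on tail pages in one path while decrementing the tail's count in the other. A hedged kernel-style sketch of the pairing (3.x-era API; later kernels use compound_head() and the page refcount helpers, and demo_page_get/put are illustrative names):

#include <linux/mm.h>

/* Always operate on the compound head so the increment and the later
 * decrement hit the same counter, even when a tail page is passed in. */
static inline void demo_page_get(struct page *page)
{
	atomic_inc(&compound_trans_head(page)->_count);
}

static inline void demo_page_put(struct page *page)
{
	atomic_dec(&compound_trans_head(page)->_count);
}
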
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 11f467c00d0a..a12b923bbaca 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -91,6 +91,10 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0489, 0xe04e) },
{ USB_DEVICE(0x0489, 0xe056) },
{ USB_DEVICE(0x0489, 0xe04d) },
+ { USB_DEVICE(0x04c5, 0x1330) },
+ { USB_DEVICE(0x13d3, 0x3402) },
+ { USB_DEVICE(0x0cf3, 0x3121) },
+ { USB_DEVICE(0x0cf3, 0xe003) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@@ -128,6 +132,10 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
@@ -193,24 +201,44 @@ error:
static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
{
- int pipe = 0;
+ int ret, pipe = 0;
+ char *buf;
+
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
pipe = usb_rcvctrlpipe(udev, 0);
- return usb_control_msg(udev, pipe, ATH3K_GETSTATE,
- USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
- state, 0x01, USB_CTRL_SET_TIMEOUT);
+ ret = usb_control_msg(udev, pipe, ATH3K_GETSTATE,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+ buf, sizeof(*buf), USB_CTRL_SET_TIMEOUT);
+
+ *state = *buf;
+ kfree(buf);
+
+ return ret;
}
static int ath3k_get_version(struct usb_device *udev,
struct ath3k_version *version)
{
- int pipe = 0;
+ int ret, pipe = 0;
+ struct ath3k_version *buf;
+ const int size = sizeof(*buf);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
pipe = usb_rcvctrlpipe(udev, 0);
- return usb_control_msg(udev, pipe, ATH3K_GETVERSION,
- USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version,
- sizeof(struct ath3k_version),
- USB_CTRL_SET_TIMEOUT);
+ ret = usb_control_msg(udev, pipe, ATH3K_GETVERSION,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+ buf, size, USB_CTRL_SET_TIMEOUT);
+
+ memcpy(version, buf, size);
+ kfree(buf);
+
+ return ret;
}
static int ath3k_load_fwfile(struct usb_device *udev,
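
[Editor's sketch] Both ath3k helpers previously passed caller-provided (possibly stack) memory straight to usb_control_msg(); USB transfer buffers must be DMA-able, so the patch bounces through a kmalloc()ed buffer and copies the result out afterwards. A condensed kernel-style sketch of the pattern; DEMO_VENDOR_REQ and demo_read_byte are hypothetical:

#include <linux/slab.h>
#include <linux/usb.h>

#define DEMO_VENDOR_REQ 0x05	/* hypothetical request id */

/* Read one byte with a vendor control transfer. The one-byte bounce
 * buffer is heap-allocated because usb_control_msg() hands it to the
 * host controller for DMA; stack memory is not safe for that. */
static int demo_read_byte(struct usb_device *udev, u8 *out)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      DEMO_VENDOR_REQ,
			      USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
			      buf, 1, USB_CTRL_SET_TIMEOUT);
	if (ret >= 0)
		*out = *buf;
	kfree(buf);
	return ret;
}
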
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index de4cf4daa2f4..8e16f0af6358 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -154,6 +154,10 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -1095,7 +1099,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
if (IS_ERR(skb)) {
BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
hdev->name, cmd->opcode, PTR_ERR(skb));
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
/* It ensures that the returned event matches the event data read from
@@ -1147,7 +1151,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (IS_ERR(skb)) {
BT_ERR("%s sending initial HCI reset command failed (%ld)",
hdev->name, PTR_ERR(skb));
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
kfree_skb(skb);
@@ -1161,7 +1165,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (IS_ERR(skb)) {
BT_ERR("%s reading Intel fw version command failed (%ld)",
hdev->name, PTR_ERR(skb));
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
if (skb->len != sizeof(*ver)) {
@@ -1219,7 +1223,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
BT_ERR("%s entering Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
release_firmware(fw);
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
if (skb->data[0]) {
@@ -1276,7 +1280,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
kfree_skb(skb);
@@ -1292,7 +1296,7 @@ exit_mfg_disable:
if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
kfree_skb(skb);
@@ -1310,7 +1314,7 @@ exit_mfg_deactivate:
if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
kfree_skb(skb);
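
[Editor's sketch] Every btusb hunk above is the same one-character fix: PTR_ERR() on an error pointer already yields a negative errno, so "return -PTR_ERR(skb)" flipped the sign and handed callers a bogus positive value. The idiom, in a hedged sketch where demo_get() is a hypothetical stand-in for __hci_cmd_sync():

#include <linux/err.h>
#include <linux/skbuff.h>

/* Hypothetical: returns a valid skb or an ERR_PTR()-encoded errno. */
extern struct sk_buff *demo_get(void);

static int demo_use(void)
{
	struct sk_buff *skb = demo_get();

	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* already negative; no extra '-' */

	kfree_skb(skb);
	return 0;
}
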
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index bf5d2477cb77..15f2e7025b78 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -129,7 +129,8 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
off_t j, io_pg_start;
int io_pg_count;
- if (type != 0 || mem->type != 0) {
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL;
}
@@ -175,7 +176,8 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
struct _parisc_agp_info *info = &parisc_agp_info;
int i, io_pg_start, io_pg_count;
- if (type != 0 || mem->type != 0) {
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 1b456fe9b87a..fc45567ad3ac 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -272,9 +272,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
unsigned long flags;
spin_lock_irqsave(&portdev->ports_lock, flags);
- list_for_each_entry(port, &portdev->ports, list)
- if (port->cdev->dev == dev)
+ list_for_each_entry(port, &portdev->ports, list) {
+ if (port->cdev->dev == dev) {
+ kref_get(&port->kref);
goto out;
+ }
+ }
port = NULL;
out:
spin_unlock_irqrestore(&portdev->ports_lock, flags);
@@ -746,6 +749,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
port = filp->private_data;
+ /* Port is hot-unplugged. */
+ if (!port->guest_connected)
+ return -ENODEV;
+
if (!port_has_data(port)) {
/*
* If nothing's connected on the host just return 0 in
@@ -762,7 +769,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
if (ret < 0)
return ret;
}
- /* Port got hot-unplugged. */
+ /* Port got hot-unplugged while we were waiting above. */
if (!port->guest_connected)
return -ENODEV;
/*
@@ -932,13 +939,25 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
if (is_rproc_serial(port->out_vq->vdev))
return -EINVAL;
+ /*
+ * pipe->nrbufs == 0 means there is no data to transfer,
+ * so just return 0 here.
+ */
+ pipe_lock(pipe);
+ if (!pipe->nrbufs) {
+ ret = 0;
+ goto error_out;
+ }
+
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
- return ret;
+ goto error_out;
buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto error_out;
+ }
sgl.n = 0;
sgl.len = 0;
@@ -946,12 +965,17 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
sgl.sg = buf->sg;
sg_init_table(sgl.sg, sgl.size);
ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
+ pipe_unlock(pipe);
if (likely(ret > 0))
ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
if (unlikely(ret <= 0))
free_buf(buf, true);
return ret;
+
+error_out:
+ pipe_unlock(pipe);
+ return ret;
}
static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
@@ -1019,14 +1043,14 @@ static int port_fops_open(struct inode *inode, struct file *filp)
struct port *port;
int ret;
+ /* We get the port with a kref here */
port = find_port_by_devt(cdev->dev);
+ if (!port) {
+ /* Port was unplugged before we could proceed */
+ return -ENXIO;
+ }
filp->private_data = port;
- /* Prevent against a port getting hot-unplugged at the same time */
- spin_lock_irq(&port->portdev->ports_lock);
- kref_get(&port->kref);
- spin_unlock_irq(&port->portdev->ports_lock);
-
/*
* Don't allow opening of console port devices -- that's done
* via /dev/hvc
@@ -1498,14 +1522,6 @@ static void remove_port(struct kref *kref)
port = container_of(kref, struct port, kref);
- sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
- device_destroy(pdrvdata.class, port->dev->devt);
- cdev_del(port->cdev);
-
- kfree(port->name);
-
- debugfs_remove(port->debugfs_file);
-
kfree(port);
}
@@ -1539,12 +1555,14 @@ static void unplug_port(struct port *port)
spin_unlock_irq(&port->portdev->ports_lock);
if (port->guest_connected) {
+ /* Let the app know the port is going down. */
+ send_sigio_to_port(port);
+
+ /* Do this after sigio is actually sent */
port->guest_connected = false;
port->host_connected = false;
- wake_up_interruptible(&port->waitqueue);
- /* Let the app know the port is going down. */
- send_sigio_to_port(port);
+ wake_up_interruptible(&port->waitqueue);
}
if (is_console_port(port)) {
@@ -1563,6 +1581,14 @@ static void unplug_port(struct port *port)
*/
port->portdev = NULL;
+ sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+ device_destroy(pdrvdata.class, port->dev->devt);
+ cdev_del(port->cdev);
+
+ kfree(port->name);
+
+ debugfs_remove(port->debugfs_file);
+
/*
* Locks around here are not necessary - a port can't be
* opened after we removed the port struct from ports_list
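
[Editor's sketch] The virtio_console changes close a hot-unplug race: the reference must be taken inside find_port_by_devt_in_portdev() while ports_lock is still held, not later in open(), otherwise the port can be freed between the lookup and the kref_get(). The general lookup-and-get pattern, sketched with a hypothetical object type (demo_obj, demo_find_and_get):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_obj {
	struct kref kref;
	int id;
	struct list_head list;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

/* Take the reference while the list lock is held, so the object
 * cannot be removed and freed between the lookup and the get. */
static struct demo_obj *demo_find_and_get(int id)
{
	struct demo_obj *obj, *found = NULL;

	spin_lock(&demo_lock);
	list_for_each_entry(obj, &demo_list, list) {
		if (obj->id == id) {
			kref_get(&obj->kref);
			found = obj;
			break;
		}
	}
	spin_unlock(&demo_lock);
	return found;
}
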
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 1bdb882c845b..4e5739773c33 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -581,11 +581,15 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
- DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
- DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
+ DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
+ CLK_GET_RATE_NOCACHE, 0),
+ DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
+ CLK_GET_RATE_NOCACHE, 0),
DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
- DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3),
- DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3),
+ DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
+ 4, 3, CLK_GET_RATE_NOCACHE, 0),
+ DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
+ 8, 3, CLK_GET_RATE_NOCACHE, 0),
DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
};
@@ -863,57 +867,57 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100",
E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"),
GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
};
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 5c205b60a82a..089d3e30e221 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -71,6 +71,7 @@ static DEFINE_SPINLOCK(armpll_lock);
static DEFINE_SPINLOCK(ddrpll_lock);
static DEFINE_SPINLOCK(iopll_lock);
static DEFINE_SPINLOCK(armclk_lock);
+static DEFINE_SPINLOCK(swdtclk_lock);
static DEFINE_SPINLOCK(ddrclk_lock);
static DEFINE_SPINLOCK(dciclk_lock);
static DEFINE_SPINLOCK(gem0clk_lock);
@@ -293,7 +294,7 @@ static void __init zynq_clk_setup(struct device_node *np)
}
clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt],
swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT,
- SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock);
+ SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock);
/* DDR clocks */
clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
@@ -364,8 +365,9 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem0clk_lock);
- clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0,
- SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock);
+ clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
+ CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0,
+ &gem0clk_lock);
clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0],
"gem0_emio_mux", CLK_SET_RATE_PARENT,
SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock);
@@ -386,8 +388,9 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem1clk_lock);
- clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0,
- SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock);
+ clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
+ CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0,
+ &gem1clk_lock);
clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1],
"gem1_emio_mux", CLK_SET_RATE_PARENT,
SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a4ad7339588d..f0a5e2b0eb8a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1177,14 +1177,11 @@ static int __cpufreq_remove_dev(struct device *dev,
__func__, cpu_dev->id, cpu);
}
- if ((cpus == 1) && (cpufreq_driver->target))
- __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
-
- pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
- cpufreq_cpu_put(data);
-
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
+ if (cpufreq_driver->target)
+ __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
lock_policy_rwsem_read(cpu);
kobj = &data->kobj;
cmp = &data->kobj_unregister;
@@ -1205,9 +1202,13 @@ static int __cpufreq_remove_dev(struct device *dev,
free_cpumask_var(data->related_cpus);
free_cpumask_var(data->cpus);
kfree(data);
- } else if (cpufreq_driver->target) {
- __cpufreq_governor(data, CPUFREQ_GOV_START);
- __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+ } else {
+ pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+ cpufreq_cpu_put(data);
+ if (cpufreq_driver->target) {
+ __cpufreq_governor(data, CPUFREQ_GOV_START);
+ __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+ }
}
per_cpu(cpufreq_policy_cpu, cpu) = -1;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 0ceb2eff5a7e..f97cb3d8c5a2 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -221,8 +221,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
{
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input, j;
@@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
if (input > 1)
input = 1;
- if (input == cs_tuners->ignore_nice) /* nothing to do */
+ if (input == cs_tuners->ignore_nice_load) /* nothing to do */
return count;
- cs_tuners->ignore_nice = input;
+ cs_tuners->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
@@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
dbs_info = &per_cpu(cs_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->cdbs.prev_cpu_wall, 0);
- if (cs_tuners->ignore_nice)
+ if (cs_tuners->ignore_nice_load)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
@@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
-show_store_one(cs, ignore_nice);
+show_store_one(cs, ignore_nice_load);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);
@@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
-gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);
@@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
&sampling_down_factor_gov_sys.attr,
&up_threshold_gov_sys.attr,
&down_threshold_gov_sys.attr,
- &ignore_nice_gov_sys.attr,
+ &ignore_nice_load_gov_sys.attr,
&freq_step_gov_sys.attr,
NULL
};
@@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
&sampling_down_factor_gov_pol.attr,
&up_threshold_gov_pol.attr,
&down_threshold_gov_pol.attr,
- &ignore_nice_gov_pol.attr,
+ &ignore_nice_load_gov_pol.attr,
&freq_step_gov_pol.attr,
NULL
};
@@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_data)
tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
- tuners->ignore_nice = 0;
+ tuners->ignore_nice_load = 0;
tuners->freq_step = DEF_FREQUENCY_STEP;
dbs_data->tuners = tuners;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 7b839a8db2a7..e59afaa9da23 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -47,9 +47,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
unsigned int j;
if (dbs_data->cdata->governor == GOV_ONDEMAND)
- ignore_nice = od_tuners->ignore_nice;
+ ignore_nice = od_tuners->ignore_nice_load;
else
- ignore_nice = cs_tuners->ignore_nice;
+ ignore_nice = cs_tuners->ignore_nice_load;
policy = cdbs->cur_policy;
@@ -298,12 +298,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
cs_tuners = dbs_data->tuners;
cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
sampling_rate = cs_tuners->sampling_rate;
- ignore_nice = cs_tuners->ignore_nice;
+ ignore_nice = cs_tuners->ignore_nice_load;
} else {
od_tuners = dbs_data->tuners;
od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
sampling_rate = od_tuners->sampling_rate;
- ignore_nice = od_tuners->ignore_nice;
+ ignore_nice = od_tuners->ignore_nice_load;
od_ops = dbs_data->cdata->gov_ops;
io_busy = od_tuners->io_is_busy;
}
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 6663ec3b3056..d5f12b4b11b8 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -165,7 +165,7 @@ struct cs_cpu_dbs_info_s {
/* Per policy Governers sysfs tunables */
struct od_dbs_tuners {
- unsigned int ignore_nice;
+ unsigned int ignore_nice_load;
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
@@ -175,7 +175,7 @@ struct od_dbs_tuners {
};
struct cs_dbs_tuners {
- unsigned int ignore_nice;
+ unsigned int ignore_nice_load;
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 93eb5cbcc1f6..c087347d6688 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -403,8 +403,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
return count;
}
-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
{
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
@@ -419,10 +419,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
if (input > 1)
input = 1;
- if (input == od_tuners->ignore_nice) { /* nothing to do */
+ if (input == od_tuners->ignore_nice_load) { /* nothing to do */
return count;
}
- od_tuners->ignore_nice = input;
+ od_tuners->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
@@ -430,7 +430,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
- if (od_tuners->ignore_nice)
+ if (od_tuners->ignore_nice_load)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -461,7 +461,7 @@ show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
-show_store_one(od, ignore_nice);
+show_store_one(od, ignore_nice_load);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);
@@ -469,7 +469,7 @@ gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
-gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);
@@ -478,7 +478,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
&sampling_rate_gov_sys.attr,
&up_threshold_gov_sys.attr,
&sampling_down_factor_gov_sys.attr,
- &ignore_nice_gov_sys.attr,
+ &ignore_nice_load_gov_sys.attr,
&powersave_bias_gov_sys.attr,
&io_is_busy_gov_sys.attr,
NULL
@@ -494,7 +494,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
&sampling_rate_gov_pol.attr,
&up_threshold_gov_pol.attr,
&sampling_down_factor_gov_pol.attr,
- &ignore_nice_gov_pol.attr,
+ &ignore_nice_load_gov_pol.attr,
&powersave_bias_gov_pol.attr,
&io_is_busy_gov_pol.attr,
NULL
@@ -544,7 +544,7 @@ static int od_init(struct dbs_data *dbs_data)
}
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
- tuners->ignore_nice = 0;
+ tuners->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
tuners->io_is_busy = should_io_be_busy();
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index bb838b985077..9536852c504a 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
clk_put(cpuclk);
return -EINVAL;
}
- ret = clk_set_rate(cpuclk, rate);
- if (ret) {
- clk_put(cpuclk);
- return ret;
- }
/* clock table init */
for (i = 2;
@@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
i++)
loongson2_clockmod_table[i].frequency = (rate * i) / 8;
+ ret = clk_set_rate(cpuclk, rate);
+ if (ret) {
+ clk_put(cpuclk);
+ return ret;
+ }
+
policy->cur = loongson2_cpufreq_get(policy->cpu);
cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index fe343a06b7da..bc580b67a652 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -28,13 +28,6 @@
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400
-/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
-#define MAX_DEVIATION 60
-
-static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
-static DEFINE_PER_CPU(int, hrtimer_status);
-/* menu hrtimer mode */
-enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
/*
* Concepts and ideas behind the menu governor
@@ -116,13 +109,6 @@ enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
*
*/
-/*
- * The C-state residency is so long that is is worthwhile to exit
- * from the shallow C-state and re-enter into a deeper C-state.
- */
-static unsigned int perfect_cstate_ms __read_mostly = 30;
-module_param(perfect_cstate_ms, uint, 0000);
-
struct menu_device {
int last_state_idx;
int needs_update;
@@ -205,52 +191,17 @@ static u64 div_round64(u64 dividend, u32 divisor)
return div_u64(dividend + (divisor / 2), divisor);
}
-/* Cancel the hrtimer if it is not triggered yet */
-void menu_hrtimer_cancel(void)
-{
- int cpu = smp_processor_id();
- struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
-
- /* The timer is still not time out*/
- if (per_cpu(hrtimer_status, cpu)) {
- hrtimer_cancel(hrtmr);
- per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
- }
-}
-EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);
-
-/* Call back for hrtimer is triggered */
-static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
-{
- int cpu = smp_processor_id();
- struct menu_device *data = &per_cpu(menu_devices, cpu);
-
- /* In general case, the expected residency is much larger than
- * deepest C-state target residency, but prediction logic still
- * predicts a small predicted residency, so the prediction
- * history is totally broken if the timer is triggered.
- * So reset the correction factor.
- */
- if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
- data->correction_factor[data->bucket] = RESOLUTION * DECAY;
-
- per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
-
- return HRTIMER_NORESTART;
-}
-
/*
* Try detecting repeating patterns by keeping track of the last 8
* intervals, and checking if the standard deviation of that set
* of points is below a threshold. If it is... then use the
* average of these 8 points as the estimated value.
*/
-static u32 get_typical_interval(struct menu_device *data)
+static void get_typical_interval(struct menu_device *data)
{
int i = 0, divisor = 0;
uint64_t max = 0, avg = 0, stddev = 0;
int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
- unsigned int ret = 0;
again:
@@ -291,16 +242,13 @@ again:
if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
|| stddev <= 20) {
data->predicted_us = avg;
- ret = 1;
- return ret;
+ return;
} else if ((divisor * 4) > INTERVALS * 3) {
/* Exclude the max interval */
thresh = max - 1;
goto again;
}
-
- return ret;
}
/**
@@ -315,9 +263,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
int i;
int multiplier;
struct timespec t;
- int repeat = 0, low_predicted = 0;
- int cpu = smp_processor_id();
- struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
if (data->needs_update) {
menu_update(drv, dev);
@@ -352,7 +297,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
RESOLUTION * DECAY);
- repeat = get_typical_interval(data);
+ get_typical_interval(data);
/*
* We want to default to C1 (hlt), not to busy polling
@@ -373,10 +318,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (s->disabled || su->disable)
continue;
- if (s->target_residency > data->predicted_us) {
- low_predicted = 1;
+ if (s->target_residency > data->predicted_us)
continue;
- }
if (s->exit_latency > latency_req)
continue;
if (s->exit_latency * multiplier > data->predicted_us)
@@ -386,44 +329,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->exit_us = s->exit_latency;
}
- /* not deepest C-state chosen for low predicted residency */
- if (low_predicted) {
- unsigned int timer_us = 0;
- unsigned int perfect_us = 0;
-
- /*
- * Set a timer to detect whether this sleep is much
- * longer than repeat mode predicted. If the timer
- * triggers, the code will evaluate whether to put
- * the CPU into a deeper C-state.
- * The timer is cancelled on CPU wakeup.
- */
- timer_us = 2 * (data->predicted_us + MAX_DEVIATION);
-
- perfect_us = perfect_cstate_ms * 1000;
-
- if (repeat && (4 * timer_us < data->expected_us)) {
- RCU_NONIDLE(hrtimer_start(hrtmr,
- ns_to_ktime(1000 * timer_us),
- HRTIMER_MODE_REL_PINNED));
- /* In repeat case, menu hrtimer is started */
- per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
- } else if (perfect_us < data->expected_us) {
- /*
- * The next timer is long. This could be because
- * we did not make a useful prediction.
- * In that case, it makes sense to re-enter
- * into a deeper C-state after some time.
- */
- RCU_NONIDLE(hrtimer_start(hrtmr,
- ns_to_ktime(1000 * timer_us),
- HRTIMER_MODE_REL_PINNED));
- /* In general case, menu hrtimer is started */
- per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
- }
-
- }
-
return data->last_state_idx;
}
@@ -514,9 +419,6 @@ static int menu_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
- struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
- hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- t->function = menu_hrtimer_notify;
memset(data, 0, sizeof(struct menu_device));
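
[Editor's sketch] After this patch get_typical_interval() only refines data->predicted_us: it averages the last 8 sleep intervals, accepts the average as the prediction when the spread is small, and otherwise discards the largest sample and retries. A standalone C sketch of that detector; the thresholds are illustrative, and the kernel compares against the variance to avoid a square root:

#include <stdint.h>
#include <stdio.h>

#define INTERVALS 8

static int typical_interval(const uint32_t iv[INTERVALS], uint64_t *out)
{
	uint64_t thresh = UINT64_MAX;

	for (;;) {
		uint64_t max = 0, avg = 0, var = 0;
		int i, divisor = 0;

		for (i = 0; i < INTERVALS; i++) {
			if (iv[i] > thresh)
				continue;	/* discarded outlier */
			divisor++;
			avg += iv[i];
			if (iv[i] > max)
				max = iv[i];
		}
		if (!divisor)
			return 0;
		avg /= divisor;

		for (i = 0; i < INTERVALS; i++) {
			int64_t d;

			if (iv[i] > thresh)
				continue;
			d = (int64_t)iv[i] - (int64_t)avg;
			var += (uint64_t)(d * d);
		}
		var /= divisor;

		/* avg > 6*stddev, expressed as avg^2 > 36*var */
		if (var <= 400 ||
		    (divisor * 4 >= INTERVALS * 3 && avg * avg > 36 * var)) {
			*out = avg;
			return 1;
		}
		if (divisor * 4 <= INTERVALS * 3)
			return 0;	/* too few samples remain */
		thresh = max - 1;	/* exclude the max and retry */
	}
}

int main(void)
{
	const uint32_t iv[INTERVALS] = { 100, 102, 98, 101, 5000, 99, 100, 103 };
	uint64_t us;

	if (typical_interval(iv, &us))
		printf("typical sleep: %llu us\n", (unsigned long long)us);
	return 0;
}
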
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index ce3dc3e9688c..0bbdea5059f3 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -867,6 +867,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Cannot find proper base address\n");
+ err = -ENODEV;
goto err_disable_pdev;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 593827b3fdd4..fa645d825009 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2505,6 +2505,10 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
/* Assign cookies to all nodes */
while (!list_empty(&last->node)) {
desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+ if (pch->cyclic) {
+ desc->txd.callback = last->txd.callback;
+ desc->txd.callback_param = last->txd.callback_param;
+ }
dma_cookie_assign(&desc->txd);
@@ -2688,45 +2692,82 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
{
- struct dma_pl330_desc *desc;
+ struct dma_pl330_desc *desc = NULL, *first = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+ unsigned int i;
dma_addr_t dst;
dma_addr_t src;
- desc = pl330_get_desc(pch);
- if (!desc) {
- dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
- __func__, __LINE__);
+ if (len % period_len != 0)
return NULL;
- }
- switch (direction) {
- case DMA_MEM_TO_DEV:
- desc->rqcfg.src_inc = 1;
- desc->rqcfg.dst_inc = 0;
- desc->req.rqtype = MEMTODEV;
- src = dma_addr;
- dst = pch->fifo_addr;
- break;
- case DMA_DEV_TO_MEM:
- desc->rqcfg.src_inc = 0;
- desc->rqcfg.dst_inc = 1;
- desc->req.rqtype = DEVTOMEM;
- src = pch->fifo_addr;
- dst = dma_addr;
- break;
- default:
+ if (!is_slave_direction(direction)) {
dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
__func__, __LINE__);
return NULL;
}
- desc->rqcfg.brst_size = pch->burst_sz;
- desc->rqcfg.brst_len = 1;
+ for (i = 0; i < len / period_len; i++) {
+ desc = pl330_get_desc(pch);
+ if (!desc) {
+ dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+ __func__, __LINE__);
- pch->cyclic = true;
+ if (!first)
+ return NULL;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ while (!list_empty(&first->node)) {
+ desc = list_entry(first->node.next,
+ struct dma_pl330_desc, node);
+ list_move_tail(&desc->node, &pdmac->desc_pool);
+ }
+
+ list_move_tail(&first->node, &pdmac->desc_pool);
- fill_px(&desc->px, dst, src, period_len);
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+ return NULL;
+ }
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ desc->rqcfg.src_inc = 1;
+ desc->rqcfg.dst_inc = 0;
+ desc->req.rqtype = MEMTODEV;
+ src = dma_addr;
+ dst = pch->fifo_addr;
+ break;
+ case DMA_DEV_TO_MEM:
+ desc->rqcfg.src_inc = 0;
+ desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = DEVTOMEM;
+ src = pch->fifo_addr;
+ dst = dma_addr;
+ break;
+ default:
+ break;
+ }
+
+ desc->rqcfg.brst_size = pch->burst_sz;
+ desc->rqcfg.brst_len = 1;
+ fill_px(&desc->px, dst, src, period_len);
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->node);
+
+ dma_addr += period_len;
+ }
+
+ if (!desc)
+ return NULL;
+
+ pch->cyclic = true;
+ desc->txd.flags = flags;
return &desc->txd;
}
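
[Editor's sketch] The reworked pl330_prep_dma_cyclic() rejects transfers that are not a whole number of periods, then prepares one descriptor per period, each advancing the DMA address by period_len, chaining them behind the first and tearing the chain down if any allocation fails. A minimal standalone sketch of the splitting step only (demo_prep_cyclic is an illustrative name, not the driver's):

#include <stdio.h>

static int demo_prep_cyclic(unsigned long addr, size_t len, size_t period)
{
	size_t i;

	if (len % period != 0)	/* the patch rejects partial periods */
		return -1;
	for (i = 0; i < len / period; i++, addr += period)
		printf("desc %zu: 0x%lx + %zu\n", i, addr, period);
	return 0;
}

int main(void)
{
	return demo_prep_cyclic(0x30000000UL, 4096, 1024);
}
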
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index b67f45f5c271..5039fbc88254 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -400,8 +400,8 @@ static size_t sh_dmae_get_partial(struct shdma_chan *schan,
shdma_chan);
struct sh_dmae_desc *sh_desc = container_of(sdesc,
struct sh_dmae_desc, shdma_desc);
- return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
- sh_chan->xmit_shift;
+ return sh_desc->hw.tcr -
+ (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}
/* Called from error IRQ or NMI */
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 7ef316fdc4d9..ac1b43a04285 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -54,6 +54,7 @@
#define FW_CDEV_KERNEL_VERSION 5
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
+#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
struct client {
u32 version;
@@ -1005,6 +1006,8 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
a->channel, a->speed, a->header_size, cb, client);
if (IS_ERR(context))
return PTR_ERR(context);
+ if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
+ context->drop_overflow_headers = true;
/* We only support one context at this time. */
spin_lock_irq(&client->lock);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9e1db6490b9a..afb701ec90ca 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2749,8 +2749,11 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
{
u32 *ctx_hdr;
- if (ctx->header_length + ctx->base.header_size > PAGE_SIZE)
+ if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
+ if (ctx->base.drop_overflow_headers)
+ return;
flush_iso_completions(ctx);
+ }
ctx_hdr = ctx->header + ctx->header_length;
ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
@@ -2910,8 +2913,11 @@ static int handle_it_packet(struct context *context,
sync_it_packet_for_cpu(context, d);
- if (ctx->header_length + 4 > PAGE_SIZE)
+ if (ctx->header_length + 4 > PAGE_SIZE) {
+ if (ctx->base.drop_overflow_headers)
+ return 1;
flush_iso_completions(ctx);
+ }
ctx_hdr = ctx->header + ctx->header_length;
ctx->last_timestamp = le16_to_cpu(last->res_count);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index eb760a218da4..232fa8fce26a 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -419,6 +419,13 @@ static void __init dmi_format_ids(char *buf, size_t len)
dmi_get_system_info(DMI_BIOS_DATE));
}
+/*
+ * Check for DMI/SMBIOS headers in the system firmware image. Any
+ * SMBIOS header must start 16 bytes before the DMI header, so take a
+ * 32 byte buffer and check for DMI at offset 16 and SMBIOS at offset
+ * 0. If the DMI header is present, set dmi_ver accordingly (SMBIOS
+ * takes precedence) and return 0. Otherwise return 1.
+ */
static int __init dmi_present(const u8 *buf)
{
int smbios_ver;
@@ -506,6 +513,13 @@ void __init dmi_scan_machine(void)
if (p == NULL)
goto error;
+ /*
+ * Iterate over all possible DMI header addresses q.
+ * Maintain the 32 bytes around q in buf. On the
+ * first iteration, substitute zero for the
+ * out-of-range bytes so there is no chance of falsely
+ * detecting an SMBIOS header.
+ */
memset(buf, 0, 16);
for (q = p; q < p + 0x10000; q += 16) {
memcpy_fromio(buf + 16, q, 16);
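
[Editor's sketch] The dmi_scan comments formalize the probe loop: DMI anchors sit on 16-byte boundaries, and an SMBIOS entry point, when present, starts exactly 16 bytes before the "_DMI_" anchor, so the scanner keeps a 32-byte window with the SMBIOS candidate at offset 0 and the DMI candidate at offset 16. A hedged userspace sketch of the window handling (find_dmi is an illustrative name):

#include <stdint.h>
#include <string.h>

/* Scan an image for "_DMI_" on 16-byte boundaries while keeping the
 * 16 bytes before each candidate available at buf[0..15] for an
 * "_SM_" (SMBIOS) check, as the kernel loop does. */
static long find_dmi(const uint8_t *img, size_t len)
{
	uint8_t buf[32];
	size_t off;

	memset(buf, 0, 16);	/* no bytes before the first candidate */
	for (off = 0; off + 16 <= len; off += 16) {
		memcpy(buf + 16, img + off, 16);
		if (!memcmp(buf + 16, "_DMI_", 5))
			return (long)off;	/* buf[0..15] may hold "_SM_" */
		memcpy(buf, buf + 16, 16);	/* slide the window */
	}
	return -1;
}
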
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c
index e3ceaacde45c..73b73969d361 100644
--- a/drivers/gpio/gpio-msm-v1.c
+++ b/drivers/gpio/gpio-msm-v1.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/err.h>
#include <mach/msm_gpiomux.h>
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index c57244ef428b..dfeb3a3a8f20 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1037,18 +1037,6 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
-#if defined(CONFIG_OF_GPIO)
-static inline bool omap_gpio_chip_boot_dt(struct gpio_chip *chip)
-{
- return chip->of_node != NULL;
-}
-#else
-static inline bool omap_gpio_chip_boot_dt(struct gpio_chip *chip)
-{
- return false;
-}
-#endif
-
static void omap_gpio_chip_init(struct gpio_bank *bank)
{
int j;
@@ -1080,68 +1068,24 @@ static void omap_gpio_chip_init(struct gpio_bank *bank)
gpiochip_add(&bank->chip);
- /*
- * REVISIT these explicit calls to irq_create_mapping()
- * to do the GPIO to IRQ domain mapping for each GPIO in
- * the bank can be removed once all OMAP platforms have
- * been migrated to Device Tree boot only.
- * Since in DT boot irq_create_mapping() is called from
- * irq_create_of_mapping() only for the GPIO lines that
- * are used as interrupts.
- */
- if (!omap_gpio_chip_boot_dt(&bank->chip))
- for (j = 0; j < bank->width; j++)
- irq_create_mapping(bank->domain, j);
+ for (j = 0; j < bank->width; j++) {
+ int irq = irq_create_mapping(bank->domain, j);
+ irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_chip_data(irq, bank);
+ if (bank->is_mpuio) {
+ omap_mpuio_alloc_gc(bank, irq, bank->width);
+ } else {
+ irq_set_chip_and_handler(irq, &gpio_irq_chip,
+ handle_simple_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+ }
irq_set_chained_handler(bank->irq, gpio_irq_handler);
irq_set_handler_data(bank->irq, bank);
}
static const struct of_device_id omap_gpio_match[];
-static int omap_gpio_irq_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hwirq)
-{
- struct gpio_bank *bank = d->host_data;
- int gpio;
- int ret;
-
- if (!bank)
- return -EINVAL;
-
- irq_set_lockdep_class(virq, &gpio_lock_class);
- irq_set_chip_data(virq, bank);
- if (bank->is_mpuio) {
- omap_mpuio_alloc_gc(bank, virq, bank->width);
- } else {
- irq_set_chip_and_handler(virq, &gpio_irq_chip,
- handle_simple_irq);
- set_irq_flags(virq, IRQF_VALID);
- }
-
- /*
- * REVISIT most GPIO IRQ chip drivers need to call
- * gpio_request() before a GPIO line can be used as an
- * IRQ. Ideally this should be handled by the IRQ core
- * but until then this has to be done on a per driver
- * basis. Remove this once this is managed by the core.
- */
- if (omap_gpio_chip_boot_dt(&bank->chip)) {
- gpio = irq_to_gpio(bank, hwirq);
- ret = gpio_request_one(gpio, GPIOF_IN, NULL);
- if (ret) {
- dev_err(bank->dev, "Could not request GPIO%d\n", gpio);
- return ret;
- }
- }
-
- return 0;
-}
-
-static struct irq_domain_ops omap_gpio_irq_ops = {
- .xlate = irq_domain_xlate_onetwocell,
- .map = omap_gpio_irq_map,
-};
-
static int omap_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1207,10 +1151,10 @@ static int omap_gpio_probe(struct platform_device *pdev)
}
bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
- 0, &omap_gpio_irq_ops, bank);
+ 0, &irq_domain_simple_ops, NULL);
#else
bank->domain = irq_domain_add_linear(node, bank->width,
- &omap_gpio_irq_ops, bank);
+ &irq_domain_simple_ops, NULL);
#endif
if (!bank->domain) {
dev_err(dev, "Couldn't register an IRQ domain\n");
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 20fcf4ee3af0..32aecb34dbce 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -324,6 +324,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
}
astbo->bo.bdev = &ast->ttm.bdev;
+ astbo->bo.bdev->dev_mapping = dev->dev_mapping;
ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index ae2385cc71cb..75becdeac07d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -329,6 +329,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
}
cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+ cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a207cc3f2c57..1688ff500513 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -125,6 +125,9 @@ static struct edid_quirk {
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+
+ /* Medion MD 30217 PG */
+ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
};
/*
@@ -2882,6 +2885,58 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
EXPORT_SYMBOL(drm_edid_to_sad);
/**
+ * drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID
+ * @edid: EDID to parse
+ * @sadb: pointer to the speaker block
+ *
+ * Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it.
+ * Note: returned pointer needs to be kfreed
+ *
+ * Return number of found Speaker Allocation Blocks or negative number on error.
+ */
+int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
+{
+ int count = 0;
+ int i, start, end, dbl;
+ const u8 *cea;
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
+ return -ENOENT;
+ }
+
+ if (cea_revision(cea) < 3) {
+ DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+ return -ENOTSUPP;
+ }
+
+ if (cea_db_offsets(cea, &start, &end)) {
+ DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
+ return -EPROTO;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ const u8 *db = &cea[i];
+
+ if (cea_db_tag(db) == SPEAKER_BLOCK) {
+ dbl = cea_db_payload_len(db);
+
+ /* Speaker Allocation Data Block */
+ if (dbl == 3) {
+ *sadb = kmalloc(dbl, GFP_KERNEL);
+ memcpy(*sadb, &db[1], dbl);
+ count = dbl;
+ break;
+ }
+ }
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
+
+/**
* drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
* @connector: connector associated with the HDMI/DP sink
* @mode: the display mode
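
[Editor's sketch] As its kernel-doc notes, drm_edid_to_speaker_allocation() kmalloc()s the 3-byte Speaker Allocation Data Block payload for the caller, so every successful call must be paired with kfree(). A hedged usage sketch; demo_log_speakers is hypothetical and the edid is assumed to have been fetched elsewhere (e.g. via drm_get_edid()):

#include <drm/drm_edid.h>
#include <linux/printk.h>
#include <linux/slab.h>

static void demo_log_speakers(struct edid *edid)
{
	u8 *sadb = NULL;
	int count;

	count = drm_edid_to_speaker_allocation(edid, &sadb);
	if (count < 0)
		return;		/* no CEA block, old revision, ... */
	if (count == 3)
		pr_info("speaker allocation: %02x %02x %02x\n",
			sadb[0], sadb[1], sadb[2]);
	kfree(sadb);		/* caller owns the returned buffer */
}
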
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8bcce7866d36..f92da0a32f0d 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- etime = ktime_sub_ns(etime, delta_ns);
+ if (delta_ns < 0)
+ etime = ktime_add_ns(etime, -delta_ns);
+ else
+ etime = ktime_sub_ns(etime, delta_ns);
*vblank_time = ktime_to_timeval(etime);
DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
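
The sign check above exists because ktime_sub_ns() takes the nanosecond count as an unsigned value; on configurations where ktime is not a plain 64-bit scalar, the huge u64 produced by converting a negative s64 gets decomposed into a seconds/nanoseconds pair and corrupts the timestamp. A stand-alone model of that failure, with kt_sub_ns() as an illustrative stand-in for the kernel helper:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Model of a non-scalar ktime (32-bit kernels): seconds + nanoseconds. */
    struct kt { int64_t sec; int32_t nsec; };

    /* The nanosecond count is unsigned, so the u64 conversion of a
     * negative s64 becomes enormous and wrecks the seconds field. */
    static struct kt kt_sub_ns(struct kt t, uint64_t ns)
    {
        t.sec  -= (int64_t)(ns / NSEC_PER_SEC);
        t.nsec -= (int32_t)(ns % NSEC_PER_SEC);
        if (t.nsec < 0) {
            t.nsec += (int32_t)NSEC_PER_SEC;
            t.sec  -= 1;
        }
        return t;
    }

    int main(void)
    {
        struct kt etime = { 100, 0 };
        int64_t delta_ns = -500;        /* scanout position slightly ahead */

        struct kt bad = kt_sub_ns(etime, (uint64_t)delta_ns);

        struct kt good = etime;         /* the patch's guarded form */
        if (delta_ns < 0)
            good.nsec += (int32_t)-delta_ns;
        else
            good = kt_sub_ns(good, (uint64_t)delta_ns);

        printf("bad.sec=%lld good=%lld.%09d\n",
               (long long)bad.sec, (long long)good.sec, (int)good.nsec);
        return 0;
    }
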
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 95c75edef01a..30ef41bcd7b8 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 61b094f689a7..6e047bd53e2f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,7 +12,6 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3e106beca5b6..1c263dac3c1c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -14,7 +14,6 @@
#include <drm/drmP.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of_device.h>
@@ -130,7 +129,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos5_fimd_driver_data },
{},
};
-MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
#endif
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
@@ -1082,7 +1080,6 @@ static struct platform_device_id fimd_driver_ids[] = {
},
{},
};
-MODULE_DEVICE_TABLE(platform, fimd_driver_ids);
static const struct dev_pm_ops fimd_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 42a5a5466075..eddea4941483 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -806,9 +805,20 @@ static void g2d_dma_start(struct g2d_data *g2d,
struct g2d_cmdlist_node *node =
list_first_entry(&runqueue_node->run_cmdlist,
struct g2d_cmdlist_node, list);
+ int ret;
+
+ ret = pm_runtime_get_sync(g2d->dev);
+ if (ret < 0) {
+ dev_warn(g2d->dev, "failed pm power on.\n");
+ return;
+ }
- pm_runtime_get_sync(g2d->dev);
- clk_enable(g2d->gate_clk);
+ ret = clk_prepare_enable(g2d->gate_clk);
+ if (ret < 0) {
+ dev_warn(g2d->dev, "failed to enable clock.\n");
+ pm_runtime_put_sync(g2d->dev);
+ return;
+ }
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -861,7 +871,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
runqueue_work);
mutex_lock(&g2d->runqueue_mutex);
- clk_disable(g2d->gate_clk);
+ clk_disable_unprepare(g2d->gate_clk);
pm_runtime_put_sync(g2d->dev);
complete(&g2d->runqueue_node->complete);
@@ -1521,7 +1531,6 @@ static const struct of_device_id exynos_g2d_match[] = {
{ .compatible = "samsung,exynos5250-g2d" },
{},
};
-MODULE_DEVICE_TABLE(of, exynos_g2d_match);
#endif
struct platform_driver g2d_driver = {
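
The g2d change above follows the usual acquire-with-unwind shape: every resource acquired so far is released, in reverse order, when a later step fails. A stand-alone sketch of the pattern, with power_on()/clock_on() as illustrative stand-ins for pm_runtime_get_sync() and clk_prepare_enable():

    #include <stdio.h>

    static int  power_on(void)  { return 0; }
    static void power_off(void) { }
    static int  clock_on(void)  { return -1; /* simulate failure */ }

    static int start_dma(void)
    {
        int ret;

        ret = power_on();
        if (ret < 0)
            return ret;

        ret = clock_on();
        if (ret < 0) {
            power_off();            /* unwind the earlier step */
            return ret;
        }

        /* ... kick off the transfer ... */
        return 0;
    }

    int main(void)
    {
        printf("start_dma: %d\n", start_dma());
        return 0;
    }
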
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 472e3b25e7f2..90b8a1a5344c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -12,7 +12,6 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index aaa550d622f0..8d3bc01d6834 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/wait.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index b1ef8e7ff9c9..d2b6ab4def93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -12,7 +12,6 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
@@ -342,10 +341,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
*/
ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
prop_list->ipp_id);
- if (!ippdrv) {
+ if (IS_ERR(ippdrv)) {
DRM_ERROR("not found ipp%d driver.\n",
prop_list->ipp_id);
- return -EINVAL;
+ return PTR_ERR(ippdrv);
}
prop_list = ippdrv->prop_list;
@@ -970,9 +969,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
/* find command node */
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
qbuf->prop_id);
- if (!c_node) {
+ if (IS_ERR(c_node)) {
DRM_ERROR("failed to get command node.\n");
- return -EFAULT;
+ return PTR_ERR(c_node);
}
/* buffer control */
@@ -1106,9 +1105,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
cmd_ctrl->prop_id);
- if (!c_node) {
+ if (IS_ERR(c_node)) {
DRM_ERROR("invalid command node list.\n");
- return -EINVAL;
+ return PTR_ERR(c_node);
}
if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
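
The conversions above rely on ipp_find_obj() returning ERR_PTR()-encoded pointers rather than NULL on failure. For reference, a user-space re-creation of the kernel's encoding, which treats the top MAX_ERRNO values of the address space as invalid so a small negative errno can travel inside a pointer; find_obj() here is illustrative:

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)      { return (void *)error; }
    static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int   IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *find_obj(int id)
    {
        if (id < 0)
            return ERR_PTR(-ENOENT);    /* a lookup failure, not a NULL */
        return "found";
    }

    int main(void)
    {
        void *obj = find_obj(-1);

        if (IS_ERR(obj))                /* the check the ipp patch switches to */
            printf("error: %ld\n", PTR_ERR(obj));
        return 0;
    }
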
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 427640aa5148..49669aa24c45 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 41cc74d83e4e..c57c56519add 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -13,7 +13,6 @@
#include <drm/drmP.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 62ef5971ac3c..2f5c6942c968 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index ef04255076c7..6e320ae9afed 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include "exynos_drm_drv.h"
#include "exynos_hdmi.h"
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 42ffb71c63bc..c9a137caea41 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -23,7 +23,6 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 77841a113617..6f01cdf5e125 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
&status))
goto log_fail;
- while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+ while ((status == SDVO_CMD_STATUS_PENDING ||
+ status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
udelay(15);
if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
SDVO_I2C_CMD_STATUS,
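
The widened condition above keeps polling while the hardware reports either flavor of "not ready yet". A stand-alone sketch of the bounded retry shape, with read_status() as an illustrative stand-in for the SDVO status read:

    #include <stdio.h>

    enum status { STATUS_PENDING, STATUS_TARGET_NOT_SPECIFIED, STATUS_SUCCESS };

    static enum status read_status(int attempt)
    {
        return attempt < 3 ? STATUS_TARGET_NOT_SPECIFIED : STATUS_SUCCESS;
    }

    int main(void)
    {
        int retry = 5, attempt = 0;
        enum status s = read_status(attempt++);

        while ((s == STATUS_PENDING ||
                s == STATUS_TARGET_NOT_SPECIFIED) && retry--)
            s = read_status(attempt++);     /* udelay(15) between polls elided */

        printf("final status %d after %d attempts\n", (int)s, attempt);
        return 0;
    }
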
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f22c81d040c0..52a3785a3fdf 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1749,6 +1749,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 56708c64e68f..b6a58f720f9a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -760,6 +760,8 @@
will not assert AGPBUSY# and will only
be delivered when out of C3. */
#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1<<9)
+#define INSTPM_SYNC_FLUSH (1<<5)
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
@@ -1939,10 +1941,16 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
-/* HDMI/DP bits are gen4+ */
-#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
@@ -4528,7 +4536,7 @@
#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
/* legacy values */
#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 10c1db596387..38452d82ac7d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8726,9 +8726,11 @@ check_crtc_state(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
+ enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
- if (encoder->get_config)
+ if (encoder->get_config &&
+ encoder->get_hw_state(encoder, &pipe))
encoder->get_config(encoder, &pipe_config);
}
@@ -10565,6 +10567,8 @@ struct intel_display_error_state {
u32 power_well_driver;
+ int num_transcoders;
+
struct intel_cursor_error_state {
u32 control;
u32 position;
@@ -10573,16 +10577,7 @@ struct intel_display_error_state {
} cursor[I915_MAX_PIPES];
struct intel_pipe_error_state {
- enum transcoder cpu_transcoder;
- u32 conf;
u32 source;
-
- u32 htotal;
- u32 hblank;
- u32 hsync;
- u32 vtotal;
- u32 vblank;
- u32 vsync;
} pipe[I915_MAX_PIPES];
struct intel_plane_error_state {
@@ -10594,6 +10589,19 @@ struct intel_display_error_state {
u32 surface;
u32 tile_offset;
} plane[I915_MAX_PIPES];
+
+ struct intel_transcoder_error_state {
+ enum transcoder cpu_transcoder;
+
+ u32 conf;
+
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ } transcoder[4];
};
struct intel_display_error_state *
@@ -10601,9 +10609,17 @@ intel_display_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
- enum transcoder cpu_transcoder;
+ int transcoders[] = {
+ TRANSCODER_A,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ };
int i;
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return NULL;
+
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
@@ -10612,9 +10628,6 @@ intel_display_capture_error_state(struct drm_device *dev)
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(i) {
- cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
- error->pipe[i].cpu_transcoder = cpu_transcoder;
-
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10638,14 +10651,25 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
- error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->pipe[i].source = I915_READ(PIPESRC(i));
- error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
- error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
- error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
- error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
- error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
- error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+ }
+
+ error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+ if (HAS_DDI(dev_priv->dev))
+ error->num_transcoders++; /* Account for eDP. */
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ enum transcoder cpu_transcoder = transcoders[i];
+
+ error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+ error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+ error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+ error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+ error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+ error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+ error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+ error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
/* In the code above we read the registers without checking if the power
@@ -10666,22 +10690,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
{
int i;
+ if (!error)
+ return;
+
err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
if (HAS_POWER_WELL(dev))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(i) {
err_printf(m, "Pipe [%d]:\n", i);
- err_printf(m, " CPU transcoder: %c\n",
- transcoder_name(error->pipe[i].cpu_transcoder));
- err_printf(m, " CONF: %08x\n", error->pipe[i].conf);
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
- err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
- err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
- err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
- err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
- err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
- err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
@@ -10702,4 +10720,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " POS: %08x\n", error->cursor[i].position);
err_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ err_printf(m, " CPU transcoder: %c\n",
+ transcoder_name(error->transcoder[i].cpu_transcoder));
+ err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
+ err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
+ err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
+ err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
+ err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
+ err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
+ err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
+ }
}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 01b5a519c43c..a43c33bc4a35 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -494,8 +494,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
goto out;
}
- /* scale to hardware */
- level = level * freq / max;
+ /* scale to hardware, but be careful to not overflow */
+ if (freq < max)
+ level = level * freq / max;
+ else
+ level = freq / max * level;
dev_priv->backlight.level = level;
if (dev_priv->backlight.device)
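
The reordering above matters because level, freq and max are 32-bit values and the intermediate product level * freq can wrap. A small stand-alone demonstration (the constants are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t level = 0x10000, freq = 0x20000, max = 0x10000;

        /* naive scaling: level * freq is 2^33 and wraps to 0 in 32 bits */
        uint32_t naive = level * freq / max;

        /* the patch's ordering keeps the intermediate in range */
        uint32_t safe = (freq < max) ? level * freq / max
                                     : freq / max * level;

        printf("naive=%u safe=%u\n", (unsigned)naive, (unsigned)safe);
        return 0;                       /* prints naive=0 safe=131072 */
    }

The freq / max * level ordering trades a little precision (the division truncates first) for safety when freq >= max, which is the tradeoff the "be careful" comment hints at.
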
@@ -512,6 +515,17 @@ void intel_panel_disable_backlight(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
+ /*
+ * Do not disable backlight on the vgaswitcheroo path. When switching
+ * away from i915, the other client may depend on i915 to handle the
+ * backlight. The downside is that the backlight may stay on
+ * unnecessarily when no other client takes over.
+ */
+ if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+ return;
+ }
+
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
dev_priv->backlight.enabled = false;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0150ba598bf0..46056820d1d2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5297,8 +5297,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
}
} else {
if (enable_requested) {
+ unsigned long irqflags;
+ enum pipe p;
+
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+ POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
+
+ /*
+ * After this, the registers on the pipes that are part
+ * of the power well will become zero, so we have to
+ * adjust our counters according to that.
+ *
+ * FIXME: Should we do this in general in
+ * drm_vblank_post_modeset?
+ */
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for_each_pipe(p)
+ if (p != PIPE_A)
+ dev->last_vblank[p] = 0;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 7de29d40d1ad..f05cceac5a52 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -962,6 +962,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
+
+ /* Flush the TLB for this page */
+ if (INTEL_INFO(dev)->gen >= 6) {
+ u32 reg = RING_INSTPM(ring->mmio_base);
+ I915_WRITE(reg,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ ring->name);
+ }
}
static int
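
INSTPM is one of the "masked" registers common on this hardware: the top 16 bits of the written value select which of the low 16 bits actually take effect, which is what _MASKED_BIT_ENABLE() encodes. A stand-alone model of those semantics (reg_write_masked() imitates the hardware; it is not a driver function):

    #include <stdint.h>
    #include <stdio.h>

    #define MASKED_BIT_ENABLE(b)  (((b) << 16) | (b))
    #define MASKED_BIT_DISABLE(b) ((b) << 16)

    /* Only bits whose mask bit (high half) is set get updated. */
    static uint32_t reg_write_masked(uint32_t old, uint32_t val)
    {
        uint32_t mask = val >> 16;

        return (old & ~mask) | (val & mask & 0xffff);
    }

    int main(void)
    {
        uint32_t instpm = 0;
        uint32_t tlb_inv = 1u << 9, sync_flush = 1u << 5;

        instpm = reg_write_masked(instpm,
                     MASKED_BIT_ENABLE(tlb_inv | sync_flush));
        printf("INSTPM=0x%08x\n", instpm);      /* 0x00000220 */
        return 0;
    }

The hardware clears the SYNC_FLUSH bit once the invalidation completes, which is why the patch polls the register with wait_for() afterwards.
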
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 251784aa2225..503a414cbdad 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -29,6 +29,7 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
int i;
if (!crtc->enabled)
@@ -36,6 +37,28 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+ if (fb && fb->bits_per_pixel == 16) {
+ int inc = (fb->depth == 15) ? 8 : 4;
+ u8 r, b;
+ for (i = 0; i < MGAG200_LUT_SIZE; i += inc) {
+ if (fb->depth == 16) {
+ if (i > (MGAG200_LUT_SIZE >> 1)) {
+ r = b = 0;
+ } else {
+ r = mga_crtc->lut_r[i << 1];
+ b = mga_crtc->lut_b[i << 1];
+ }
+ } else {
+ r = mga_crtc->lut_r[i];
+ b = mga_crtc->lut_b[i];
+ }
+ /* VGA registers */
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, r);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, b);
+ }
+ return;
+ }
for (i = 0; i < MGAG200_LUT_SIZE; i++) {
/* VGA registers */
WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
@@ -877,7 +900,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
if (crtc->fb->bits_per_pixel == 24)
- pitch = pitch >> (4 - bppshift);
+ pitch = (pitch * 3) >> (4 - bppshift);
else
pitch = pitch >> (4 - bppshift);
@@ -1251,6 +1274,24 @@ static void mga_crtc_destroy(struct drm_crtc *crtc)
kfree(mga_crtc);
}
+static void mga_crtc_disable(struct drm_crtc *crtc)
+{
+ int ret;
+ DRM_DEBUG_KMS("\n");
+ mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->fb) {
+ struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->fb);
+ struct drm_gem_object *obj = mga_fb->obj;
+ struct mgag200_bo *bo = gem_to_mga_bo(obj);
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return;
+ mgag200_bo_push_sysram(bo);
+ mgag200_bo_unreserve(bo);
+ }
+ crtc->fb = NULL;
+}
+
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs mga_crtc_funcs = {
.cursor_set = mga_crtc_cursor_set,
@@ -1261,6 +1302,7 @@ static const struct drm_crtc_funcs mga_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs mga_helper_funcs = {
+ .disable = mga_crtc_disable,
.dpms = mga_crtc_dpms,
.mode_fixup = mga_crtc_mode_fixup,
.mode_set = mga_crtc_mode_set,
@@ -1581,6 +1623,8 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+
mga_connector->i2c = mgag200_i2c_create(dev);
if (!mga_connector->i2c)
DRM_ERROR("failed to add ddc bus\n");
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index fd4539d9ad2c..07b192fe15c6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -324,6 +324,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
}
mgabo->bo.bdev = &mdev->ttm.bdev;
+ mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
@@ -354,6 +355,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
bo->pin_count++;
if (gpu_addr)
*gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
}
mgag200_ttm_placement(bo, pl_flag);
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index d8291724dbd4..7a4e0891c5f8 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
u32 splitoff;
u32 s, e;
+ BUG_ON(!type);
+
list_for_each_entry(this, &mm->free, fl_entry) {
e = this->offset + this->length;
s = this->offset;
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
struct nouveau_mm_node *prev, *this, *next;
u32 mask = align - 1;
+ BUG_ON(!type);
+
list_for_each_entry_reverse(this, &mm->free, fl_entry) {
u32 e = this->offset + this->length;
u32 s = this->offset;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
index 373dbcc523b2..a19e7d79b847 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -36,6 +36,8 @@ nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
if (data && data[0]) {
for (i = 0; i < size; i++)
nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+ for (; i < 0x60; i++)
+ nv_wr32(priv, 0x61c440 + soff, (i << 8));
nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
} else
if (data) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
index dc57e24fc1df..717639386ced 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -41,6 +41,8 @@ nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
if (data && data[0]) {
for (i = 0; i < size; i++)
nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+ for (; i < 0x60; i++)
+ nv_wr32(priv, 0x10ec00 + soff, (i << 8));
nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
} else
if (data) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index ab1e918469a8..526b75242899 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -47,14 +47,8 @@ int
nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
{
struct nv50_disp_priv *priv = (void *)object->engine;
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
- const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
- const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
- struct dcb_output outp;
- u8 ver, hdr;
u32 data;
int ret = -EINVAL;
@@ -62,8 +56,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
return -EINVAL;
data = *(u32 *)args;
- if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
- return -ENODEV;
switch (mthd & ~0x3f) {
case NV50_DISP_SOR_PWR:
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 49ecbb859b25..c19004301309 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -265,8 +265,8 @@ nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int
nv31_mpeg_init(struct nouveau_object *object)
{
- struct nouveau_engine *engine = nv_engine(object->engine);
- struct nv31_mpeg_priv *priv = (void *)engine;
+ struct nouveau_engine *engine = nv_engine(object);
+ struct nv31_mpeg_priv *priv = (void *)object;
struct nouveau_fb *pfb = nouveau_fb(object);
int ret, i;
@@ -284,7 +284,10 @@ nv31_mpeg_init(struct nouveau_object *object)
/* PMPEG init */
nv_wr32(priv, 0x00b32c, 0x00000000);
nv_wr32(priv, 0x00b314, 0x00000100);
- nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
+ if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv))
+ nv_wr32(priv, 0x00b220, 0x00000044);
+ else
+ nv_wr32(priv, 0x00b220, 0x00000031);
nv_wr32(priv, 0x00b300, 0x02001ec1);
nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index f7c581ad1991..dd6196072e9c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -61,6 +61,7 @@ nv40_mpeg_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
index 0639bc59d0a5..5f6ede7c4892 100644
--- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
@@ -118,7 +118,13 @@ _nouveau_xtensa_init(struct nouveau_object *object)
return ret;
}
- ret = nouveau_gpuobj_new(object, NULL, fw->size, 0x1000, 0,
+ if (fw->size > 0x40000) {
+ nv_warn(xtensa, "firmware %s too large\n", name);
+ release_firmware(fw);
+ return -EINVAL;
+ }
+
+ ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
&xtensa->gpu_fw);
if (ret) {
release_firmware(fw);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index d5502267c30f..9d2cd2006250 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -20,8 +20,8 @@ nouveau_mc(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
}
-#define nouveau_mc_create(p,e,o,d) \
- nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,m,d) \
+ nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
#define nouveau_mc_destroy(p) ({ \
struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
})
@@ -33,7 +33,8 @@ nouveau_mc(void *obj)
})
int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
+ struct nouveau_oclass *, const struct nouveau_mc_intr *,
+ int, void **);
void _nouveau_mc_dtor(struct nouveau_object *);
int _nouveau_mc_init(struct nouveau_object *);
int _nouveau_mc_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index f2e87b105666..fcf57fa309bf 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -55,7 +55,7 @@ struct nouveau_vma {
struct nouveau_vm {
struct nouveau_vmmgr *vmm;
struct nouveau_mm mm;
- int refcount;
+ struct kref refcount;
struct list_head pgd_list;
atomic_t engref[NVDEV_SUBDEV_NR];
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 6c974dd83e8b..db9d6ddde52c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -81,7 +81,7 @@ void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
extern int nv50_fb_memtype[0x80];
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index 19e3a9a63a02..ab7ef0ac9e34 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
switch (pfb914 & 0x00000003) {
- case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break;
- case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break;
- case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
+ case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
case 0x00000003: break;
}
- pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
- pfb->ram->tags = nv_rd32(pfb, 0x100320);
+ ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ ram->tags = nv_rd32(pfb, 0x100320);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
index 7192aa6e5577..63a6aab86028 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- pfb->ram->type = NV_MEM_TYPE_STOLEN;
+ ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ ram->type = NV_MEM_TYPE_STOLEN;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index af5aa7ee8ad9..903baff77fdd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -27,17 +27,10 @@
#include "priv.h"
void
-nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+__nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
{
struct nouveau_mm_node *this;
- struct nouveau_mem *mem;
- mem = *pmem;
- *pmem = NULL;
- if (unlikely(mem == NULL))
- return;
-
- mutex_lock(&pfb->base.mutex);
while (!list_empty(&mem->regions)) {
this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
@@ -46,6 +39,19 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
}
nouveau_mm_free(&pfb->tags, &mem->tag);
+}
+
+void
+nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+{
+ struct nouveau_mem *mem = *pmem;
+
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
+ return;
+
+ mutex_lock(&pfb->base.mutex);
+ __nv50_ram_put(pfb, mem);
mutex_unlock(&pfb->base.mutex);
kfree(mem);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 9c3634acbb9d..cf97c4de4a6b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -33,11 +33,19 @@ void
nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);
+ struct nouveau_mem *mem = *pmem;
- if ((*pmem)->tag)
- ltcg->tags_free(ltcg, &(*pmem)->tag);
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
+ return;
- nv50_ram_put(pfb, pmem);
+ mutex_lock(&pfb->base.mutex);
+ if (mem->tag)
+ ltcg->tags_free(ltcg, &mem->tag);
+ __nv50_ram_put(pfb, mem);
+ mutex_unlock(&pfb->base.mutex);
+
+ kfree(mem);
}
int
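
The split above follows the kernel's double-underscore convention: __nv50_ram_put() assumes the caller already holds pfb->base.mutex, so nvc0_ram_put() can take the lock once, release its compression tags under it, and reuse the same body. A stand-alone sketch of the convention using pthreads (all names illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void __put(int *mem)         /* caller must hold 'lock' */
    {
        *mem = 0;                       /* free regions, tags, ... */
    }

    static void put(int **pmem)
    {
        int *mem = *pmem;

        *pmem = NULL;
        if (!mem)
            return;

        pthread_mutex_lock(&lock);
        __put(mem);                     /* shared body, lock held */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        int obj = 42, *ref = &obj;

        put(&ref);
        printf("ref=%p obj=%d\n", (void *)ref, obj);
        return 0;
    }
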
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index bf489dcf46e2..c4c1d415e7fe 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -103,7 +103,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
int i;
intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
- if (nv_device(priv)->chipset >= 0x90)
+ if (nv_device(priv)->chipset > 0x92)
intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070);
hi = (intr0 & 0x0000ffff) | (intr1 << 16);
@@ -115,7 +115,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
}
nv_wr32(priv, 0xe054, intr0);
- if (nv_device(priv)->chipset >= 0x90)
+ if (nv_device(priv)->chipset > 0x92)
nv_wr32(priv, 0xe074, intr1);
}
@@ -146,7 +146,7 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_gpio_create(parent, engine, oclass,
- nv_device(parent)->chipset >= 0x90 ? 32 : 16,
+ nv_device(parent)->chipset > 0x92 ? 32 : 16,
&priv);
*pobject = nv_object(priv);
if (ret)
@@ -182,7 +182,7 @@ nv50_gpio_init(struct nouveau_object *object)
/* disable, and ack any pending gpio interrupts */
nv_wr32(priv, 0xe050, 0x00000000);
nv_wr32(priv, 0xe054, 0xffffffff);
- if (nv_device(priv)->chipset >= 0x90) {
+ if (nv_device(priv)->chipset > 0x92) {
nv_wr32(priv, 0xe070, 0x00000000);
nv_wr32(priv, 0xe074, 0xffffffff);
}
@@ -195,7 +195,7 @@ nv50_gpio_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_gpio_priv *priv = (void *)object;
nv_wr32(priv, 0xe050, 0x00000000);
- if (nv_device(priv)->chipset >= 0x90)
+ if (nv_device(priv)->chipset > 0x92)
nv_wr32(priv, 0xe070, 0x00000000);
return nouveau_gpio_fini(&priv->base, suspend);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
index bcca883018f4..cce65cc56514 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -30,8 +30,9 @@ struct nvc0_ltcg_priv {
struct nouveau_ltcg base;
u32 part_nr;
u32 subp_nr;
- struct nouveau_mm tags;
u32 num_tags;
+ u32 tag_base;
+ struct nouveau_mm tags;
struct nouveau_mm_node *tag_ram;
};
@@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
u32 tag_size, tag_margin, tag_align;
int ret;
- nv_wr32(priv, 0x17e8d8, priv->part_nr);
- if (nv_device(pfb)->card_type >= NV_E0)
- nv_wr32(priv, 0x17e000, priv->part_nr);
-
/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
priv->num_tags = (pfb->ram->size >> 17) / 4;
if (priv->num_tags > (1 << 17))
@@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
tag_size += tag_align;
tag_size = (tag_size + 0xfff) >> 12; /* round up */
- ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+ ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1,
&priv->tag_ram);
if (ret) {
priv->num_tags = 0;
@@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
tag_base += tag_align - 1;
ret = do_div(tag_base, tag_align);
- nv_wr32(priv, 0x17e8d4, tag_base);
+ priv->tag_base = tag_base;
}
ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
@@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
- nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
-
ret = nvc0_ltcg_init_tag_ram(pfb, priv);
if (ret)
return ret;
@@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object)
nouveau_ltcg_destroy(ltcg);
}
+static int
+nvc0_ltcg_init(struct nouveau_object *object)
+{
+ struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+ struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+ int ret;
+
+ ret = nouveau_ltcg_init(ltcg);
+ if (ret)
+ return ret;
+
+ nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+ nv_wr32(priv, 0x17e8d8, priv->part_nr);
+ if (nv_device(ltcg)->card_type >= NV_E0)
+ nv_wr32(priv, 0x17e000, priv->part_nr);
+ nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ return 0;
+}
+
struct nouveau_oclass
nvc0_ltcg_oclass = {
.handle = NV_SUBDEV(LTCG, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_ltcg_ctor,
.dtor = nvc0_ltcg_dtor,
- .init = _nouveau_ltcg_init,
+ .init = nvc0_ltcg_init,
.fini = _nouveau_ltcg_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 2e7c5fd3de3d..20f9a538746e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -86,7 +86,9 @@ _nouveau_mc_dtor(struct nouveau_object *object)
int
nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+ struct nouveau_oclass *oclass,
+ const struct nouveau_mc_intr *intr_map,
+ int length, void **pobject)
{
struct nouveau_device *device = nv_device(parent);
struct nouveau_mc *pmc;
@@ -98,6 +100,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ pmc->intr_map = intr_map;
+
ret = request_irq(device->pdev->irq, nouveau_mc_intr,
IRQF_SHARED, "nouveau", pmc);
if (ret < 0)
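
Threading the intr map through nouveau_mc_create_() closes a window: request_irq() is called inside the constructor, so an interrupt arriving before the old "priv->base.intr_map = ..." assignment in each chip's ctor would have been dispatched through a NULL map. A stand-alone sketch of why the ordering matters (all names illustrative):

    #include <stdio.h>

    struct intr { unsigned stat; int unit; };
    struct mc   { const struct intr *intr_map; };

    static void irq_handler(struct mc *pmc, unsigned stat)
    {
        const struct intr *map = pmc->intr_map;     /* NULL before the fix */

        for (; map && map->stat; map++)
            if (stat & map->stat)
                printf("dispatch to unit %d\n", map->unit);
    }

    static void mc_create(struct mc *pmc, const struct intr *map)
    {
        pmc->intr_map = map;        /* must precede the IRQ request */
        /* request_irq(...): an interrupt may fire from here on */
        irq_handler(pmc, 0x1000);   /* simulate an early interrupt */
    }

    int main(void)
    {
        static const struct intr map[] = { { 0x1000, 7 }, { 0 } };
        struct mc pmc;

        mc_create(&pmc, map);
        return 0;
    }
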
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 8c769715227b..64aa4edb0d9d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv04_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 51919371810f..d9891782bf28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv44_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv04_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 0cb322a5e72c..2b1afe225db8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -41,7 +41,7 @@ nv50_mc_intr[] = {
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
- { 0x0000d101, NVDEV_SUBDEV_FB },
+ { 0x0002d101, NVDEV_SUBDEV_FB },
{},
};
@@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv50_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index e82fd21b5041..0d57b4d3e001 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv98_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c5da3babbc62..104175c5a2dd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nvc0_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 67fcb6c852ac..ef3133e7575c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -361,7 +361,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
INIT_LIST_HEAD(&vm->pgd_list);
vm->vmm = vmm;
- vm->refcount = 1;
+ kref_init(&vm->refcount);
vm->fpde = offset >> (vmm->pgt_bits + 12);
vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
@@ -441,8 +441,9 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
}
static void
-nouveau_vm_del(struct nouveau_vm *vm)
+nouveau_vm_del(struct kref *kref)
{
+ struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount);
struct nouveau_vm_pgd *vpgd, *tmp;
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
@@ -458,27 +459,19 @@ int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
struct nouveau_gpuobj *pgd)
{
- struct nouveau_vm *vm;
- int ret;
-
- vm = ref;
- if (vm) {
- ret = nouveau_vm_link(vm, pgd);
+ if (ref) {
+ int ret = nouveau_vm_link(ref, pgd);
if (ret)
return ret;
- vm->refcount++;
+ kref_get(&ref->refcount);
}
- vm = *ptr;
- *ptr = ref;
-
- if (vm) {
- nouveau_vm_unlink(vm, pgd);
-
- if (--vm->refcount == 0)
- nouveau_vm_del(vm);
+ if (*ptr) {
+ nouveau_vm_unlink(*ptr, pgd);
+ kref_put(&(*ptr)->refcount, nouveau_vm_del);
}
+ *ptr = ref;
return 0;
}
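
The kref conversion above replaces the open-coded "if (--refcount == 0)" bookkeeping with a release callback that runs exactly when the last reference drops (and, in the kernel, atomically). A minimal single-threaded model, with vm_del() mirroring the container_of() pattern used in the patch:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct kref { int count; };

    static void kref_init(struct kref *k) { k->count = 1; }
    static void kref_get(struct kref *k)  { k->count++; }
    static void kref_put(struct kref *k, void (*release)(struct kref *))
    {
        if (--k->count == 0)
            release(k);
    }

    struct vm { struct kref refcount; };

    static void vm_del(struct kref *kref)
    {
        /* container_of(kref, struct vm, refcount) in kernel terms */
        struct vm *vm = (struct vm *)((char *)kref -
                        offsetof(struct vm, refcount));

        printf("freeing vm %p\n", (void *)vm);
        free(vm);
    }

    int main(void)
    {
        struct vm *vm = malloc(sizeof(*vm));

        if (!vm)
            return 1;
        kref_init(&vm->refcount);
        kref_get(&vm->refcount);            /* second user appears */
        kref_put(&vm->refcount, vm_del);    /* still alive */
        kref_put(&vm->refcount, vm_del);    /* last ref: vm_del runs */
        return 0;
    }
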
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6413552df21c..d4fbf11360fe 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -607,6 +607,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
regp->ramdac_a34 = 0x1;
}
+static int
+nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+ struct nv04_display *disp = nv04_display(crtc->dev);
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret == 0) {
+ if (disp->image[nv_crtc->index])
+ nouveau_bo_unpin(disp->image[nv_crtc->index]);
+ nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+ }
+
+ return ret;
+}
+
/**
* Sets up registers for the given mode/adjusted_mode pair.
*
@@ -623,10 +641,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_device *dev = crtc->dev;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_drm *drm = nouveau_drm(dev);
+ int ret;
NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
drm_mode_debug_printmodeline(adjusted_mode);
+ ret = nv_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
+
/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
@@ -723,6 +746,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
static void nv_crtc_destroy(struct drm_crtc *crtc)
{
+ struct nv04_display *disp = nv04_display(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
if (!nv_crtc)
@@ -730,6 +754,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
+ if (disp->image[nv_crtc->index])
+ nouveau_bo_unpin(disp->image[nv_crtc->index]);
+ nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
@@ -755,6 +783,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
}
static void
+nv_crtc_disable(struct drm_crtc *crtc)
+{
+ struct nv04_display *disp = nv04_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (disp->image[nv_crtc->index])
+ nouveau_bo_unpin(disp->image[nv_crtc->index]);
+ nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+}
+
+static void
nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
uint32_t size)
{
@@ -792,7 +830,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *drm_fb;
struct nouveau_framebuffer *fb;
int arb_burst, arb_lwm;
- int ret;
NV_DEBUG(drm, "index %d\n", nv_crtc->index);
@@ -802,10 +839,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
return 0;
}
-
/* If atomic, we want to switch to the fb we were passed, so
- * now we update pointers to do that. (We don't pin; just
- * assume we're already pinned and update the base address.)
+ * now we update pointers to do that.
*/
if (atomic) {
drm_fb = passed_fb;
@@ -813,17 +848,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
} else {
drm_fb = crtc->fb;
fb = nouveau_framebuffer(crtc->fb);
- /* If not atomic, we can go ahead and pin, and unpin the
- * old fb we were passed.
- */
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (passed_fb) {
- struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
- nouveau_bo_unpin(ofb->nvbo);
- }
}
nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -878,6 +902,9 @@ static int
nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
+ int ret = nv_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
@@ -1074,6 +1101,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.mode_set_base = nv04_crtc_mode_set_base,
.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
.load_lut = nv_crtc_gamma_load,
+ .disable = nv_crtc_disable,
};
int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index a0a031dad13f..9928187f0a7d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -81,6 +81,7 @@ struct nv04_display {
uint32_t saved_vga_font[4][16384];
uint32_t dac_users[4];
struct nouveau_object *core;
+ struct nouveau_bo *image[2];
};
static inline struct nv04_display *
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e4444bacd0b2..755c38d06271 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -198,7 +198,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
size_t acc_size;
int ret;
int type = ttm_bo_type_device;
- int max_size = INT_MAX & ~((1 << drm->client.base.vm->vmm->lpg_shift) - 1);
+ int lpg_shift = 12;
+ int max_size;
+
+ if (drm->client.base.vm)
+ lpg_shift = drm->client.base.vm->vmm->lpg_shift;
+ max_size = INT_MAX & ~((1 << lpg_shift) - 1);
if (size <= 0 || size > max_size) {
nv_warn(drm, "skipped size %x\n", (u32)size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 44202bf7b819..77ffded68837 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -580,6 +580,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
ret = nv50_display_flip_next(crtc, fb, chan, 0);
if (ret)
goto fail_unreserve;
+ } else {
+ struct nv04_display *dispnv04 = nv04_display(dev);
+ nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
}
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 4c1bc061fae2..8f6d63d7edd3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -398,7 +398,8 @@ void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- drm_fb_helper_hotplug_event(&drm->fbcon->helper);
+ if (drm->fbcon)
+ drm_fb_helper_hotplug_event(&drm->fbcon->helper);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 8e47a9bae8c3..22aa9963ea6f 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
u32 start = mem->start * PAGE_SIZE;
- u32 limit = mem->start + mem->size - 1;
+ u32 limit = start + mem->size - 1;
int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
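
The one-liner above fixes a units mix-up: mem->start is a page index while mem->size is a byte count, so the old limit added incompatible quantities. A stand-alone check of the arithmetic (values arbitrary; the byte-count reading of size is inferred from the fixed code):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
        uint32_t mem_start = 16;            /* page index */
        uint32_t mem_size  = 0x2000;        /* byte count */

        uint32_t start = mem_start * PAGE_SIZE;
        uint32_t buggy = mem_start + mem_size - 1;  /* 0x200f: nonsense */
        uint32_t fixed = start + mem_size - 1;      /* 0x11fff: last byte */

        printf("start=0x%x buggy=0x%x fixed=0x%x\n", start, buggy, fixed);
        return 0;
    }
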
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index 3af5bcd0b203..625f80d53dc2 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
if (clk < pll->vco1.max_freq)
pll->vco2.max_freq = 0;
- pclk->pll_calc(pclk, pll, clk, &coef);
+ ret = pclk->pll_calc(pclk, pll, clk, &coef);
if (ret == 0)
return -ERANGE;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index f9701e567db8..0ee363840035 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
+ u32 start = mem->start * PAGE_SIZE;
+ u32 limit = start + mem->size - 1;
int ret, i;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan)
fctx->base.sync = nv17_fence_sync;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
+ NvSema, 0x003d,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
- .start = mem->start * PAGE_SIZE,
- .limit = mem->size - 1,
+ .start = start,
+ .limit = limit,
}, sizeof(struct nv_dma_class),
&object);
/* dma objects for display sync channel semaphore blocks */
for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ u32 start = bo->bo.mem.start * PAGE_SIZE;
+ u32 limit = start + bo->bo.mem.size - 1;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
NvEvoSema0 + i, 0x003d,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
- .start = bo->bo.offset,
- .limit = bo->bo.offset + 0xfff,
+ .start = start,
+ .limit = limit,
}, sizeof(struct nv_dma_class),
&object);
}
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index c3df52c1a60c..306364a1ecda 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -72,14 +72,32 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
- evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
+ radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+ evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
- trinity_smc.o ni_dpm.o si_smc.o si_dpm.o
+ trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
+ ci_dpm.o dce6_afmt.o
+
+# add async DMA block
+radeon-y += \
+ r600_dma.o \
+ rv770_dma.o \
+ evergreen_dma.o \
+ ni_dma.o \
+ si_dma.o \
+ cik_sdma.o \
+
+# add UVD block
+radeon-y += \
+ radeon_uvd.o \
+ uvd_v1_0.o \
+ uvd_v2_2.o \
+ uvd_v3_1.o \
+ uvd_v4_2.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index fb441a790f3d..15da7ef344a4 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1222,12 +1222,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
int r;
mutex_lock(&ctx->mutex);
+ /* reset data block */
+ ctx->data_block = 0;
/* reset reg block */
ctx->reg_block = 0;
/* reset fb window */
ctx->fb_base = 0;
/* reset io mode */
ctx->io_mode = ATOM_IO_MM;
+ /* reset divmul */
+ ctx->divmul[0] = 0;
+ ctx->divmul[1] = 0;
r = atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
return r;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 16b120c3f144..af10f8571d87 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -7661,618 +7661,6 @@ typedef struct _ATOM_POWERPLAY_INFO_V3
ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
}ATOM_POWERPLAY_INFO_V3;
-/* New PPlib */
-/**************************************************************************/
-typedef struct _ATOM_PPLIB_THERMALCONTROLLER
-
-{
- UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
- UCHAR ucI2cLine; // as interpreted by DAL I2C
- UCHAR ucI2cAddress;
- UCHAR ucFanParameters; // Fan Control Parameters.
- UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
- UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
- UCHAR ucReserved; // ----
- UCHAR ucFlags; // to be defined
-} ATOM_PPLIB_THERMALCONTROLLER;
-
-#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
-#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
-
-#define ATOM_PP_THERMALCONTROLLER_NONE 0
-#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_LM64 5
-#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
-#define ATOM_PP_THERMALCONTROLLER_RV770 8
-#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
-#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
-#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
-#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
-#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
-#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
-#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
-#define ATOM_PP_THERMALCONTROLLER_LM96163 17
-#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18
-
-// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
-// We probably should reserve the bit 0x80 for this use.
-// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
-// The driver can pick the correct internal controller based on the ASIC.
-
-#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
-#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
-
-typedef struct _ATOM_PPLIB_STATE
-{
- UCHAR ucNonClockStateIndex;
- UCHAR ucClockStateIndices[1]; // variable-sized
-} ATOM_PPLIB_STATE;
-
-
-typedef struct _ATOM_PPLIB_FANTABLE
-{
- UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
- UCHAR ucTHyst; // Temperature hysteresis. Integer.
- USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
- USHORT usTMed; // The middle temperature where we change slopes.
- USHORT usTHigh; // The high point above TMed for adjusting the second slope.
- USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
- USHORT usPWMMed; // The PWM value (in percent) at TMed.
- USHORT usPWMHigh; // The PWM value at THigh.
-} ATOM_PPLIB_FANTABLE;
-
-typedef struct _ATOM_PPLIB_FANTABLE2
-{
- ATOM_PPLIB_FANTABLE basicTable;
- USHORT usTMax; // The max temperature
-} ATOM_PPLIB_FANTABLE2;
-
-typedef struct _ATOM_PPLIB_EXTENDEDHEADER
-{
- USHORT usSize;
- ULONG ulMaxEngineClock; // For Overdrive.
- ULONG ulMaxMemoryClock; // For Overdrive.
- // Add extra system parameters here, always adjust size to include all fields.
- USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
- USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
- USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
- USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table
-} ATOM_PPLIB_EXTENDEDHEADER;
-
-//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
-#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
-#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
-#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
-#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
-#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
-#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
-#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
-#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
-#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
-#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
-#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
-#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
-#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
-#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
-#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
-#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
-#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
-#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Whether the driver supports BACO state.
-#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Whether the driver supports the new CAC voltage table.
-#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Whether the driver supports reverted GPIO5 polarity.
-#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Whether the driver supports thermal2GPIO17.
-#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Whether the driver supports a configurable VR HOT GPIO.
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE
-{
- ATOM_COMMON_TABLE_HEADER sHeader;
-
- UCHAR ucDataRevision;
-
- UCHAR ucNumStates;
- UCHAR ucStateEntrySize;
- UCHAR ucClockInfoSize;
- UCHAR ucNonClockSize;
-
- // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
- USHORT usStateArrayOffset;
-
- // offset from start of this table to array of ASIC-specific structures,
- // currently ATOM_PPLIB_CLOCK_INFO.
- USHORT usClockInfoArrayOffset;
-
- // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
- USHORT usNonClockInfoArrayOffset;
-
- USHORT usBackbiasTime; // in microseconds
- USHORT usVoltageTime; // in microseconds
- USHORT usTableSize; //the size of this structure, or the extended structure
-
- ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
-
- ATOM_PPLIB_THERMALCONTROLLER sThermalController;
-
- USHORT usBootClockInfoOffset;
- USHORT usBootNonClockInfoOffset;
-
-} ATOM_PPLIB_POWERPLAYTABLE;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
-{
- ATOM_PPLIB_POWERPLAYTABLE basicTable;
- UCHAR ucNumCustomThermalPolicy;
- USHORT usCustomThermalPolicyArrayOffset;
-}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
-{
- ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
- USHORT usFormatID; // To be used ONLY by PPGen.
- USHORT usFanTableOffset;
- USHORT usExtendendedHeaderOffset;
-} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
-{
- ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
- ULONG ulGoldenPPID; // PPGen use only
- ULONG ulGoldenRevision; // PPGen use only
- USHORT usVddcDependencyOnSCLKOffset;
- USHORT usVddciDependencyOnMCLKOffset;
- USHORT usVddcDependencyOnMCLKOffset;
- USHORT usMaxClockVoltageOnDCOffset;
- USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
- USHORT usMvddDependencyOnMCLKOffset;
-} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
-{
- ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
- ULONG ulTDPLimit;
- ULONG ulNearTDPLimit;
- ULONG ulSQRampingThreshold;
- USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
- ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
- USHORT usTDPODLimit;
- USHORT usLoadLineSlope; // in milliOhms * 100
-} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
-
-//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
-#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
-#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
-#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
-#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
-#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
-#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
-// 2, 4, 6, 7 are reserved
-
-#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
-#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
-#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
-#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
-#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
-#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
-#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
-#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
-#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
-#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
-#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
-#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
-#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
-
-//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
-#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
-#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
-#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
-
-//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
-#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
-#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
-
-// 0 is 2.5Gb/s, 1 is 5Gb/s
-#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
-#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
-
-// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
-#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
-#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
-
-// lookup into reduced refresh-rate table
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
-
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
-// 2-15 TBD as needed.
-
-#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
-#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
-
-#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
-
-#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
-
-//memory related flags
-#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
-
-//M3 Arb // 2 bits; currently 3 sets of parameters in total
-#define ATOM_PPLIB_M3ARB_MASK 0x00060000
-#define ATOM_PPLIB_M3ARB_SHIFT 17
-
-#define ATOM_PPLIB_ENABLE_DRR 0x00080000
-
-// remaining 16 bits are reserved
-typedef struct _ATOM_PPLIB_THERMAL_STATE
-{
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- UCHAR ucThermalAction;
-}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
-
-// Contained in an array starting at the offset
-// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
-// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
-#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
-#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
-typedef struct _ATOM_PPLIB_NONCLOCK_INFO
-{
- USHORT usClassification;
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- ULONG ulCapsAndSettings;
- UCHAR ucRequiredPower;
- USHORT usClassification2;
- ULONG ulVCLK;
- ULONG ulDCLK;
- UCHAR ucUnused[5];
-} ATOM_PPLIB_NONCLOCK_INFO;
-
-// Contained in an array starting at the offset
-// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
-// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
-typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
-{
- USHORT usEngineClockLow;
- UCHAR ucEngineClockHigh;
-
- USHORT usMemoryClockLow;
- UCHAR ucMemoryClockHigh;
-
- USHORT usVDDC;
- USHORT usUnused1;
- USHORT usUnused2;
-
- ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
-
-} ATOM_PPLIB_R600_CLOCK_INFO;
-
-// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
-#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
-#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
-#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
-#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
-#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
-#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
-
-typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
-{
- USHORT usEngineClockLow;
- UCHAR ucEngineClockHigh;
-
- USHORT usMemoryClockLow;
- UCHAR ucMemoryClockHigh;
-
- USHORT usVDDC;
- USHORT usVDDCI;
- USHORT usUnused;
-
- ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
-
-} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
-{
- USHORT usEngineClockLow;
- UCHAR ucEngineClockHigh;
-
- USHORT usMemoryClockLow;
- UCHAR ucMemoryClockHigh;
-
- USHORT usVDDC;
- USHORT usVDDCI;
- UCHAR ucPCIEGen;
- UCHAR ucUnused1;
-
- ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
-
-} ATOM_PPLIB_SI_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
-{
- USHORT usEngineClockLow;
- UCHAR ucEngineClockHigh;
-
- USHORT usMemoryClockLow;
- UCHAR ucMemoryClockHigh;
-
- UCHAR ucPCIEGen;
- USHORT usPCIELane;
-} ATOM_PPLIB_CI_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
-
-{
- USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
- UCHAR ucLowEngineClockHigh;
- USHORT usHighEngineClockLow; // High Engine clock in MHz.
- UCHAR ucHighEngineClockHigh;
- USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
- UCHAR ucMemoryClockHigh; // Currently unused.
- UCHAR ucPadding; // For proper alignment and size.
- USHORT usVDDC; // For the 780, use: None, Low, High, Variable
- UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
- UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. The minimum downstream width may be larger, depending on display BW requirements.
- USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
- ULONG ulFlags;
-} ATOM_PPLIB_RS780_CLOCK_INFO;
-
-#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
-#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
-#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
-#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
-
-#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
-#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
-#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
-
-#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
-#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
-#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
-
-typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
- USHORT usEngineClockLow; //clock frequency & 0xFFFF. The unit is 10 kHz.
- UCHAR ucEngineClockHigh; //clock frequency >> 16.
- UCHAR vddcIndex; //2-bit vddc index;
- USHORT tdpLimit;
- //please initialize to 0
- USHORT rsv1;
- //please initialize to 0s
- ULONG rsv2[2];
-}ATOM_PPLIB_SUMO_CLOCK_INFO;
-
-
-
-typedef struct _ATOM_PPLIB_STATE_V2
-{
- //number of valid dpm levels in this state; Driver uses it to calculate the whole
- //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
- UCHAR ucNumDPMLevels;
-
- //an index into the array of nonClockInfos
- UCHAR nonClockInfoIndex;
- /**
- * Driver will read the first ucNumDPMLevels in this array
- */
- UCHAR clockInfoIndex[1];
-} ATOM_PPLIB_STATE_V2;
-
-typedef struct _StateArray{
- //how many states we have
- UCHAR ucNumEntries;
-
- ATOM_PPLIB_STATE_V2 states[1];
-}StateArray;
-
-
-typedef struct _ClockInfoArray{
- //how many clock levels we have
- UCHAR ucNumEntries;
-
- //sizeof(ATOM_PPLIB_CLOCK_INFO)
- UCHAR ucEntrySize;
-
- UCHAR clockInfo[1];
-}ClockInfoArray;
-
-typedef struct _NonClockInfoArray{
-
- //how many non-clock levels we have. Normally this should be the same as the number of states.
- UCHAR ucNumEntries;
- //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
- UCHAR ucEntrySize;
-
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
-}NonClockInfoArray;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
-{
- USHORT usClockLow;
- UCHAR ucClockHigh;
- USHORT usVoltage;
-}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
-{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
-}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
-{
- USHORT usSclkLow;
- UCHAR ucSclkHigh;
- USHORT usMclkLow;
- UCHAR ucMclkHigh;
- USHORT usVddc;
- USHORT usVddci;
-}ATOM_PPLIB_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
-{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
-}ATOM_PPLIB_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_CAC_Leakage_Record
-{
- USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; For CI and newer, we use this as the real VDDC value.
- ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standardized VDDC value.
-}ATOM_PPLIB_CAC_Leakage_Record;
-
-typedef struct _ATOM_PPLIB_CAC_Leakage_Table
-{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
-}ATOM_PPLIB_CAC_Leakage_Table;
-
-typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
-{
- USHORT usVoltage;
- USHORT usSclkLow;
- UCHAR ucSclkHigh;
- USHORT usMclkLow;
- UCHAR ucMclkHigh;
-}ATOM_PPLIB_PhaseSheddingLimits_Record;
-
-typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
-{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
-}ATOM_PPLIB_PhaseSheddingLimits_Table;
-
-typedef struct _VCEClockInfo{
- USHORT usEVClkLow;
- UCHAR ucEVClkHigh;
- USHORT usECClkLow;
- UCHAR ucECClkHigh;
-}VCEClockInfo;
-
-typedef struct _VCEClockInfoArray{
- UCHAR ucNumEntries;
- VCEClockInfo entries[1];
-}VCEClockInfoArray;
-
-typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
-{
- USHORT usVoltage;
- UCHAR ucVCEClockInfoIndex;
-}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
-{
- UCHAR numEntries;
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_VCE_State_Record
-{
- UCHAR ucVCEClockInfoIndex;
- UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
-}ATOM_PPLIB_VCE_State_Record;
-
-typedef struct _ATOM_PPLIB_VCE_State_Table
-{
- UCHAR numEntries;
- ATOM_PPLIB_VCE_State_Record entries[1];
-}ATOM_PPLIB_VCE_State_Table;
-
-
-typedef struct _ATOM_PPLIB_VCE_Table
-{
- UCHAR revid;
-// VCEClockInfoArray array;
-// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
-// ATOM_PPLIB_VCE_State_Table states;
-}ATOM_PPLIB_VCE_Table;
-
-
-typedef struct _UVDClockInfo{
- USHORT usVClkLow;
- UCHAR ucVClkHigh;
- USHORT usDClkLow;
- UCHAR ucDClkHigh;
-}UVDClockInfo;
-
-typedef struct _UVDClockInfoArray{
- UCHAR ucNumEntries;
- UVDClockInfo entries[1];
-}UVDClockInfoArray;
-
-typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
-{
- USHORT usVoltage;
- UCHAR ucUVDClockInfoIndex;
-}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
-{
- UCHAR numEntries;
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_UVD_State_Record
-{
- UCHAR ucUVDClockInfoIndex;
- UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
-}ATOM_PPLIB_UVD_State_Record;
-
-typedef struct _ATOM_PPLIB_UVD_State_Table
-{
- UCHAR numEntries;
- ATOM_PPLIB_UVD_State_Record entries[1];
-}ATOM_PPLIB_UVD_State_Table;
-
-
-typedef struct _ATOM_PPLIB_UVD_Table
-{
- UCHAR revid;
-// UVDClockInfoArray array;
-// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
-// ATOM_PPLIB_UVD_State_Table states;
-}ATOM_PPLIB_UVD_Table;
-
-
-typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
-{
- USHORT usVoltage;
- USHORT usSAMClockLow;
- UCHAR ucSAMClockHigh;
-}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
- UCHAR numEntries;
- ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_SAMU_Table
-{
- UCHAR revid;
- ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
-}ATOM_PPLIB_SAMU_Table;
-
-#define ATOM_PPM_A_A 1
-#define ATOM_PPM_A_I 2
-typedef struct _ATOM_PPLIB_PPM_Table
-{
- UCHAR ucRevId;
- UCHAR ucPpmDesign; //A+I or A+A
- USHORT usCpuCoreNumber;
- ULONG ulPlatformTDP;
- ULONG ulSmallACPlatformTDP;
- ULONG ulPlatformTDC;
- ULONG ulSmallACPlatformTDC;
- ULONG ulApuTDP;
- ULONG ulDGpuTDP;
- ULONG ulDGpuUlvPower;
- ULONG ulTjmax;
-} ATOM_PPLIB_PPM_Table;
-
-/**************************************************************************/
-
// Following definitions are for compatibility issue in different SW components.
#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
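
The PPLib definitions removed here live on in pptable.h, included at the end of this file. Two conventions in them recur throughout the dpm code: clock values are split into a 16-bit low word plus an 8-bit high byte, and ATOM_PPLIB_STATE_V2 records are variable-sized, so the state array is walked by byte arithmetic rather than array indexing. A minimal sketch of both, with stand-in types:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: reassemble the 24-bit clock (10 kHz units) that the
     * PPLib tables split across usClockLow/ucClockHigh. */
    static uint32_t pplib_clock(uint16_t low, uint8_t high)
    {
        return ((uint32_t)high << 16) | low;
    }

    /* Sketch: ATOM_PPLIB_STATE_V2 records are sizeof(header) plus one
     * clock index per DPM level, per the comment in the block above.
     * Types here are stand-ins for the relocated definitions. */
    struct state_v2 {
        uint8_t num_dpm_levels;        /* at least 1 in a valid table */
        uint8_t non_clock_info_index;
        uint8_t clock_info_index[1];   /* really num_dpm_levels long */
    };

    static const struct state_v2 *next_state(const struct state_v2 *s)
    {
        size_t len = sizeof(*s) +
                     (s->num_dpm_levels - 1) * sizeof(uint8_t);
        return (const struct state_v2 *)((const uint8_t *)s + len);
    }
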
@@ -8485,3 +7873,6 @@ typedef struct {
#endif /* _ATOMBIOS_H */
+
+#include "pptable.h"
+
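
Also from the relocated block: ulCapsAndSettings packs the PCIe link parameters into bitfields, with the width field storing lanes minus one. A sketch of the decode, assuming the ATOM_PPLIB_PCIE_LINK_* mask and shift values shown earlier in the removed block:

    #include <stdint.h>

    #define PCIE_LINK_SPEED_MASK  0x00000004u  /* 0 = 2.5 Gb/s, 1 = 5 Gb/s */
    #define PCIE_LINK_SPEED_SHIFT 2
    #define PCIE_LINK_WIDTH_MASK  0x000000F8u  /* stored as lanes - 1 */
    #define PCIE_LINK_WIDTH_SHIFT 3

    /* Sketch: pull the PCIe fields out of ulCapsAndSettings. */
    static unsigned pcie_gen2(uint32_t caps)
    {
        return (caps & PCIE_LINK_SPEED_MASK) >> PCIE_LINK_SPEED_SHIFT;
    }

    static unsigned pcie_lanes(uint32_t caps)
    {
        return ((caps & PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT) + 1;
    }
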
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b9d3b43f19c0..bf87f6d435f8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1910,6 +1910,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ /* disable the GRPH */
+ if (ASIC_IS_DCE4(rdev))
+ WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
+ else if (ASIC_IS_AVIVO(rdev))
+ WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
+
if (ASIC_IS_DCE6(rdev))
atombios_powergate_crtc(crtc, ATOM_ENABLE);
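
The new disable writes rely on the usual per-CRTC register addressing: every controller exposes an identical register block at a fixed stride, and radeon_crtc->crtc_offset selects the instance. A sketch of the scheme with hypothetical constants (not the real offsets):

    /* Sketch: per-CRTC register addressing.  Each CRTC's block sits at
     * base + index * stride, so one routine serves every controller.
     * The constants are stand-ins, not the hardware values. */
    #define GRPH_ENABLE_BASE 0x6100   /* hypothetical register offset */
    #define CRTC_STRIDE      0x800    /* hypothetical stride per CRTC */

    static void grph_disable(void (*wreg32)(unsigned reg, unsigned val),
                             unsigned crtc_index)
    {
        wreg32(GRPH_ENABLE_BASE + crtc_index * CRTC_STRIDE, 0);
    }
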
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 3569d89b9e41..00885417ffff 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -50,7 +50,7 @@ static char *pre_emph_names[] = {
* or from atom. Note that atom operates on
* dw units.
*/
-static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
@@ -100,7 +100,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
- radeon_copy_swap(base, send, send_bytes, true);
+ radeon_atom_copy_swap(base, send, send_bytes, true);
args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
@@ -137,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
recv_bytes = recv_size;
if (recv && recv_size)
- radeon_copy_swap(recv, base + 16, recv_bytes, false);
+ radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
return recv_bytes;
}
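
radeon_copy_swap() loses its static and gains the radeon_atom_ prefix so atombios_i2c.c can reuse it (see the extern added there below). The helper exists because the AtomBIOS scratch buffer is little-endian and dword-oriented, so big-endian hosts must swap each 32-bit unit during the copy. A self-contained sketch of that operation, assuming the caller bounds num_bytes to the temporary buffer as the driver does:

    #include <stdint.h>
    #include <string.h>

    /* Sketch: copy num_bytes while byte-swapping each 32-bit unit, the
     * way a big-endian host must talk to a little-endian dword buffer.
     * Caller guarantees num_bytes <= sizeof(tmp), as in the driver. */
    static void copy_swap32(uint8_t *dst, const uint8_t *src, uint8_t num_bytes)
    {
        uint8_t tmp[20] = {0};
        unsigned aligned = ((unsigned)num_bytes + 3u) & ~3u;
        unsigned i;

        memcpy(tmp, src, num_bytes);
        for (i = 0; i < aligned; i += 4) {
            uint8_t b0 = tmp[i], b1 = tmp[i + 1];

            tmp[i] = tmp[i + 3];
            tmp[i + 1] = tmp[i + 2];
            tmp[i + 2] = b1;
            tmp[i + 3] = b0;
        }
        memcpy(dst, tmp, num_bytes);
    }
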
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 092275d53d4a..dfac7965ea28 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -682,8 +682,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
@@ -710,8 +708,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio &&
- !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ radeon_audio)
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
@@ -722,8 +719,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio &&
- !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ radeon_audio)
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -737,8 +733,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio &&
- !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ radeon_audio)
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
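
With the DCE6 exclusion dropped (and rdev no longer needed), the encoder-mode choice for the DVI/HDMI-class connectors above reduces to one question per connector. Schematically:

    /* Sketch: the simplified decision after this hunk -- HDMI whenever
     * the monitor is HDMI-capable and audio is enabled, DVI otherwise. */
    enum mode_sketch { MODE_DVI, MODE_HDMI };

    static enum mode_sketch pick_mode(int monitor_is_hdmi, int audio_enabled)
    {
        return (monitor_is_hdmi && audio_enabled) ? MODE_HDMI : MODE_DVI;
    }
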
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 082338df708a..deaf98cdca3a 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -27,10 +27,12 @@
#include "radeon.h"
#include "atom.h"
+extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
+
#define TARGET_HW_I2C_CLOCK 50
/* these are a limitation of ProcessI2cChannelTransaction not the hw */
-#define ATOM_MAX_HW_I2C_WRITE 2
+#define ATOM_MAX_HW_I2C_WRITE 3
#define ATOM_MAX_HW_I2C_READ 255
static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
@@ -50,20 +52,24 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
if (flags & HW_I2C_WRITE) {
if (num > ATOM_MAX_HW_I2C_WRITE) {
- DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
+ DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
return -EINVAL;
}
- memcpy(&out, buf, num);
+ args.ucRegIndex = buf[0];
+ if (num > 1)
+ memcpy(&out, &buf[1], num - 1);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
return -EINVAL;
}
+ args.ucRegIndex = 0;
+ args.lpI2CDataOut = 0;
}
+ args.ucFlag = flags;
args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
- args.ucRegIndex = 0;
args.ucTransBytes = num;
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
@@ -77,7 +83,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
}
if (!(flags & HW_I2C_WRITE))
- memcpy(buf, base, num);
+ radeon_atom_copy_swap(buf, base, num, false);
return 0;
}
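
The write limit grows from 2 to 3 bytes because the message layout changed: byte 0 of the caller's buffer now rides in args.ucRegIndex, and only the remaining bytes travel through the 16-bit lpI2CDataOut word. A sketch of the packing, with stand-in field names mirroring the args structure:

    #include <stdint.h>
    #include <string.h>

    /* Sketch: pack an SMBus-style write (register index + payload) the
     * way the hunk above does -- byte 0 goes in the index field, at
     * most two more bytes fit in the 16-bit data word. */
    struct i2c_args_sketch {
        uint8_t reg_index;
        uint16_t data_out;   /* driver converts with cpu_to_le16() */
    };

    static int pack_write(struct i2c_args_sketch *args,
                          const uint8_t *buf, unsigned num)
    {
        uint16_t out = 0;

        if (num < 1 || num > 3)
            return -1;   /* 1 index byte + up to 2 data bytes */
        args->reg_index = buf[0];
        if (num > 1)
            memcpy(&out, &buf[1], num - 1);
        args->data_out = out;
        return 0;
    }
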
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0bfd55e08820..084e69414fd1 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- u16 data_offset, size;
- u8 frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev)
eg_pi->vddci_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev)
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
@@ -2712,6 +2699,12 @@ int btc_dpm_init(struct radeon_device *rdev)
else
rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000;
+ /* make sure dc limits are valid */
+ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
return 0;
}
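
Two cleanups in this file: the open-coded spread-spectrum table parse is replaced by the shared rv770_get_engine_memory_ss() helper, and the new block at the end guards against power tables that leave the DC (battery) clock ceilings at zero, which would otherwise clamp clocks to nothing. The fallback in isolation, with a stand-in struct:

    /* Sketch: fall back to the AC ceilings when the DC ones are unset,
     * so later clamping against the DC limits can't pin clocks at 0.
     * The struct is a stand-in for the radeon equivalent. */
    struct clk_limits {
        unsigned sclk;   /* engine clock ceiling, 10 kHz units */
        unsigned mclk;   /* memory clock ceiling, 10 kHz units */
    };

    static void fixup_dc_limits(struct clk_limits *dc,
                                const struct clk_limits *ac)
    {
        if (dc->sclk == 0 || dc->mclk == 0)
            *dc = *ac;
    }
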
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
index 19a0114d2e3b..98d009e154bf 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -317,58 +317,4 @@ const u32 cayman_default_state[] =
0x00000010, /* */
};
-const u32 cayman_vs[] =
-{
- 0x00000004,
- 0x80400400,
- 0x0000a03c,
- 0x95000688,
- 0x00004000,
- 0x15000688,
- 0x00000000,
- 0x88000000,
- 0x04000000,
- 0x67961001,
-#ifdef __BIG_ENDIAN
- 0x00020000,
-#else
- 0x00000000,
-#endif
- 0x00000000,
- 0x04000000,
- 0x67961000,
-#ifdef __BIG_ENDIAN
- 0x00020008,
-#else
- 0x00000008,
-#endif
- 0x00000000,
-};
-
-const u32 cayman_ps[] =
-{
- 0x00000004,
- 0xa00c0000,
- 0x00000008,
- 0x80400000,
- 0x00000000,
- 0x95000688,
- 0x00000000,
- 0x88000000,
- 0x00380400,
- 0x00146b10,
- 0x00380000,
- 0x20146b10,
- 0x00380400,
- 0x40146b00,
- 0x80380000,
- 0x60146b00,
- 0x00000010,
- 0x000d1000,
- 0xb0800000,
- 0x00000000,
-};
-
-const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
-const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
new file mode 100644
index 000000000000..916630fdc796
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -0,0 +1,5239 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "radeon.h"
+#include "cikd.h"
+#include "r600_dpm.h"
+#include "ci_dpm.h"
+#include "atom.h"
+#include <linux/seq_file.h>
+
+#define MC_CG_ARB_FREQ_F0 0x0a
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define SMC_RAM_END 0x40000
+
+#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+
+static const struct ci_pt_defaults defaults_bonaire_xt =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_pro =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
+ { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
+ { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
+};
+
+static const struct ci_pt_defaults defaults_saturn_xt =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
+ { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
+ { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_saturn_pro =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
+ { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
+ { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
+};
+
+static const struct ci_pt_config_reg didt_config_ci[] =
+{
+ { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0xFFFFFFFF }
+};
+
+extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
+extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
+ u32 arb_freq_src, u32 arb_freq_dest);
+extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
+extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
+extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
+ u32 max_voltage_steps,
+ struct atom_voltage_table *voltage_table);
+extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable);
+
+static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
+ struct atom_voltage_table_entry *voltage_table,
+ u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
+static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
+static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
+ u32 target_tdp);
+static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
+
+static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = rdev->pm.dpm.priv;
+
+ return pi;
+}
+
+static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
+{
+ struct ci_ps *ps = rps->ps_priv;
+
+ return ps;
+}
+
+static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ switch (rdev->pdev->device) {
+ case 0x6650:
+ case 0x6658:
+ case 0x665C:
+ default:
+ pi->powertune_defaults = &defaults_bonaire_xt;
+ break;
+ case 0x6651:
+ case 0x665D:
+ pi->powertune_defaults = &defaults_bonaire_pro;
+ break;
+ case 0x6640:
+ pi->powertune_defaults = &defaults_saturn_xt;
+ break;
+ case 0x6641:
+ pi->powertune_defaults = &defaults_saturn_pro;
+ break;
+ }
+
+ pi->dte_tj_offset = 0;
+
+ pi->caps_power_containment = true;
+ pi->caps_cac = false;
+ pi->caps_sq_ramping = false;
+ pi->caps_db_ramping = false;
+ pi->caps_td_ramping = false;
+ pi->caps_tcp_ramping = false;
+
+ if (pi->caps_power_containment) {
+ pi->caps_cac = true;
+ pi->enable_bapm_feature = true;
+ pi->enable_tdc_limit_feature = true;
+ pi->enable_pkg_pwr_tracking_feature = true;
+ }
+}
+
+static u8 ci_convert_to_vid(u16 vddc)
+{
+ return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
+}
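
ci_convert_to_vid() encodes the SVI voltage-ID convention: with VOLTAGE_SCALE converting millivolts to quarter-millivolt units, 6200 corresponds to a 1.55 V ceiling and each VID step drops the rail by 25 quarter-millivolts (6.25 mV). A sketch with the inverse mapping for sanity-checking table entries; the unit reading is an inference from the constants, not documented in this file:

    #include <stdint.h>

    #define VOLTAGE_SCALE 4   /* mV -> 0.25 mV units, as defined above */

    /* vid = (6200 - 4 * vddc_mV) / 25; assumes vddc_mv <= 1550 */
    static uint8_t to_vid(uint16_t vddc_mv)
    {
        return (6200 - vddc_mv * VOLTAGE_SCALE) / 25;
    }

    /* Inverse: the millivolt value a given VID decodes back to. */
    static uint16_t from_vid(uint8_t vid)
    {
        return (6200 - vid * 25) / VOLTAGE_SCALE;
    }
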
+
+static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
+ u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
+ u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
+ u32 i;
+
+ if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
+ return -EINVAL;
+ if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
+ return -EINVAL;
+ if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
+ return -EINVAL;
+
+ for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
+ hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
+ hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
+ } else {
+ lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
+ hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
+ }
+ }
+ return 0;
+}
+
+static int ci_populate_vddc_vid(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u8 *vid = pi->smc_powertune_table.VddCVid;
+ u32 i;
+
+ if (pi->vddc_voltage_table.count > 8)
+ return -EINVAL;
+
+ for (i = 0; i < pi->vddc_voltage_table.count; i++)
+ vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
+
+ return 0;
+}
+
+static int ci_populate_svi_load_line(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+
+ pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
+ pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
+ pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
+ pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int ci_populate_tdc_limit(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+ u16 tdc_limit;
+
+ tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
+ pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
+ pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ pt_defaults->tdc_vddc_throttle_release_limit_perc;
+ pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
+
+ return 0;
+}
+
+static int ci_populate_dw8(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+ int ret;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, PmFuseTable) +
+ offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
+ (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
+ pi->sram_end);
+ if (ret)
+ return -EINVAL;
+ else
+ pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
+ u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
+ int i, min, max;
+
+ min = max = hi_vid[0];
+ for (i = 0; i < 8; i++) {
+ if (0 != hi_vid[i]) {
+ if (min > hi_vid[i])
+ min = hi_vid[i];
+ if (max < hi_vid[i])
+ max = hi_vid[i];
+ }
+
+ if (0 != lo_vid[i]) {
+ if (min > lo_vid[i])
+ min = lo_vid[i];
+ if (max < lo_vid[i])
+ max = lo_vid[i];
+ }
+ }
+
+ if ((min == 0) || (max == 0))
+ return -EINVAL;
+ pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
+ pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
+
+ return 0;
+}
+
+static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
+ u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+
+ hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
+ lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
+
+ pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
+ pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
+
+ return 0;
+}
+
+static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+ SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+ struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
+ int i, j, k;
+ const u16 *def1;
+ const u16 *def2;
+
+ dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
+ dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
+
+ dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
+ dpm_table->GpuTjMax =
+ (u8)(pi->thermal_temp_setting.temperature_high / 1000);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
+
+ if (ppm) {
+ dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
+ dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
+ } else {
+ dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
+ dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
+ }
+
+ dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
+ def1 = pt_defaults->bapmti_r;
+ def2 = pt_defaults->bapmti_rc;
+
+ for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU7_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU7_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ci_populate_pm_base(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 pm_fuse_table_offset;
+ int ret;
+
+ if (pi->caps_power_containment) {
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, pi->sram_end);
+ if (ret)
+ return ret;
+ ret = ci_populate_bapm_vddc_vid_sidd(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_vddc_vid(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_svi_load_line(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_tdc_limit(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_dw8(rdev);
+ if (ret)
+ return ret;
+ ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
+ if (ret)
+ return ret;
+ ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
+ (u8 *)&pi->smc_powertune_table,
+ sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
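
ci_populate_pm_base() shows the addressing pattern used throughout this file: SMC SRAM locations are computed as a fixed firmware-header base plus offsetof() into layout structs shared with the SMC firmware. Schematically, with stand-in types and a hypothetical base address:

    #include <stddef.h>
    #include <stdint.h>

    #define FW_HEADER_LOCATION 0x20000   /* hypothetical base, not the
                                          * real SMU7 constant */

    struct fw_header_sketch {
        uint32_t version;
        uint32_t pm_fuse_table;   /* SRAM offset of the fuse table */
    };

    /* Sketch: address of the 32-bit pointer itself, not of the table --
     * header base plus the field's offset within the shared layout. */
    static uint32_t pm_fuse_ptr_addr(void)
    {
        return FW_HEADER_LOCATION +
               offsetof(struct fw_header_sketch, pm_fuse_table);
    }
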
+
+static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 data;
+
+ if (pi->caps_sq_ramping) {
+ data = RREG32_DIDT(DIDT_SQ_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_SQ_CTRL0, data);
+ }
+
+ if (pi->caps_db_ramping) {
+ data = RREG32_DIDT(DIDT_DB_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_DB_CTRL0, data);
+ }
+
+ if (pi->caps_td_ramping) {
+ data = RREG32_DIDT(DIDT_TD_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_TD_CTRL0, data);
+ }
+
+ if (pi->caps_tcp_ramping) {
+ data = RREG32_DIDT(DIDT_TCP_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_TCP_CTRL0, data);
+ }
+}
+
+static int ci_program_pt_config_registers(struct radeon_device *rdev,
+ const struct ci_pt_config_reg *cac_config_regs)
+{
+ const struct ci_pt_config_reg *config_regs = cac_config_regs;
+ u32 data;
+ u32 cache = 0;
+
+ if (config_regs == NULL)
+ return -EINVAL;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
+ cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ } else {
+ switch (config_regs->type) {
+ case CISLANDS_CONFIGREG_SMC_IND:
+ data = RREG32_SMC(config_regs->offset);
+ break;
+ case CISLANDS_CONFIGREG_DIDT_IND:
+ data = RREG32_DIDT(config_regs->offset);
+ break;
+ default:
+ data = RREG32(config_regs->offset << 2);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ data |= cache;
+
+ switch (config_regs->type) {
+ case CISLANDS_CONFIGREG_SMC_IND:
+ WREG32_SMC(config_regs->offset, data);
+ break;
+ case CISLANDS_CONFIGREG_DIDT_IND:
+ WREG32_DIDT(config_regs->offset, data);
+ break;
+ default:
+ WREG32(config_regs->offset << 2, data);
+ break;
+ }
+ cache = 0;
+ }
+ config_regs++;
+ }
+ return 0;
+}
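
The table walker above boils down to one masked read-modify-write per entry, with CISLANDS_CONFIGREG_CACHE entries accumulated and folded into the next real register write. The core update in isolation:

    #include <stdint.h>

    /* Sketch: the read-modify-write at the heart of the walker above.
     * 'cache' carries values accumulated from CONFIGREG_CACHE entries. */
    static uint32_t apply_field(uint32_t reg, uint32_t mask, uint32_t shift,
                                uint32_t value, uint32_t cache)
    {
        reg &= ~mask;                    /* clear the target field */
        reg |= (value << shift) & mask;  /* insert the new value */
        reg |= cache;                    /* fold in cached fields */
        return reg;
    }
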
+
+static int ci_enable_didt(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ if (pi->caps_sq_ramping || pi->caps_db_ramping ||
+ pi->caps_td_ramping || pi->caps_tcp_ramping) {
+ cik_enter_rlc_safe_mode(rdev);
+
+ if (enable) {
+ ret = ci_program_pt_config_registers(rdev, didt_config_ci);
+ if (ret) {
+ cik_exit_rlc_safe_mode(rdev);
+ return ret;
+ }
+ }
+
+ ci_do_enable_didt(rdev, enable);
+
+ cik_exit_rlc_safe_mode(rdev);
+ }
+
+ return 0;
+}
+
+static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ int ret = 0;
+
+ if (enable) {
+ pi->power_containment_features = 0;
+ if (pi->caps_power_containment) {
+ if (pi->enable_bapm_feature) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
+ if (smc_result != PPSMC_Result_OK)
+ ret = -EINVAL;
+ else
+ pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
+ }
+
+ if (pi->enable_tdc_limit_feature) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
+ if (smc_result != PPSMC_Result_OK)
+ ret = -EINVAL;
+ else
+ pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
+ }
+
+ if (pi->enable_pkg_pwr_tracking_feature) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
+ if (smc_result != PPSMC_Result_OK) {
+ ret = -EINVAL;
+ } else {
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+ u32 default_pwr_limit =
+ (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
+
+ pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
+
+ ci_set_power_limit(rdev, default_pwr_limit);
+ }
+ }
+ }
+ } else {
+ if (pi->caps_power_containment && pi->power_containment_features) {
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);
+
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
+
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
+ pi->power_containment_features = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ int ret = 0;
+
+ if (pi->caps_cac) {
+ if (enable) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
+ if (smc_result != PPSMC_Result_OK) {
+ ret = -EINVAL;
+ pi->cac_enabled = false;
+ } else {
+ pi->cac_enabled = true;
+ }
+ } else if (pi->cac_enabled) {
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
+ pi->cac_enabled = false;
+ }
+ }
+
+ return ret;
+}
+
+static int ci_power_control_set_level(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+ s32 adjust_percent;
+ s32 target_tdp;
+ int ret = 0;
+ bool adjust_polarity = false; /* ??? */
+
+ if (pi->caps_power_containment &&
+ (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
+ adjust_percent = adjust_polarity ?
+ rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
+ target_tdp = ((100 + adjust_percent) *
+ (s32)cac_tdp_table->configurable_tdp) / 100;
+ target_tdp *= 256;
+
+ ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
+ }
+
+ return ret;
+}
+
+void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (pi->uvd_power_gated == gate)
+ return;
+
+ pi->uvd_power_gated = gate;
+
+ ci_update_uvd_dpm(rdev, gate);
+}
+
+bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+ u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+
+ if (vblank_time < switch_limit)
+ return true;
+ else
+ return false;
+}
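
The 450/300 figures are microsecond safety thresholds for a memory-clock switch on GDDR5 versus other memory: the switch is only glitch-free inside vertical blanking, hence the comparison against the mode's vblank duration. For reference, the standard vblank-time arithmetic (a sketch, not a copy of r600_dpm_get_vblank_time()):

    #include <stdint.h>

    /* Sketch: vertical blanking time in microseconds from mode timings:
     * blank lines * pixels per line, divided by the pixel rate in kHz,
     * scaled by 1000 to land in microseconds. */
    static uint32_t vblank_time_us(uint32_t vtotal, uint32_t vdisplay,
                                   uint32_t htotal, uint32_t pixel_clock_khz)
    {
        if (pixel_clock_khz == 0)
            return 0;
        return (uint32_t)((uint64_t)(vtotal - vdisplay) * htotal * 1000 /
                          pixel_clock_khz);
    }
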
+
+static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct ci_ps *ps = ci_get_ps(rps);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_clock_and_voltage_limits *max_limits;
+ bool disable_mclk_switching;
+ u32 sclk, mclk;
+ int i;
+
+ if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
+ ci_dpm_vblank_too_short(rdev))
+ disable_mclk_switching = true;
+ else
+ disable_mclk_switching = false;
+
+ if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+ pi->battery_state = true;
+ else
+ pi->battery_state = false;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (rdev->pm.dpm.ac_power == false) {
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].mclk > max_limits->mclk)
+ ps->performance_levels[i].mclk = max_limits->mclk;
+ if (ps->performance_levels[i].sclk > max_limits->sclk)
+ ps->performance_levels[i].sclk = max_limits->sclk;
+ }
+ }
+
+ /* XXX validate the min clocks required for display */
+
+ if (disable_mclk_switching) {
+ mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
+ sclk = ps->performance_levels[0].sclk;
+ } else {
+ mclk = ps->performance_levels[0].mclk;
+ sclk = ps->performance_levels[0].sclk;
+ }
+
+ ps->performance_levels[0].sclk = sclk;
+ ps->performance_levels[0].mclk = mclk;
+
+ if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
+ ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
+
+ if (disable_mclk_switching) {
+ if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
+ ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
+ } else {
+ if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
+ ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
+ }
+}
+
+static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp)
+{
+ int low_temp = 0 * 1000;
+ int high_temp = 255 * 1000;
+ u32 tmp;
+
+ if (low_temp < min_temp)
+ low_temp = min_temp;
+ if (high_temp > max_temp)
+ high_temp = max_temp;
+ if (high_temp < low_temp) {
+ DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ return -EINVAL;
+ }
+
+ tmp = RREG32_SMC(CG_THERMAL_INT);
+ tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
+ tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
+ CI_DIG_THERM_INTL(low_temp / 1000);
+ WREG32_SMC(CG_THERMAL_INT, tmp);
+
+#if 0
+ /* XXX: need to figure out how to handle this properly */
+ tmp = RREG32_SMC(CG_THERMAL_CTRL);
+ tmp &= DIG_THERM_DPM_MASK;
+ tmp |= DIG_THERM_DPM(high_temp / 1000);
+ WREG32_SMC(CG_THERMAL_CTRL, tmp);
+#endif
+
+ return 0;
+}
+
+#if 0
+static int ci_read_smc_soft_register(struct radeon_device *rdev,
+ u16 reg_offset, u32 *value)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ return ci_read_smc_sram_dword(rdev,
+ pi->soft_regs_start + reg_offset,
+ value, pi->sram_end);
+}
+#endif
+
+static int ci_write_smc_soft_register(struct radeon_device *rdev,
+ u16 reg_offset, u32 value)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ return ci_write_smc_sram_dword(rdev,
+ pi->soft_regs_start + reg_offset,
+ value, pi->sram_end);
+}
+
+static void ci_init_fps_limits(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
+
+ if (pi->caps_fps) {
+ u16 tmp;
+
+ tmp = 45;
+ table->FpsHighT = cpu_to_be16(tmp);
+
+ tmp = 30;
+ table->FpsLowT = cpu_to_be16(tmp);
+ }
+}
+
+static int ci_update_sclk_t(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret = 0;
+ u32 low_sclk_interrupt_t = 0;
+
+ if (pi->caps_sclk_throttle_low_notification) {
+ low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
+
+ ret = ci_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
+ (u8 *)&low_sclk_interrupt_t,
+ sizeof(u32), pi->sram_end);
+
+ }
+
+ return ret;
+}
+
+static void ci_get_leakage_voltages(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u16 leakage_id, virtual_voltage_id;
+ u16 vddc, vddci;
+ int i;
+
+ pi->vddc_leakage.count = 0;
+ pi->vddci_leakage.count = 0;
+
+ if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
+ for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+ if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
+ virtual_voltage_id,
+ leakage_id) == 0) {
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+ pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+ pi->vddc_leakage.count++;
+ }
+ if (vddci != 0 && vddci != virtual_voltage_id) {
+ pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
+ pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
+ pi->vddci_leakage.count++;
+ }
+ }
+ }
+ }
+}
+
+static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ bool want_thermal_protection;
+ enum radeon_dpm_event_src dpm_event_src;
+ u32 tmp;
+
+ switch (sources) {
+ case 0:
+ default:
+ want_thermal_protection = false;
+ break;
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+ want_thermal_protection = true;
+ dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
+ break;
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+ want_thermal_protection = true;
+ dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
+ break;
+ case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+ (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+ want_thermal_protection = true;
+ dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+ break;
+ }
+
+ if (want_thermal_protection) {
+#if 0
+ /* XXX: need to figure out how to handle this properly */
+ tmp = RREG32_SMC(CG_THERMAL_CTRL);
+ tmp &= DPM_EVENT_SRC_MASK;
+ tmp |= DPM_EVENT_SRC(dpm_event_src);
+ WREG32_SMC(CG_THERMAL_CTRL, tmp);
+#endif
+
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ if (pi->thermal_protection)
+ tmp &= ~THERMAL_PROTECTION_DIS;
+ else
+ tmp |= THERMAL_PROTECTION_DIS;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+ } else {
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp |= THERMAL_PROTECTION_DIS;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+ }
+}
+
+static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
+ enum radeon_dpm_auto_throttle_src source,
+ bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (enable) {
+ if (!(pi->active_auto_throttle_sources & (1 << source))) {
+ pi->active_auto_throttle_sources |= 1 << source;
+ ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+ }
+ } else {
+ if (pi->active_auto_throttle_sources & (1 << source)) {
+ pi->active_auto_throttle_sources &= ~(1 << source);
+ ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+ }
+ }
+}
+
+static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
+{
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
+}
+
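+/* Let the SMC resume sclk/mclk level changes after a DPM table update and
+ * clear the pending-update flags.
+ */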
+static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+
+ if (!pi->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((!pi->sclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if ((!pi->mclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ pi->need_update_smu7_dpm_table = 0;
+ return 0;
+}
+
+static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+
+ if (enable) {
+ if (!pi->sclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if (!pi->mclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+
+ WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);
+
+ WREG32_SMC(LCAC_MC0_CNTL, 0x05);
+ WREG32_SMC(LCAC_MC1_CNTL, 0x05);
+ WREG32_SMC(LCAC_CPL_CNTL, 0x100005);
+
+ udelay(10);
+
+ WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
+ WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
+ WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
+ }
+ } else {
+ if (!pi->sclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if (!pi->mclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
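+/* Bring up dynamic power management: global/sclk power management bits,
+ * SMC voltage control, sclk/mclk DPM and, if enabled, PCIe DPM.
+ */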
+static int ci_start_dpm(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ int ret;
+ u32 tmp;
+
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp |= GLOBAL_PWRMGT_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+ tmp |= DYNAMIC_PM_EN;
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+ ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
+
+ WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);
+
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+
+ ret = ci_enable_sclk_mclk_dpm(rdev, true);
+ if (ret)
+ return ret;
+
+ if (!pi->pcie_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
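+/* Hold sclk/mclk at their current levels while the DPM tables are being
+ * rewritten.
+ */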
+static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+
+ if (!pi->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((!pi->sclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if ((!pi->mclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
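+/* Tear down dynamic power management in roughly the reverse order of
+ * ci_start_dpm().
+ */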
+static int ci_stop_dpm(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ int ret;
+ u32 tmp;
+
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp &= ~GLOBAL_PWRMGT_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+
+ /* SCLK_PWRMGT_CNTL is an SMC-indirect register. */
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+ tmp &= ~DYNAMIC_PM_EN;
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+ if (!pi->pcie_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ ret = ci_enable_sclk_mclk_dpm(rdev, false);
+ if (ret)
+ return ret;
+
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
+{
+ u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+
+ if (enable)
+ tmp &= ~SCLK_PWRMGT_OFF;
+ else
+ tmp |= SCLK_PWRMGT_OFF;
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+}
+
+#if 0
+static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
+ bool ac_power)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+ u32 power_limit;
+
+ if (ac_power)
+ power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
+ else
+ power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
+
+ ci_set_power_limit(rdev, power_limit);
+
+ if (pi->caps_automatic_dc_transition) {
+ if (ac_power)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
+ else
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
+ }
+
+ return 0;
+}
+#endif
+
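+/* SMC messages carry a single u32 in SMC_MSG_ARG_0; these helpers wrap
+ * ci_send_msg_to_smc() for messages that pass in or hand back one.
+ */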
+static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 parameter)
+{
+ WREG32(SMC_MSG_ARG_0, parameter);
+ return ci_send_msg_to_smc(rdev, msg);
+}
+
+static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 *parameter)
+{
+ PPSMC_Result smc_result;
+
+ smc_result = ci_send_msg_to_smc(rdev, msg);
+
+ if ((smc_result == PPSMC_Result_OK) && parameter)
+ *parameter = RREG32(SMC_MSG_ARG_0);
+
+ return smc_result;
+}
+
+static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!pi->sclk_dpm_key_disabled) {
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!pi->mclk_dpm_key_disabled) {
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!pi->pcie_dpm_key_disabled) {
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
+ u32 target_tdp)
+{
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ return 0;
+}
+
+static int ci_set_boot_state(struct radeon_device *rdev)
+{
+ return ci_enable_sclk_mclk_dpm(rdev, false);
+}
+
+static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
+{
+ u32 sclk_freq;
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_return_parameter(rdev,
+ PPSMC_MSG_API_GetSclkFrequency,
+ &sclk_freq);
+ if (smc_result != PPSMC_Result_OK)
+ sclk_freq = 0;
+
+ return sclk_freq;
+}
+
+static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
+{
+ u32 mclk_freq;
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_return_parameter(rdev,
+ PPSMC_MSG_API_GetMclkFrequency,
+ &mclk_freq);
+ if (smc_result != PPSMC_Result_OK)
+ mclk_freq = 0;
+
+ return mclk_freq;
+}
+
+static void ci_dpm_start_smc(struct radeon_device *rdev)
+{
+ int i;
+
+ ci_program_jump_on_start(rdev);
+ ci_start_smc_clock(rdev);
+ ci_start_smc(rdev);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
+ break;
+ }
+}
+
+static void ci_dpm_stop_smc(struct radeon_device *rdev)
+{
+ ci_reset_smc(rdev);
+ ci_stop_smc_clock(rdev);
+}
+
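+/* Read the SMU7 firmware header out of SMC SRAM and cache the offsets of
+ * the DPM, soft-register, MC register, fan and arbiter tables.
+ */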
+static int ci_process_firmware_header(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+ int ret;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->dpm_table_start = tmp;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->soft_regs_start = tmp;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcRegisterTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->mc_reg_table_start = tmp;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, FanTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->fan_table_start = tmp;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->arb_table_start = tmp;
+
+ return 0;
+}
+
+static void ci_read_clock_registers(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ pi->clock_registers.cg_spll_func_cntl =
+ RREG32_SMC(CG_SPLL_FUNC_CNTL);
+ pi->clock_registers.cg_spll_func_cntl_2 =
+ RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
+ pi->clock_registers.cg_spll_func_cntl_3 =
+ RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
+ pi->clock_registers.cg_spll_func_cntl_4 =
+ RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
+ pi->clock_registers.cg_spll_spread_spectrum =
+ RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
+ pi->clock_registers.cg_spll_spread_spectrum_2 =
+ RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
+ pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
+ pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
+ pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
+ pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
+ pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
+ pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
+ pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
+ pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
+ pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+static void ci_init_sclk_t(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ pi->low_sclk_interrupt_t = 0;
+}
+
+static void ci_enable_thermal_protection(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+ if (enable)
+ tmp &= ~THERMAL_PROTECTION_DIS;
+ else
+ tmp |= THERMAL_PROTECTION_DIS;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+}
+
+static void ci_enable_acpi_power_management(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+ tmp |= STATIC_PM_EN;
+
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+}
+
+#if 0
+static int ci_enter_ulp_state(struct radeon_device *rdev)
+{
+ WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+ udelay(25000);
+
+ return 0;
+}
+
+static int ci_exit_ulp_state(struct radeon_device *rdev)
+{
+ int i;
+
+ WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+ udelay(7000);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(SMC_RESP_0) == 1)
+ break;
+ udelay(1000);
+ }
+
+ return 0;
+}
+#endif
+
+static int ci_notify_smc_display_change(struct radeon_device *rdev,
+ bool has_display)
+{
+ PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+ return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
+}
+
+static int ci_enable_ds_master_switch(struct radeon_device *rdev,
+ bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (enable) {
+ if (pi->caps_sclk_ds) {
+ if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
+ return -EINVAL;
+ } else {
+ if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ } else {
+ if (pi->caps_sclk_ds) {
+ if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
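+/* Program the display gap logic and the PreVBlankGap/VBlankTimeout soft
+ * registers from the current refresh rate and vblank time, falling back
+ * to 60 Hz and 500 us when either is unknown.
+ */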
+static void ci_program_display_gap(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
+ u32 pre_vbi_time_in_us;
+ u32 frame_time_in_us;
+ u32 ref_clock = rdev->clock.spll.reference_freq;
+ u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
+ u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+
+ tmp &= ~DISP_GAP_MASK;
+ if (rdev->pm.dpm.new_active_crtc_count > 0)
+ tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ else
+ tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
+
+ if (refresh_rate == 0)
+ refresh_rate = 60;
+ if (vblank_time == 0xffffffff)
+ vblank_time = 500;
+ frame_time_in_us = 1000000 / refresh_rate;
+ pre_vbi_time_in_us =
+ frame_time_in_us - 200 - vblank_time;
+ tmp = pre_vbi_time_in_us * (ref_clock / 100);
+
+ WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
+ ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
+ ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
+
+ ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
+}
+
+static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (enable) {
+ if (pi->caps_sclk_ss_support) {
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp |= DYN_SPREAD_SPECTRUM_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+ }
+ } else {
+ tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
+ tmp &= ~SSEN;
+ WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
+
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp &= ~DYN_SPREAD_SPECTRUM_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+ }
+}
+
+static void ci_program_sstp(struct radeon_device *rdev)
+{
+ WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+}
+
+static void ci_enable_display_gap(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
+
+ tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
+ tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+ DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
+
+ WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void ci_program_vc(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+ tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+ WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
+ WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
+ WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
+ WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
+ WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
+ WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
+ WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
+ WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
+}
+
+static void ci_clear_vc(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+ tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+ WREG32_SMC(CG_FTV_0, 0);
+ WREG32_SMC(CG_FTV_1, 0);
+ WREG32_SMC(CG_FTV_2, 0);
+ WREG32_SMC(CG_FTV_3, 0);
+ WREG32_SMC(CG_FTV_4, 0);
+ WREG32_SMC(CG_FTV_5, 0);
+ WREG32_SMC(CG_FTV_6, 0);
+ WREG32_SMC(CG_FTV_7, 0);
+}
+
+static int ci_upload_firmware(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int i;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
+ break;
+ }
+ WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
+
+ ci_stop_smc_clock(rdev);
+ ci_reset_smc(rdev);
+
+ return ci_load_smc_ucode(rdev, pi->sram_end);
+}
+
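+/* SVI2-controlled rails have no GPIO LUT, so derive the voltage table
+ * directly from a clock/voltage dependency table.
+ */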
+static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
+ struct atom_voltage_table *voltage_table)
+{
+ u32 i;
+
+ if (voltage_dependency_table == NULL)
+ return -EINVAL;
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+
+ voltage_table->count = voltage_dependency_table->count;
+ for (i = 0; i < voltage_table->count; i++) {
+ voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
+static int ci_construct_voltage_tables(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT,
+ &pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ ret = ci_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
+ si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
+ &pi->vddc_voltage_table);
+
+ if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
+ VOLTAGE_OBJ_GPIO_LUT,
+ &pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ ret = ci_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
+ si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
+ &pi->vddci_voltage_table);
+
+ if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
+ VOLTAGE_OBJ_GPIO_LUT,
+ &pi->mvdd_voltage_table);
+ if (ret)
+ return ret;
+ } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ ret = ci_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ &pi->mvdd_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
+ si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
+ &pi->mvdd_voltage_table);
+
+ return 0;
+}
+
+static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
+ struct atom_voltage_table_entry *voltage_table,
+ SMU7_Discrete_VoltageLevel *smc_voltage_table)
+{
+ int ret;
+
+ ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
+ &smc_voltage_table->StdVoltageHiSidd,
+ &smc_voltage_table->StdVoltageLoSidd);
+
+ if (ret) {
+ smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
+ smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
+ }
+
+ smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
+ smc_voltage_table->StdVoltageHiSidd =
+ cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
+ smc_voltage_table->StdVoltageLoSidd =
+ cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
+}
+
+static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ unsigned int count;
+
+ table->VddcLevelCount = pi->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ ci_populate_smc_voltage_table(rdev,
+ &pi->vddc_voltage_table.entries[count],
+ &table->VddcLevel[count]);
+
+ if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+ table->VddcLevel[count].Smio |=
+ pi->vddc_voltage_table.entries[count].smio_low;
+ else
+ table->VddcLevel[count].Smio = 0;
+ }
+ table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ table->VddciLevelCount = pi->vddci_voltage_table.count;
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ ci_populate_smc_voltage_table(rdev,
+ &pi->vddci_voltage_table.entries[count],
+ &table->VddciLevel[count]);
+
+ if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+ table->VddciLevel[count].Smio |=
+ pi->vddci_voltage_table.entries[count].smio_low;
+ else
+ table->VddciLevel[count].Smio = 0;
+ }
+ table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ unsigned int count;
+
+ table->MvddLevelCount = pi->mvdd_voltage_table.count;
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ ci_populate_smc_voltage_table(rdev,
+ &pi->mvdd_voltage_table.entries[count],
+ &table->MvddLevel[count]);
+
+ if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+ table->MvddLevel[count].Smio |=
+ pi->mvdd_voltage_table.entries[count].smio_low;
+ else
+ table->MvddLevel[count].Smio = 0;
+ }
+ table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ int ret;
+
+ ret = ci_populate_smc_vddc_table(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_vddci_table(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_mvdd_table(rdev, table);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
+ SMU7_Discrete_VoltageLevel *voltage)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 i = 0;
+
+ if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+ for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
+ if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
+ voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ /* No MVDD control; the caller substitutes a default value. */
+ return -EINVAL;
+}
+
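+/* Look up the standardized high/low leakage (SIDD) voltages for a vddc
+ * value in the CAC leakage table: first by exact match, then by the
+ * first entry at or above the requested value.
+ */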
+static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
+ struct atom_voltage_table_entry *voltage_table,
+ u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
+{
+ u16 v_index, idx;
+ bool voltage_found = false;
+ *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
+ *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
+
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+ return -EINVAL;
+
+ if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+ for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+ if (voltage_table->value ==
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+ voltage_found = true;
+ if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
+ idx = v_index;
+ else
+ idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
+ *std_voltage_lo_sidd =
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
+ *std_voltage_hi_sidd =
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
+ break;
+ }
+ }
+
+ if (!voltage_found) {
+ for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+ if (voltage_table->value <=
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+ voltage_found = true;
+ if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
+ idx = v_index;
+ else
+ idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
+ *std_voltage_lo_sidd =
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
+ *std_voltage_hi_sidd =
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
+ const struct radeon_phase_shedding_limits_table *limits,
+ u32 sclk,
+ u32 *phase_shedding)
+{
+ unsigned int i;
+
+ *phase_shedding = 1;
+
+ for (i = 0; i < limits->count; i++) {
+ if (sclk < limits->entries[i].sclk) {
+ *phase_shedding = i;
+ break;
+ }
+ }
+}
+
+static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
+ const struct radeon_phase_shedding_limits_table *limits,
+ u32 mclk,
+ u32 *phase_shedding)
+{
+ unsigned int i;
+
+ *phase_shedding = 1;
+
+ for (i = 0; i < limits->count; i++) {
+ if (mclk < limits->entries[i].mclk) {
+ *phase_shedding = i;
+ break;
+ }
+ }
+}
+
+static int ci_init_arb_table_index(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+ int ret;
+
+ ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+ return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
+ tmp, pi->sram_end);
+}
+
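+/* Return the voltage needed for a clock: the first table entry whose
+ * clock is >= the request, or the highest entry if none qualifies.
+ */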
+static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ u32 clock, u32 *voltage)
+{
+ u32 i = 0;
+
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *voltage = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+ *voltage = allowed_clock_voltage_table->entries[i-1].v;
+
+ return 0;
+}
+
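+/* Pick the largest deep-sleep divider that keeps the divided sclk at or
+ * above the minimum engine clock.
+ */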
+static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
+ u32 sclk, u32 min_sclk_in_sr)
+{
+ u32 i;
+ u32 tmp;
+ u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
+ min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
+
+ if (sclk < min)
+ return 0;
+
+ for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+ tmp = sclk / (1 << i);
+ if (tmp >= min || i == 0)
+ break;
+ }
+
+ return (u8)i;
+}
+
+static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
+{
+ return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int ci_reset_to_default(struct radeon_device *rdev)
+{
+ return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
+static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
+ u32 sclk,
+ u32 mclk,
+ SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ u32 dram_timing;
+ u32 dram_timing2;
+ u32 burst_time;
+
+ radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
+
+ dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+ dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+ burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+
+ arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
+ arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
+ arb_regs->McArbBurstTime = (u8)burst_time;
+
+ return 0;
+}
+
+static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ SMU7_Discrete_MCArbDramTimingTable arb_regs;
+ u32 i, j;
+ int ret = 0;
+
+ memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
+
+ for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
+ ret = ci_populate_memory_timing_parameters(rdev,
+ pi->dpm_table.sclk_table.dpm_levels[i].value,
+ pi->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (ret)
+ break;
+ }
+ }
+
+ if (ret == 0)
+ ret = ci_copy_bytes_to_smc(rdev,
+ pi->arb_table_start,
+ (u8 *)&arb_regs,
+ sizeof(SMU7_Discrete_MCArbDramTimingTable),
+ pi->sram_end);
+
+ return ret;
+}
+
+static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (pi->need_update_smu7_dpm_table == 0)
+ return 0;
+
+ return ci_do_program_memory_timing_parameters(rdev);
+}
+
+static void ci_populate_smc_initial_state(struct radeon_device *rdev,
+ struct radeon_ps *radeon_boot_state)
+{
+ struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 level = 0;
+
+ for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
+ boot_state->performance_levels[0].sclk) {
+ pi->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
+ boot_state->performance_levels[0].mclk) {
+ pi->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+}
+
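+/* Build a bitmask of the enabled DPM levels, bit N for level N. */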
+static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
+{
+ u32 i;
+ u32 mask_value = 0;
+
+ for (i = dpm_table->count; i > 0; i--) {
+ mask_value <<= 1;
+ if (dpm_table->dpm_levels[i-1].enabled)
+ mask_value |= 0x1;
+ }
+
+ return mask_value;
+}
+
+static void ci_populate_smc_link_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ u32 i;
+
+ for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].DownT = cpu_to_be32(5);
+ table->LinkLevel[i].UpT = cpu_to_be32(30);
+ }
+
+ pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+}
+
+static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->UvdLevelCount =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
+ table->UvdLevel[count].MinVddc =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+ table->UvdLevel[count].MinVddcPhases = 1;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->UvdLevel[count].VclkFrequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->UvdLevel[count].DclkFrequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
+
+ table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
+ table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
+ table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
+ }
+
+ return ret;
+}
+
+static int ci_populate_smc_vce_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->VceLevelCount =
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency =
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
+ table->VceLevel[count].MinVoltage =
+ (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+ table->VceLevel[count].MinPhases = 1;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->VceLevel[count].Frequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->VceLevel[count].Divider = (u8)dividers.post_divider;
+
+ table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
+ table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
+ }
+
+ return ret;
+}
+
+static int ci_populate_smc_acp_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->AcpLevelCount = (u8)
+ (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency =
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
+ table->AcpLevel[count].MinVoltage =
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
+ table->AcpLevel[count].MinPhases = 1;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->AcpLevel[count].Frequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->AcpLevel[count].Divider = (u8)dividers.post_divider;
+
+ table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
+ table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
+ }
+
+ return ret;
+}
+
+static int ci_populate_smc_samu_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->SamuLevelCount =
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ table->SamuLevel[count].Frequency =
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
+ table->SamuLevel[count].MinVoltage =
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+ table->SamuLevel[count].MinPhases = 1;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->SamuLevel[count].Frequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->SamuLevel[count].Divider = (u8)dividers.post_divider;
+
+ table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
+ table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
+ }
+
+ return ret;
+}
+
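+/* Derive the MPLL register values for a memory clock, including optional
+ * memory spread spectrum and the DLL power-down state, starting from the
+ * registers cached by ci_read_clock_registers().
+ */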
+static int ci_calculate_mclk_params(struct radeon_device *rdev,
+ u32 memory_clock,
+ SMU7_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dll_state_on)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 dll_cntl = pi->clock_registers.dll_cntl;
+ u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
+ u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
+ u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
+ u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
+ u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
+ u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
+ struct atom_mpll_param mpll_param;
+ int ret;
+
+ ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
+ if (ret)
+ return ret;
+
+ mpll_func_cntl &= ~BWCTRL_MASK;
+ mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
+
+ mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
+ mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
+ CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
+
+ mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
+ mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
+
+ if (pi->mem_gddr5) {
+ mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
+ mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
+ YCLK_POST_DIV(mpll_param.post_div);
+ }
+
+ if (pi->caps_mclk_ss_support) {
+ struct radeon_atom_ss ss;
+ u32 freq_nom;
+ u32 tmp;
+ u32 reference_clock = rdev->clock.mpll.reference_freq;
+
+ if (pi->mem_gddr5)
+ freq_nom = memory_clock * 4;
+ else
+ freq_nom = memory_clock * 2;
+
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+ if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+ u32 clks = reference_clock * 5 / ss.rate;
+ u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 &= ~CLKV_MASK;
+ mpll_ss1 |= CLKV(clkv);
+
+ mpll_ss2 &= ~CLKS_MASK;
+ mpll_ss2 |= CLKS(clks);
+ }
+ }
+
+ mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+ mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
+
+ if (dll_state_on)
+ mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
+ else
+ mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
+
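+/* Translate one memory clock into a complete SMC memory level: minimum
+ * voltages, stutter/strobe/EDC enables and MPLL settings, byte-swapped
+ * for the big-endian SMC.
+ */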
+static int ci_populate_single_memory_level(struct radeon_device *rdev,
+ u32 memory_clock,
+ SMU7_Discrete_MemoryLevel *memory_level)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+ bool dll_state_on;
+
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
+ ret = ci_get_dependency_volt_by_clk(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ memory_clock, &memory_level->MinVddc);
+ if (ret)
+ return ret;
+ }
+
+ if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
+ ret = ci_get_dependency_volt_by_clk(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ memory_clock, &memory_level->MinVddci);
+ if (ret)
+ return ret;
+ }
+
+ if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
+ ret = ci_get_dependency_volt_by_clk(rdev,
+ &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ memory_clock, &memory_level->MinMvdd);
+ if (ret)
+ return ret;
+ }
+
+ memory_level->MinVddcPhases = 1;
+
+ if (pi->vddc_phase_shed_control)
+ ci_populate_phase_value_based_on_mclk(rdev,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ memory_clock,
+ &memory_level->MinVddcPhases);
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 1;
+ memory_level->UpH = 0;
+ memory_level->DownH = 100;
+ memory_level->VoltageDownH = 0;
+ memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
+
+ memory_level->StutterEnable = false;
+ memory_level->StrobeEnable = false;
+ memory_level->EdcReadEnable = false;
+ memory_level->EdcWriteEnable = false;
+ memory_level->RttEnable = false;
+
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (pi->mclk_stutter_mode_threshold &&
+ (memory_clock <= pi->mclk_stutter_mode_threshold) &&
+ !pi->uvd_enabled &&
+ (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+ (rdev->pm.dpm.new_active_crtc_count <= 2))
+ memory_level->StutterEnable = true;
+
+ if (pi->mclk_strobe_mode_threshold &&
+ (memory_clock <= pi->mclk_strobe_mode_threshold))
+ memory_level->StrobeEnable = true;
+
+ if (pi->mem_gddr5) {
+ memory_level->StrobeRatio =
+ si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
+ if (pi->mclk_edc_enable_threshold &&
+ (memory_clock > pi->mclk_edc_enable_threshold))
+ memory_level->EdcReadEnable = true;
+
+ if (pi->mclk_edc_wr_enable_threshold &&
+ (memory_clock > pi->mclk_edc_wr_enable_threshold))
+ memory_level->EdcWriteEnable = true;
+
+ if (memory_level->StrobeEnable) {
+ if (si_get_mclk_frequency_ratio(memory_clock, true) >=
+ ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+ dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+ else
+ dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
+ } else {
+ dll_state_on = pi->dll_default_on;
+ }
+ } else {
+ memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+ }
+
+ ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+ if (ret)
+ return ret;
+
+ memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
+ memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
+ memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
+ memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
+
+ memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
+ memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
+ memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
+ memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
+ memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
+ memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
+ memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
+ memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
+ memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
+ memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
+ memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
+
+ return 0;
+}
+
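+/* Fill in the lowest-power ACPI sclk/mclk levels: SPLL held in reset,
+ * memory DLLs reset and powered down, and the minimum usable voltages.
+ */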
+static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct atom_clock_dividers dividers;
+ SMU7_Discrete_VoltageLevel voltage_level;
+ u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
+ u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
+ u32 dll_cntl = pi->clock_registers.dll_cntl;
+ u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
+ int ret;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (pi->acpi_vddc)
+ table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
+ else
+ table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
+
+ table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
+
+ table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
+ table->ACPILevel.SclkFrequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->ACPILevel.SclkDid = (u8)dividers.post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl &= ~SPLL_PWRON;
+ spll_func_cntl |= SPLL_RESET;
+
+ spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
+ table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
+ table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
+ table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
+ table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
+ table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
+ table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
+ table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
+ table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
+ table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
+ table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
+ table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
+ table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
+ table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
+
+ table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+ table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+ if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+ if (pi->acpi_vddci)
+ table->MemoryACPILevel.MinVddci =
+ cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinVddci =
+ cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
+ }
+
+ if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd = 0;
+ else
+ table->MemoryACPILevel.MinMvdd =
+ cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
+
+ mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
+ mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+ dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
+
+ table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
+ table->MemoryACPILevel.MpllFuncCntl =
+ cpu_to_be32(pi->clock_registers.mpll_func_cntl);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
+ table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
+ table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpH = 0;
+ table->MemoryACPILevel.DownH = 100;
+ table->MemoryACPILevel.VoltageDownH = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ cpu_to_be16((u16)pi->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = false;
+ table->MemoryACPILevel.StrobeEnable = false;
+ table->MemoryACPILevel.EdcReadEnable = false;
+ table->MemoryACPILevel.EdcWriteEnable = false;
+ table->MemoryACPILevel.RttEnable = false;
+
+ return 0;
+}
+
+static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ulv_parm *ulv = &pi->ulv;
+
+ if (ulv->supported) {
+ if (enable)
+ return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ else
+ return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ }
+
+ return 0;
+}
+
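+/* Program the ULV state as a vddc offset below the lowest sclk voltage
+ * level; a zero ULV voltage marks ULV as unsupported.
+ */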
+static int ci_populate_ulv_level(struct radeon_device *rdev,
+ SMU7_Discrete_Ulv *state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ if (ulv_voltage == 0) {
+ pi->ulv.supported = false;
+ return 0;
+ }
+
+ if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
+ state->VddcOffset = 0;
+ else
+ state->VddcOffset =
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
+ } else {
+ if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
+ state->VddcOffsetVid = 0;
+ else
+ state->VddcOffsetVid = (u8)
+ ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+ }
+ state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
+
+ state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
+ state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
+ state->VddcOffset = cpu_to_be16(state->VddcOffset);
+
+ return 0;
+}
+
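+/* Compute the SPLL feedback divider, post divider and optional engine
+ * spread spectrum settings for an engine clock.
+ */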
+static int ci_calculate_sclk_params(struct radeon_device *rdev,
+ u32 engine_clock,
+ SMU7_Discrete_GraphicsLevel *sclk)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct atom_clock_dividers dividers;
+ u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
+ u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 reference_divider;
+ u32 fbdiv;
+ int ret;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
+ engine_clock, false, &dividers);
+ if (ret)
+ return ret;
+
+ reference_divider = 1 + dividers.ref_div;
+ fbdiv = dividers.fb_div & 0x3FFFFFF;
+
+ spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+ spll_func_cntl_3 |= SPLL_DITHEN;
+
+ if (pi->caps_sclk_ss_support) {
+ struct radeon_atom_ss ss;
+ u32 vco_freq = engine_clock * dividers.post_div;
+
+ if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+ u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+ u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum &= ~CLK_S_MASK;
+ cg_spll_spread_spectrum |= CLK_S(clk_s);
+ cg_spll_spread_spectrum |= SSEN;
+
+ cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+ cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (u8)dividers.post_divider;
+
+ return 0;
+}
+
+static int ci_populate_single_graphic_level(struct radeon_device *rdev,
+ u32 engine_clock,
+ u16 sclk_activity_level_t,
+ SMU7_Discrete_GraphicsLevel *graphic_level)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
+ if (ret)
+ return ret;
+
+ ret = ci_get_dependency_volt_by_clk(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ engine_clock, &graphic_level->MinVddc);
+ if (ret)
+ return ret;
+
+ graphic_level->SclkFrequency = engine_clock;
+
+ graphic_level->Flags = 0;
+ graphic_level->MinVddcPhases = 1;
+
+ if (pi->vddc_phase_shed_control)
+ ci_populate_phase_value_based_on_sclk(rdev,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ engine_clock,
+ &graphic_level->MinVddcPhases);
+
+ graphic_level->ActivityLevel = sclk_activity_level_t;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ graphic_level->EnabledForActivity = 1;
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpH = 0;
+ graphic_level->DownH = 0;
+ graphic_level->VoltageDownH = 0;
+ graphic_level->PowerThrottle = 0;
+
+ if (pi->caps_sclk_ds)
+ graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
+ engine_clock,
+ CISLAND_MINIMUM_ENGINE_CLOCK);
+
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
+ graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
+ graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
+ graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
+ graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
+ graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
+ graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
+ graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
+ graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
+ graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
+ graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
+
+ return 0;
+}
+
+static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ u32 level_array_address = pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+ u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
+ SMU7_MAX_LEVELS_GRAPHICS;
+ SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
+ u32 i;
+ int ret;
+
+ memset(levels, 0, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ ret = ci_populate_single_graphic_level(rdev,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (u16)pi->activity_target[i],
+ &pi->smc_state_table.GraphicsLevel[i]);
+ if (ret)
+ return ret;
+ if (i == (dpm_table->sclk_table.count - 1))
+ pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+ }
+
+ pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ ret = ci_copy_bytes_to_smc(rdev, level_array_address,
+ (u8 *)levels, level_array_size,
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ci_populate_ulv_state(struct radeon_device *rdev,
+ SMU7_Discrete_Ulv *ulv_level)
+{
+ return ci_populate_ulv_level(rdev, ulv_level);
+}
+
+static int ci_populate_all_memory_levels(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ u32 level_array_address = pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
+ u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
+ SMU7_MAX_LEVELS_MEMORY;
+ SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
+ u32 i;
+ int ret;
+
+ memset(levels, 0, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ if (dpm_table->mclk_table.dpm_levels[i].value == 0)
+ return -EINVAL;
+ ret = ci_populate_single_memory_level(rdev,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &pi->smc_state_table.MemoryLevel[i]);
+ if (ret)
+ return ret;
+ }
+
+ pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
+
+ pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+ pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ ret = ci_copy_bytes_to_smc(rdev, level_array_address,
+ (u8 *)levels, level_array_size,
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ci_reset_single_dpm_table(struct radeon_device *rdev,
+ struct ci_single_dpm_table* dpm_table,
+ u32 count)
+{
+ u32 i;
+
+ dpm_table->count = count;
+ for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
+ dpm_table->dpm_levels[i].enabled = false;
+}
+
+static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
+ u32 index, u32 pcie_gen, u32 pcie_lanes)
+{
+ dpm_table->dpm_levels[index].value = pcie_gen;
+ dpm_table->dpm_levels[index].param1 = pcie_lanes;
+ dpm_table->dpm_levels[index].enabled = true;
+}
+
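+/* Build the six fixed PCIe DPM entries from the min/max performance and
+ * powersaving gen/lane limits, reusing one set when the other is absent.
+ */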
+static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
+ return -EINVAL;
+
+ if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
+ pi->pcie_gen_powersaving = pi->pcie_gen_performance;
+ pi->pcie_lane_powersaving = pi->pcie_lane_performance;
+ } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
+ pi->pcie_gen_performance = pi->pcie_gen_powersaving;
+ pi->pcie_lane_performance = pi->pcie_lane_powersaving;
+ }
+
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.pcie_speed_table,
+ SMU7_MAX_LEVELS_LINK);
+
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.min);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
+ pi->pcie_gen_performance.min,
+ pi->pcie_lane_performance.min);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.max);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
+ pi->pcie_gen_performance.min,
+ pi->pcie_lane_performance.max);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
+ pi->pcie_gen_powersaving.max,
+ pi->pcie_lane_powersaving.max);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
+ pi->pcie_gen_performance.max,
+ pi->pcie_lane_performance.max);
+
+ pi->dpm_table.pcie_speed_table.count = 6;
+
+ return 0;
+}
+
+static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
+ struct radeon_cac_leakage_table *std_voltage_table =
+ &rdev->pm.dpm.dyn_state.cac_leakage_table;
+ u32 i;
+
+ if (allowed_sclk_vddc_table == NULL)
+ return -EINVAL;
+ if (allowed_sclk_vddc_table->count < 1)
+ return -EINVAL;
+ if (allowed_mclk_table == NULL)
+ return -EINVAL;
+ if (allowed_mclk_table->count < 1)
+ return -EINVAL;
+
+ memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
+
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.sclk_table,
+ SMU7_MAX_LEVELS_GRAPHICS);
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.mclk_table,
+ SMU7_MAX_LEVELS_MEMORY);
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.vddc_table,
+ SMU7_MAX_LEVELS_VDDC);
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.vddci_table,
+ SMU7_MAX_LEVELS_VDDCI);
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.mvdd_table,
+ SMU7_MAX_LEVELS_MVDD);
+
+ pi->dpm_table.sclk_table.count = 0;
+ for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
+ if ((i == 0) ||
+ (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
+ allowed_sclk_vddc_table->entries[i].clk)) {
+ pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
+ allowed_sclk_vddc_table->entries[i].clk;
+ pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
+ pi->dpm_table.sclk_table.count++;
+ }
+ }
+
+ pi->dpm_table.mclk_table.count = 0;
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+		if ((i == 0) ||
+ (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
+ allowed_mclk_table->entries[i].clk)) {
+ pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
+ allowed_mclk_table->entries[i].clk;
+ pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
+ pi->dpm_table.mclk_table.count++;
+ }
+ }
+
+ for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
+ pi->dpm_table.vddc_table.dpm_levels[i].value =
+ allowed_sclk_vddc_table->entries[i].v;
+ pi->dpm_table.vddc_table.dpm_levels[i].param1 =
+ std_voltage_table->entries[i].leakage;
+ pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
+ }
+ pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
+
+ allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
+ if (allowed_mclk_table) {
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+ pi->dpm_table.vddci_table.dpm_levels[i].value =
+ allowed_mclk_table->entries[i].v;
+ pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
+ }
+ pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
+ }
+
+ allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
+ if (allowed_mclk_table) {
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+ pi->dpm_table.mvdd_table.dpm_levels[i].value =
+ allowed_mclk_table->entries[i].v;
+ pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
+ }
+ pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
+ }
+
+ ci_setup_default_pcie_tables(rdev);
+
+ return 0;
+}
+
+static int ci_find_boot_level(struct ci_single_dpm_table *table,
+ u32 value, u32 *boot_level)
+{
+ u32 i;
+ int ret = -EINVAL;
+
+	for (i = 0; i < table->count; i++) {
+ if (value == table->dpm_levels[i].value) {
+ *boot_level = i;
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int ci_init_smc_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ulv_parm *ulv = &pi->ulv;
+ struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
+ SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
+ int ret;
+
+ ret = ci_setup_default_dpm_tables(rdev);
+ if (ret)
+ return ret;
+
+ if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
+ ci_populate_smc_voltage_tables(rdev, table);
+
+ ci_init_fps_limits(rdev);
+
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (pi->mem_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (ulv->supported) {
+ ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
+ if (ret)
+ return ret;
+ WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+ }
+
+ ret = ci_populate_all_graphic_levels(rdev);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_all_memory_levels(rdev);
+ if (ret)
+ return ret;
+
+ ci_populate_smc_link_level(rdev, table);
+
+ ret = ci_populate_smc_acpi_level(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_vce_level(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_acp_level(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_samu_level(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_do_program_memory_timing_parameters(rdev);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_uvd_level(rdev, table);
+ if (ret)
+ return ret;
+
+ table->UvdBootLevel = 0;
+ table->VceBootLevel = 0;
+ table->AcpBootLevel = 0;
+ table->SamuBootLevel = 0;
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
+ pi->vbios_boot_state.sclk_bootup_value,
+ (u32 *)&pi->smc_state_table.GraphicsBootLevel);
+
+ ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
+ pi->vbios_boot_state.mclk_bootup_value,
+ (u32 *)&pi->smc_state_table.MemoryBootLevel);
+
+ table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
+ table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
+ table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
+
+ ci_populate_smc_initial_state(rdev, radeon_boot_state);
+
+ ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
+ if (ret)
+ return ret;
+
+ table->UVDInterval = 1;
+ table->VCEInterval = 1;
+ table->ACPInterval = 1;
+ table->SAMUInterval = 1;
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
+ CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
+ table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
+ CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->VddcVddciDelta = 4000;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+ if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
+ table->SVI2Enable = 1;
+ else
+ table->SVI2Enable = 0;
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
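+	/* The SMC expects big-endian data; swap the multi-byte fields in place. */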
+ table->SystemFlags = cpu_to_be32(table->SystemFlags);
+ table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
+ table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
+ table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
+ table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
+ table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
+ table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
+ table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
+ table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
+ table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
+ table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
+ table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
+ table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
+ table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
+
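+	/* Upload everything up to, presumably, the three trailing PID
+	 * controller blocks implied by the size arithmetic below. */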
+ ret = ci_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, SystemFlags),
+ (u8 *)&table->SystemFlags,
+ sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
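+/* Enable only the levels whose value lies within [low_limit, high_limit]. */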
+static void ci_trim_single_dpm_states(struct radeon_device *rdev,
+ struct ci_single_dpm_table *dpm_table,
+ u32 low_limit, u32 high_limit)
+{
+ u32 i;
+
+ for (i = 0; i < dpm_table->count; i++) {
+ if ((dpm_table->dpm_levels[i].value < low_limit) ||
+ (dpm_table->dpm_levels[i].value > high_limit))
+ dpm_table->dpm_levels[i].enabled = false;
+ else
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+}
+
+static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
+ u32 speed_low, u32 lanes_low,
+ u32 speed_high, u32 lanes_high)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
+ u32 i, j;
+
+ for (i = 0; i < pcie_table->count; i++) {
+ if ((pcie_table->dpm_levels[i].value < speed_low) ||
+ (pcie_table->dpm_levels[i].param1 < lanes_low) ||
+ (pcie_table->dpm_levels[i].value > speed_high) ||
+ (pcie_table->dpm_levels[i].param1 > lanes_high))
+ pcie_table->dpm_levels[i].enabled = false;
+ else
+ pcie_table->dpm_levels[i].enabled = true;
+ }
+
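+	/* Disable duplicate entries (same gen and lane count), keeping the
+	 * first occurrence. */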
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_table->dpm_levels[i].enabled) {
+ for (j = i + 1; j < pcie_table->count; j++) {
+ if (pcie_table->dpm_levels[j].enabled) {
+ if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
+ (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
+ pcie_table->dpm_levels[j].enabled = false;
+ }
+ }
+ }
+ }
+}
+
+static int ci_trim_dpm_states(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_ps *state = ci_get_ps(radeon_state);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 high_limit_count;
+
+ if (state->performance_level_count < 1)
+ return -EINVAL;
+
+ if (state->performance_level_count == 1)
+ high_limit_count = 0;
+ else
+ high_limit_count = 1;
+
+ ci_trim_single_dpm_states(rdev,
+ &pi->dpm_table.sclk_table,
+ state->performance_levels[0].sclk,
+ state->performance_levels[high_limit_count].sclk);
+
+ ci_trim_single_dpm_states(rdev,
+ &pi->dpm_table.mclk_table,
+ state->performance_levels[0].mclk,
+ state->performance_levels[high_limit_count].mclk);
+
+ ci_trim_pcie_dpm_states(rdev,
+ state->performance_levels[0].pcie_gen,
+ state->performance_levels[0].pcie_lane,
+ state->performance_levels[high_limit_count].pcie_gen,
+ state->performance_levels[high_limit_count].pcie_lane);
+
+ return 0;
+}
+
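+/* Look up the voltage required by the current display clock and ask the
+ * SMC for the smallest VDDC level that satisfies it. */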
+static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
+{
+ struct radeon_clock_voltage_dependency_table *disp_voltage_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
+ struct radeon_clock_voltage_dependency_table *vddc_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 requested_voltage = 0;
+ u32 i;
+
+ if (disp_voltage_table == NULL)
+ return -EINVAL;
+ if (!disp_voltage_table->count)
+ return -EINVAL;
+
+ for (i = 0; i < disp_voltage_table->count; i++) {
+ if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
+ requested_voltage = disp_voltage_table->entries[i].v;
+ }
+
+ for (i = 0; i < vddc_table->count; i++) {
+ if (requested_voltage <= vddc_table->entries[i].v) {
+ requested_voltage = vddc_table->entries[i].v;
+ return (ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_VddC_Request,
+ requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result result;
+
+ if (!pi->sclk_dpm_key_disabled) {
+ if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ result = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ if (result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ if (!pi->mclk_dpm_key_disabled) {
+ if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ result = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ if (result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ if (!pi->pcie_dpm_key_disabled) {
+ if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ result = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_PCIeDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+ if (result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ ci_apply_disp_minimum_voltage_request(rdev);
+
+ return 0;
+}
+
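+/* Compare the new state's top sclk/mclk against the current DPM tables
+ * and flag which SMU7 tables need to be rebuilt and re-uploaded. */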
+static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *state = ci_get_ps(radeon_state);
+ struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
+ u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
+ struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
+ u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
+ u32 i;
+
+ pi->need_update_smu7_dpm_table = 0;
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (sclk == sclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= sclk_table->count) {
+ pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ } else {
+ /* XXX check display min clock requirements */
+ if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
+ pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+ }
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (mclk == mclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= mclk_table->count)
+ pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+ if (rdev->pm.dpm.current_active_crtc_count !=
+ rdev->pm.dpm.new_active_crtc_count)
+ pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+}
+
+static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *state = ci_get_ps(radeon_state);
+ u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
+ u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ int ret;
+
+ if (!pi->need_update_smu7_dpm_table)
+ return 0;
+
+ if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
+ dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
+
+ if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
+ dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
+
+ if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
+ ret = ci_populate_all_graphic_levels(rdev);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
+ ret = ci_populate_all_memory_levels(rdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct radeon_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
+
+ for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_uvd_dpm)
+ break;
+ }
+ }
+
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
+
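+		/* Keep the lowest MCLK level out of the mask while UVD runs;
+		 * presumably this avoids memory reclocks during decode. */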
+ if (pi->last_mclk_dpm_enable_mask & 0x1) {
+ pi->uvd_enabled = true;
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ }
+ } else {
+ if (pi->last_mclk_dpm_enable_mask & 0x1) {
+ pi->uvd_enabled = false;
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ }
+ }
+
+ return (ci_send_msg_to_smc(rdev, enable ?
+ PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+#if 0
+static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct radeon_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
+ for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_vce_dpm)
+ break;
+ }
+ }
+
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.vce_dpm_enable_mask);
+ }
+
+ return (ci_send_msg_to_smc(rdev, enable ?
+ PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct radeon_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
+ for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_samu_dpm)
+ break;
+ }
+ }
+
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.samu_dpm_enable_mask);
+ }
+ return (ci_send_msg_to_smc(rdev, enable ?
+ PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct radeon_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
+ for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_acp_dpm)
+ break;
+ }
+ }
+
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_ACPDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.acp_dpm_enable_mask);
+ }
+
+ return (ci_send_msg_to_smc(rdev, enable ?
+ PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+#endif
+
+static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (!gate) {
+ if (pi->caps_uvd_dpm ||
+ (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
+ pi->smc_state_table.UvdBootLevel = 0;
+ else
+ pi->smc_state_table.UvdBootLevel =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
+
+ tmp = RREG32_SMC(DPM_TABLE_475);
+ tmp &= ~UvdBootLevel_MASK;
+ tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
+ WREG32_SMC(DPM_TABLE_475, tmp);
+ }
+
+ return ci_enable_uvd_dpm(rdev, !gate);
+}
+
+#if 0
+static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
+{
+ u8 i;
+ u32 min_evclk = 30000; /* ??? */
+ struct radeon_vce_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].evclk >= min_evclk)
+ return i;
+ }
+
+ return table->count - 1;
+}
+
+static int ci_update_vce_dpm(struct radeon_device *rdev,
+ struct radeon_ps *radeon_new_state,
+ struct radeon_ps *radeon_current_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
+ bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
+ int ret = 0;
+ u32 tmp;
+
+ if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
+ if (new_vce_clock_non_zero) {
+ pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
+
+ tmp = RREG32_SMC(DPM_TABLE_475);
+ tmp &= ~VceBootLevel_MASK;
+ tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
+ WREG32_SMC(DPM_TABLE_475, tmp);
+
+ ret = ci_enable_vce_dpm(rdev, true);
+ } else {
+ ret = ci_enable_vce_dpm(rdev, false);
+ }
+ }
+ return ret;
+}
+
+static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
+{
+ return ci_enable_samu_dpm(rdev, gate);
+}
+
+static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (!gate) {
+ pi->smc_state_table.AcpBootLevel = 0;
+
+ tmp = RREG32_SMC(DPM_TABLE_475);
+ tmp &= ~AcpBootLevel_MASK;
+ tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
+ WREG32_SMC(DPM_TABLE_475, tmp);
+ }
+
+ return ci_enable_acp_dpm(rdev, !gate);
+}
+#endif
+
+static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ ret = ci_trim_dpm_states(rdev, radeon_state);
+ if (ret)
+ return ret;
+
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
+ pi->last_mclk_dpm_enable_mask =
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ if (pi->uvd_enabled) {
+ if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+ }
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
+
+ return 0;
+}
+
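+/* Return the index of the lowest set bit; callers must pass a non-zero
+ * mask or this loop will not terminate. */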
+static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
+ u32 level_mask)
+{
+ u32 level = 0;
+
+ while ((level_mask & (1 << level)) == 0)
+ level++;
+
+ return level;
+}
+
+
+int ci_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ u32 tmp, levels, i;
+ int ret;
+
+ if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ if ((!pi->sclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
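+			/* Index of the highest set bit == highest enabled level. */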
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+ ret = ci_dpm_force_state_sclk(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ if ((!pi->mclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+ ret = ci_dpm_force_state_mclk(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ if ((!pi->pcie_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+				ret = ci_dpm_force_state_pcie(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+ CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ if ((!pi->sclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ levels = ci_get_lowest_enabled_level(rdev,
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ ret = ci_dpm_force_state_sclk(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ if ((!pi->mclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ levels = ci_get_lowest_enabled_level(rdev,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ ret = ci_dpm_force_state_mclk(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ if ((!pi->pcie_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ levels = ci_get_lowest_enabled_level(rdev,
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+ ret = ci_dpm_force_state_pcie(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+ CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
+ if (!pi->sclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ if (!pi->mclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ if (!pi->pcie_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ rdev->pm.dpm.forced_level = level;
+
+ return 0;
+}
+
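+/* Append derived MC register entries (EMRS/MRS/MRS1 command values built
+ * from the current register contents) after the last VBIOS-provided one. */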
+static int ci_set_mc_special_registers(struct radeon_device *rdev,
+ struct ci_mc_reg_table *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u8 i, j, k;
+ u32 temp_reg;
+
+ for (i = 0, j = table->last; i < table->last; i++) {
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+		switch (table->mc_reg_address[i].s1 << 2) {
+ case MC_SEQ_MISC1:
+ temp_reg = RREG32(MC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ temp_reg = RREG32(MC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ if (!pi->mem_gddr5)
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ if (!pi->mem_gddr5) {
+ table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
+ table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ }
+ j++;
+				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ }
+ break;
+ case MC_SEQ_RESERVE_M:
+ temp_reg = RREG32(MC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
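+/* Map an MC sequencer register to its _LP shadow counterpart; addresses
+ * are compared in dword units, hence the >> 2. */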
+static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+ bool result = true;
+
+	switch (in_reg) {
+ case MC_SEQ_RAS_TIMING >> 2:
+ *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
+ break;
+ case MC_SEQ_DLL_STBY >> 2:
+ *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
+ break;
+ case MC_SEQ_G5PDX_CMD0 >> 2:
+ *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
+ break;
+ case MC_SEQ_G5PDX_CMD1 >> 2:
+ *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
+ break;
+ case MC_SEQ_G5PDX_CTRL >> 2:
+ *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
+ break;
+ case MC_SEQ_CAS_TIMING >> 2:
+ *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
+ break;
+ case MC_SEQ_MISC_TIMING >> 2:
+ *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
+ break;
+ case MC_SEQ_MISC_TIMING2 >> 2:
+ *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
+ break;
+ case MC_SEQ_PMG_DVS_CMD >> 2:
+ *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
+ break;
+ case MC_SEQ_PMG_DVS_CTL >> 2:
+ *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
+ break;
+ case MC_SEQ_RD_CTL_D0 >> 2:
+ *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
+ break;
+ case MC_SEQ_RD_CTL_D1 >> 2:
+ *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
+ break;
+ case MC_SEQ_WR_CTL_D0 >> 2:
+ *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
+ break;
+ case MC_SEQ_WR_CTL_D1 >> 2:
+ *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
+ break;
+ case MC_PMG_CMD_EMRS >> 2:
+ *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+ break;
+ case MC_PMG_CMD_MRS >> 2:
+ *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+ break;
+ case MC_PMG_CMD_MRS1 >> 2:
+ *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+ break;
+ case MC_SEQ_PMG_TIMING >> 2:
+ *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
+ break;
+ case MC_PMG_CMD_MRS2 >> 2:
+ *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
+ break;
+ case MC_SEQ_WR_CTL_2 >> 2:
+ *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
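+/* Mark a register column valid only if its value actually changes
+ * between at least two adjacent entries. */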
+static void ci_set_valid_flag(struct ci_mc_reg_table *table)
+{
+ u8 i, j;
+
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->valid_flag |= 1 << i;
+ break;
+ }
+ }
+ }
+}
+
+static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
+{
+ u32 i;
+ u16 address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+ address : table->mc_reg_address[i].s1;
+ }
+}
+
+static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
+ struct ci_mc_reg_table *ci_table)
+{
+ u8 i, j;
+
+ if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+ return -EINVAL;
+
+ for (i = 0; i < table->last; i++)
+ ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+ ci_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ci_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++)
+ ci_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ ci_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
+static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct atom_mc_reg_table *table;
+ struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
+ u8 module_index = rv770_get_memory_module_index(rdev);
+ int ret;
+
+ table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+ WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+ WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
+ WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
+ WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
+ WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
+ WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
+ WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
+ WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+ WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+ WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+ WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+ WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+ WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+ WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+ WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+ WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+ WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
+ WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
+ WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
+
+ ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
+ if (ret)
+ goto init_mc_done;
+
+ ret = ci_copy_vbios_mc_reg_table(table, ci_table);
+ if (ret)
+ goto init_mc_done;
+
+ ci_set_s0_mc_reg_index(ci_table);
+
+ ret = ci_set_mc_special_registers(rdev, ci_table);
+ if (ret)
+ goto init_mc_done;
+
+ ci_set_valid_flag(ci_table);
+
+init_mc_done:
+ kfree(table);
+
+ return ret;
+}
+
+static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
+ SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 i, j;
+
+ for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
+ if (pi->mc_reg_table.valid_flag & (1 << j)) {
+ if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (u8)i;
+
+ return 0;
+}
+
+static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
+ SMU7_Discrete_MCRegisterSet *data,
+ u32 num_entries, u32 valid_flag)
+{
+ u32 i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & (1 << j)) {
+ data->value[i] = cpu_to_be32(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
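+/* Pick the first entry whose mclk_max covers memory_clock, clamping to
+ * the last entry when the clock exceeds the whole table. */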
+static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
+ const u32 memory_clock,
+ SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 i;
+
+	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
+ if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+ break;
+ }
+
+ if ((i == pi->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, pi->mc_reg_table.last,
+ pi->mc_reg_table.valid_flag);
+}
+
+static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
+ SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 i;
+
+ for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
+ ci_convert_mc_reg_table_entry_to_smc(rdev,
+ pi->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_reg_table->data[i]);
+}
+
+static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+ ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
+ if (ret)
+ return ret;
+ ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
+
+ return ci_copy_bytes_to_smc(rdev,
+ pi->mc_reg_table_start,
+ (u8 *)&pi->smc_mc_reg_table,
+ sizeof(SMU7_Discrete_MCRegisters),
+ pi->sram_end);
+}
+
+static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+ memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+ ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
+
+ return ci_copy_bytes_to_smc(rdev,
+ pi->mc_reg_table_start +
+ offsetof(SMU7_Discrete_MCRegisters, data[0]),
+ (u8 *)&pi->smc_mc_reg_table.data[0],
+ sizeof(SMU7_Discrete_MCRegisterSet) *
+ pi->dpm_table.mclk_table.count,
+ pi->sram_end);
+}
+
+static void ci_enable_voltage_control(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+ tmp |= VOLT_PWRMGT_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+}
+
+static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_ps *state = ci_get_ps(radeon_state);
+ int i;
+ u16 pcie_speed, max_speed = 0;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+ pcie_speed = state->performance_levels[i].pcie_gen;
+ if (max_speed < pcie_speed)
+ max_speed = pcie_speed;
+ }
+
+ return max_speed;
+}
+
+static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
+{
+ u32 speed_cntl = 0;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
+ speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+
+ return (u16)speed_cntl;
+}
+
+static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
+{
+ u32 link_width = 0;
+
+ link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
+ link_width >>= LC_LINK_WIDTH_RD_SHIFT;
+
+ switch (link_width) {
+ case RADEON_PCIE_LC_LINK_WIDTH_X1:
+ return 1;
+ case RADEON_PCIE_LC_LINK_WIDTH_X2:
+ return 2;
+ case RADEON_PCIE_LC_LINK_WIDTH_X4:
+ return 4;
+ case RADEON_PCIE_LC_LINK_WIDTH_X8:
+ return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X12:
+ /* not actually supported */
+ return 12;
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
+ case RADEON_PCIE_LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
+
+static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
+ struct radeon_ps *radeon_new_state,
+ struct radeon_ps *radeon_current_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ enum radeon_pcie_gen target_link_speed =
+ ci_get_maximum_link_speed(rdev, radeon_new_state);
+ enum radeon_pcie_gen current_link_speed;
+
+ if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
+ current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
+ else
+ current_link_speed = pi->force_pcie_gen;
+
+ pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
+ pi->pspp_notify_required = false;
+ if (target_link_speed > current_link_speed) {
+ switch (target_link_speed) {
+ case RADEON_PCIE_GEN3:
+ if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+ break;
+ pi->force_pcie_gen = RADEON_PCIE_GEN2;
+ if (current_link_speed == RADEON_PCIE_GEN2)
+ break;
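+			/* fall through */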
+ case RADEON_PCIE_GEN2:
+ if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+ break;
+ default:
+ pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
+ break;
+ }
+ } else {
+ if (target_link_speed < current_link_speed)
+ pi->pspp_notify_required = true;
+ }
+}
+
+static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
+ struct radeon_ps *radeon_new_state,
+ struct radeon_ps *radeon_current_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ enum radeon_pcie_gen target_link_speed =
+ ci_get_maximum_link_speed(rdev, radeon_new_state);
+ u8 request;
+
+ if (pi->pspp_notify_required) {
+ if (target_link_speed == RADEON_PCIE_GEN3)
+ request = PCIE_PERF_REQ_PECI_GEN3;
+ else if (target_link_speed == RADEON_PCIE_GEN2)
+ request = PCIE_PERF_REQ_PECI_GEN2;
+ else
+ request = PCIE_PERF_REQ_PECI_GEN1;
+
+ if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+ (ci_get_current_pcie_speed(rdev) > 0))
+ return;
+
+ radeon_acpi_pcie_performance_request(rdev, request, false);
+ }
+}
+
+static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
+ struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
+
+ if (allowed_sclk_vddc_table == NULL)
+ return -EINVAL;
+ if (allowed_sclk_vddc_table->count < 1)
+ return -EINVAL;
+ if (allowed_mclk_vddc_table == NULL)
+ return -EINVAL;
+ if (allowed_mclk_vddc_table->count < 1)
+ return -EINVAL;
+ if (allowed_mclk_vddci_table == NULL)
+ return -EINVAL;
+ if (allowed_mclk_vddci_table->count < 1)
+ return -EINVAL;
+
+ pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
+ pi->max_vddc_in_pp_table =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+ pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
+ pi->max_vddci_in_pp_table =
+ allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
+		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
+ allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+
+ return 0;
+}
+
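+/* Voltage fields may hold leakage IDs rather than real voltages; swap
+ * in the actual voltage when the ID matches a known leakage entry. */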
+static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
+ u32 leakage_index;
+
+ for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+ if (leakage_table->leakage_id[leakage_index] == *vddc) {
+ *vddc = leakage_table->actual_voltage[leakage_index];
+ break;
+ }
+ }
+}
+
+static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
+ u32 leakage_index;
+
+ for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+ if (leakage_table->leakage_id[leakage_index] == *vddci) {
+ *vddci = leakage_table->actual_voltage[leakage_index];
+ break;
+ }
+ }
+}
+
+static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_vce_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_uvd_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_phase_shedding_limits_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
+ }
+}
+
+static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_clock_and_voltage_limits *table)
+{
+ if (table) {
+ ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
+ ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
+ }
+}
+
+static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_cac_leakage_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
+ }
+}
+
+static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
+{
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
+ ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+ ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
+ ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
+ ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
+ ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+ ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
+ ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.cac_leakage_table);
+}
+
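+/* Detect GDDR5 vs. other memory types from the MC_SEQ_MISC0 strap. */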
+static void ci_get_memory_type(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ tmp = RREG32(MC_SEQ_MISC0);
+
+ if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
+ MC_SEQ_MISC0_GDDR5_VALUE)
+ pi->mem_gddr5 = true;
+ else
+ pi->mem_gddr5 = false;
+}
+
+void ci_update_current_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct ci_ps *new_ps = ci_get_ps(rps);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ pi->current_rps = *rps;
+ pi->current_ps = *new_ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+void ci_update_requested_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct ci_ps *new_ps = ci_get_ps(rps);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ pi->requested_rps = *rps;
+ pi->requested_ps = *new_ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
+int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+ struct radeon_ps *new_ps = &requested_ps;
+
+ ci_update_requested_ps(rdev, new_ps);
+
+ ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
+
+ return 0;
+}
+
+void ci_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+
+ ci_update_current_ps(rdev, new_ps);
+}
+
+
+void ci_dpm_setup_asic(struct radeon_device *rdev)
+{
+ ci_read_clock_registers(rdev);
+ ci_get_memory_type(rdev);
+ ci_enable_acpi_power_management(rdev);
+ ci_init_sclk_t(rdev);
+}
+
+int ci_dpm_enable(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+ int ret;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
+ if (ci_is_smc_running(rdev))
+ return -EINVAL;
+ if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+ ci_enable_voltage_control(rdev);
+ ret = ci_construct_voltage_tables(rdev);
+ if (ret) {
+ DRM_ERROR("ci_construct_voltage_tables failed\n");
+ return ret;
+ }
+ }
+ if (pi->caps_dynamic_ac_timing) {
+ ret = ci_initialize_mc_reg_table(rdev);
+ if (ret)
+ pi->caps_dynamic_ac_timing = false;
+ }
+ if (pi->dynamic_ss)
+ ci_enable_spread_spectrum(rdev, true);
+ if (pi->thermal_protection)
+ ci_enable_thermal_protection(rdev, true);
+ ci_program_sstp(rdev);
+ ci_enable_display_gap(rdev);
+ ci_program_vc(rdev);
+ ret = ci_upload_firmware(rdev);
+ if (ret) {
+ DRM_ERROR("ci_upload_firmware failed\n");
+ return ret;
+ }
+ ret = ci_process_firmware_header(rdev);
+ if (ret) {
+ DRM_ERROR("ci_process_firmware_header failed\n");
+ return ret;
+ }
+ ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
+ if (ret) {
+ DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
+ return ret;
+ }
+ ret = ci_init_smc_table(rdev);
+ if (ret) {
+ DRM_ERROR("ci_init_smc_table failed\n");
+ return ret;
+ }
+ ret = ci_init_arb_table_index(rdev);
+ if (ret) {
+ DRM_ERROR("ci_init_arb_table_index failed\n");
+ return ret;
+ }
+ if (pi->caps_dynamic_ac_timing) {
+ ret = ci_populate_initial_mc_reg_table(rdev);
+ if (ret) {
+ DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
+ return ret;
+ }
+ }
+ ret = ci_populate_pm_base(rdev);
+ if (ret) {
+ DRM_ERROR("ci_populate_pm_base failed\n");
+ return ret;
+ }
+ ci_dpm_start_smc(rdev);
+ ci_enable_vr_hot_gpio_interrupt(rdev);
+ ret = ci_notify_smc_display_change(rdev, false);
+ if (ret) {
+ DRM_ERROR("ci_notify_smc_display_change failed\n");
+ return ret;
+ }
+ ci_enable_sclk_control(rdev, true);
+ ret = ci_enable_ulv(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_ulv failed\n");
+ return ret;
+ }
+ ret = ci_enable_ds_master_switch(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_ds_master_switch failed\n");
+ return ret;
+ }
+ ret = ci_start_dpm(rdev);
+ if (ret) {
+ DRM_ERROR("ci_start_dpm failed\n");
+ return ret;
+ }
+ ret = ci_enable_didt(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_didt failed\n");
+ return ret;
+ }
+ ret = ci_enable_smc_cac(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_smc_cac failed\n");
+ return ret;
+ }
+ ret = ci_enable_power_containment(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_power_containment failed\n");
+ return ret;
+ }
+ if (rdev->irq.installed &&
+ r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+#if 0
+ PPSMC_Result result;
+#endif
+ ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret) {
+ DRM_ERROR("ci_set_thermal_temperature_range failed\n");
+ return ret;
+ }
+ rdev->irq.dpm_thermal = true;
+ radeon_irq_set(rdev);
+#if 0
+ result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+
+ if (result != PPSMC_Result_OK)
+ DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+#endif
+ }
+
+ ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+ ci_dpm_powergate_uvd(rdev, true);
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+
+ ci_update_current_ps(rdev, boot_ps);
+
+ return 0;
+}
+
+void ci_dpm_disable(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
+ ci_dpm_powergate_uvd(rdev, false);
+
+ if (!ci_is_smc_running(rdev))
+ return;
+
+ if (pi->thermal_protection)
+ ci_enable_thermal_protection(rdev, false);
+ ci_enable_power_containment(rdev, false);
+ ci_enable_smc_cac(rdev, false);
+ ci_enable_didt(rdev, false);
+ ci_enable_spread_spectrum(rdev, false);
+ ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+ ci_stop_dpm(rdev);
+	ci_enable_ds_master_switch(rdev, false);
+ ci_enable_ulv(rdev, false);
+ ci_clear_vc(rdev);
+ ci_reset_to_default(rdev);
+ ci_dpm_stop_smc(rdev);
+ ci_force_switch_to_arb_f0(rdev);
+
+ ci_update_current_ps(rdev, boot_ps);
+}
+
+int ci_dpm_set_power_state(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+ struct radeon_ps *old_ps = &pi->current_rps;
+ int ret;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
+ ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
+ if (pi->pcie_performance_request)
+ ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
+ ret = ci_freeze_sclk_mclk_dpm(rdev);
+ if (ret) {
+ DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
+ return ret;
+ }
+ ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
+ if (ret) {
+ DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
+ return ret;
+ }
+ ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
+ if (ret) {
+ DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
+ return ret;
+ }
+#if 0
+ ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
+ if (ret) {
+ DRM_ERROR("ci_update_vce_dpm failed\n");
+ return ret;
+ }
+#endif
+ ret = ci_update_sclk_t(rdev);
+ if (ret) {
+ DRM_ERROR("ci_update_sclk_t failed\n");
+ return ret;
+ }
+ if (pi->caps_dynamic_ac_timing) {
+ ret = ci_update_and_upload_mc_reg_table(rdev);
+ if (ret) {
+ DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
+ return ret;
+ }
+ }
+ ret = ci_program_memory_timing_parameters(rdev);
+ if (ret) {
+ DRM_ERROR("ci_program_memory_timing_parameters failed\n");
+ return ret;
+ }
+ ret = ci_unfreeze_sclk_mclk_dpm(rdev);
+ if (ret) {
+ DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
+ return ret;
+ }
+ ret = ci_upload_dpm_level_enable_mask(rdev);
+ if (ret) {
+ DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
+ return ret;
+ }
+ if (pi->pcie_performance_request)
+ ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
+
+ ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
+ if (ret) {
+ DRM_ERROR("ci_dpm_force_performance_level failed\n");
+ return ret;
+ }
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+
+ return 0;
+}
+
+int ci_dpm_power_control_set_level(struct radeon_device *rdev)
+{
+ return ci_power_control_set_level(rdev);
+}
+
+void ci_dpm_reset_asic(struct radeon_device *rdev)
+{
+ ci_set_boot_state(rdev);
+}
+
+void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+ ci_program_display_gap(rdev);
+}
+
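+/* ATOM PowerPlay table layouts as found in the VBIOS. */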
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+ struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+ struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
+};
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
+ struct radeon_ps *rps,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+ u8 table_rev)
+{
+ rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ rps->class = le16_to_cpu(non_clock_info->usClassification);
+ rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+ if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+ rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+ rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+ } else {
+ rps->vclk = 0;
+ rps->dclk = 0;
+ }
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+ rdev->pm.dpm.boot_ps = rps;
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
+ struct radeon_ps *rps, int index,
+ union pplib_clock_info *clock_info)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *ps = ci_get_ps(rps);
+ struct ci_pl *pl = &ps->performance_levels[index];
+
+ ps->performance_level_count = index + 1;
+
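+ /* sclk/mclk are split fields in the table: a 16-bit low word plus an 8-bit high byte */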
+ pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+ pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
+ pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+ pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+
+ pl->pcie_gen = r600_get_pcie_gen_support(rdev,
+ pi->sys_pcie_mask,
+ pi->vbios_boot_state.pcie_gen_bootup_value,
+ clock_info->ci.ucPCIEGen);
+ pl->pcie_lane = r600_get_pcie_lane_support(rdev,
+ pi->vbios_boot_state.pcie_lane_bootup_value,
+ le16_to_cpu(clock_info->ci.usPCIELane));
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+ pi->acpi_pcie_gen = pl->pcie_gen;
+ }
+
+ if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
+ pi->ulv.supported = true;
+ pi->ulv.pl = *pl;
+ pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
+ }
+
+ /* patch up boot state */
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
+ pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
+ pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
+ pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
+ }
+
+ switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ pi->use_pcie_powersaving_levels = true;
+ if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
+ pi->pcie_gen_powersaving.max = pl->pcie_gen;
+ if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
+ pi->pcie_gen_powersaving.min = pl->pcie_gen;
+ if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
+ pi->pcie_lane_powersaving.max = pl->pcie_lane;
+ if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
+ pi->pcie_lane_powersaving.min = pl->pcie_lane;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ pi->use_pcie_performance_levels = true;
+ if (pi->pcie_gen_performance.max < pl->pcie_gen)
+ pi->pcie_gen_performance.max = pl->pcie_gen;
+ if (pi->pcie_gen_performance.min > pl->pcie_gen)
+ pi->pcie_gen_performance.min = pl->pcie_gen;
+ if (pi->pcie_lane_performance.max < pl->pcie_lane)
+ pi->pcie_lane_performance.max = pl->pcie_lane;
+ if (pi->pcie_lane_performance.min > pl->pcie_lane)
+ pi->pcie_lane_performance.min = pl->pcie_lane;
+ break;
+ default:
+ break;
+ }
+}
+
+static int ci_parse_power_table(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, k, non_clock_array_index, clock_array_index;
+ union pplib_clock_info *clock_info;
+ struct _StateArray *state_array;
+ struct _ClockInfoArray *clock_info_array;
+ struct _NonClockInfoArray *non_clock_info_array;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u8 *power_state_offset;
+ struct ci_ps *ps;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
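+ /* the pplib header stores byte offsets, relative to the table start, for the state, clock-info, and non-clock-info arrays */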
+ state_array = (struct _StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ clock_info_array = (struct _ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ non_clock_info_array = (struct _NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+ rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!rdev->pm.dpm.ps)
+ return -ENOMEM;
+ power_state_offset = (u8 *)state_array->states;
+ rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+ rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+ rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ if (!rdev->pm.power_state[i].clock_info) {
+ kfree(rdev->pm.dpm.ps);
+ return -EINVAL;
+ }
+ ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(rdev->pm.dpm.ps);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.ps[i].ps_priv = ps;
+ ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+ non_clock_info,
+ non_clock_info_array->ucEntrySize);
+ k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = idx[j];
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
+ break;
+ clock_info = (union pplib_clock_info *)
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
+ ci_parse_pplib_clock_info(rdev,
+ &rdev->pm.dpm.ps[i], k,
+ clock_info);
+ k++;
+ }
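+ /* each state entry is 2 header bytes plus one clock-info index byte per DPM level */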
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+ return 0;
+}
+
+int ci_get_vbios_boot_values(struct radeon_device *rdev,
+ struct ci_vbios_boot_state *boot_state)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
+ u8 frev, crev;
+ u16 data_offset;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
+ data_offset);
+ boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
+ boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
+ boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
+ boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
+ boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
+ boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
+ boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+void ci_dpm_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+ kfree(rdev->pm.dpm.ps[i].ps_priv);
+ }
+ kfree(rdev->pm.dpm.ps);
+ kfree(rdev->pm.dpm.priv);
+ kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+ r600_free_extended_power_table(rdev);
+}
+
+int ci_dpm_init(struct radeon_device *rdev)
+{
+ int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+ u16 data_offset, size;
+ u8 frev, crev;
+ struct ci_power_info *pi;
+ int ret;
+ u32 mask;
+
+ pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
+ if (pi == NULL)
+ return -ENOMEM;
+ rdev->pm.dpm.priv = pi;
+
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret)
+ pi->sys_pcie_mask = 0;
+ else
+ pi->sys_pcie_mask = mask;
+ pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
+
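+ /* start the performance/powersaving ranges inverted so the first parsed level initializes both min and max */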
+ pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
+ pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
+ pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
+ pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
+
+ pi->pcie_lane_performance.max = 0;
+ pi->pcie_lane_performance.min = 16;
+ pi->pcie_lane_powersaving.max = 0;
+ pi->pcie_lane_powersaving.min = 16;
+
+ ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
+ if (ret) {
+ ci_dpm_fini(rdev);
+ return ret;
+ }
+ ret = ci_parse_power_table(rdev);
+ if (ret) {
+ ci_dpm_fini(rdev);
+ return ret;
+ }
+ ret = r600_parse_extended_power_table(rdev);
+ if (ret) {
+ ci_dpm_fini(rdev);
+ return ret;
+ }
+
+ pi->dll_default_on = false;
+ pi->sram_end = SMC_RAM_END;
+
+ pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
+
+ pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
+
+ pi->sclk_dpm_key_disabled = 0;
+ pi->mclk_dpm_key_disabled = 0;
+ pi->pcie_dpm_key_disabled = 0;
+
+ pi->caps_sclk_ds = true;
+
+ pi->mclk_strobe_mode_threshold = 40000;
+ pi->mclk_stutter_mode_threshold = 40000;
+ pi->mclk_edc_enable_threshold = 40000;
+ pi->mclk_edc_wr_enable_threshold = 40000;
+
+ ci_initialize_powertune_defaults(rdev);
+
+ pi->caps_fps = false;
+
+ pi->caps_sclk_throttle_low_notification = false;
+
+ pi->caps_uvd_dpm = true;
+
+ ci_get_leakage_voltages(rdev);
+ ci_patch_dependency_tables_with_leakage(rdev);
+ ci_set_private_data_variables_based_on_pptable(rdev);
+
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+ kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+ ci_dpm_fini(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
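+ /* fallback dispclk/voltage pairs; clocks in 10 kHz units, voltages in mV (assumed radeon convention) */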
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+ rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+ rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+ rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+
+ rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+ rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+ rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+ rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
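+ /* thermal trip points in millidegrees C */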
+ pi->thermal_temp_setting.temperature_low = 99500;
+ pi->thermal_temp_setting.temperature_high = 100000;
+ pi->thermal_temp_setting.temperature_shutdown = 104000;
+
+ pi->uvd_enabled = false;
+
+ pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+ pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+ pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+ if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
+ pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+ else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
+ pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
+ if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
+ pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+ else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
+ pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+ else
+ rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
+ }
+
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
+ if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
+ pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+ else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
+ pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+ else
+ rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
+ }
+
+ pi->vddc_phase_shed_control = true;
+
+#if defined(CONFIG_ACPI)
+ pi->pcie_performance_request =
+ radeon_acpi_is_pcie_performance_request_supported(rdev);
+#else
+ pi->pcie_performance_request = false;
+#endif
+
+ if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ pi->caps_sclk_ss_support = true;
+ pi->caps_mclk_ss_support = true;
+ pi->dynamic_ss = true;
+ } else {
+ pi->caps_sclk_ss_support = false;
+ pi->caps_mclk_ss_support = false;
+ pi->dynamic_ss = true;
+ }
+
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+ pi->thermal_protection = true;
+ else
+ pi->thermal_protection = false;
+
+ pi->caps_dynamic_ac_timing = true;
+
+ pi->uvd_power_gated = false;
+
+ /* make sure dc limits are valid */
+ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+ return 0;
+}
+
+void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ u32 sclk = ci_get_average_sclk_freq(rdev);
+ u32 mclk = ci_get_average_mclk_freq(rdev);
+
+ seq_printf(m, "power level avg sclk: %u mclk: %u\n",
+ sclk, mclk);
+}
+
+void ci_dpm_print_power_state(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct ci_ps *ps = ci_get_ps(rps);
+ struct ci_pl *pl;
+ int i;
+
+ r600_dpm_print_class_info(rps->class, rps->class2);
+ r600_dpm_print_cap_info(rps->caps);
+ printk("\tuvd vclk: %u dclk: %u\n", rps->vclk, rps->dclk);
+ for (i = 0; i < ps->performance_level_count; i++) {
+ pl = &ps->performance_levels[i];
+ printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
+ i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
+ }
+ r600_dpm_print_ps_status(rdev, rps);
+}
+
+u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
+
+ if (low)
+ return requested_state->performance_levels[0].sclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
+
+ if (low)
+ return requested_state->performance_levels[0].mclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
new file mode 100644
index 000000000000..93bbed977ffb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CI_DPM_H__
+#define __CI_DPM_H__
+
+#include "ppsmc.h"
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 6
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+#include "smu7_discrete.h"
+
+#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
+
+struct ci_pl {
+ u32 mclk;
+ u32 sclk;
+ enum radeon_pcie_gen pcie_gen;
+ u16 pcie_lane;
+};
+
+struct ci_ps {
+ u16 performance_level_count;
+ bool dc_compatible;
+ u32 sclk_t;
+ struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS];
+};
+
+struct ci_dpm_level {
+ bool enabled;
+ u32 value;
+ u32 param1;
+};
+
+#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define MAX_REGULAR_DPM_NUMBER 8
+#define CISLAND_MINIMUM_ENGINE_CLOCK 800
+
+struct ci_single_dpm_table {
+ u32 count;
+ struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct ci_dpm_table {
+ struct ci_single_dpm_table sclk_table;
+ struct ci_single_dpm_table mclk_table;
+ struct ci_single_dpm_table pcie_speed_table;
+ struct ci_single_dpm_table vddc_table;
+ struct ci_single_dpm_table vddci_table;
+ struct ci_single_dpm_table mvdd_table;
+};
+
+struct ci_mc_reg_entry {
+ u32 mclk_max;
+ u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_mc_reg_table {
+ u8 last;
+ u8 num_entries;
+ u16 valid_flag;
+ struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_ulv_parm {
+ bool supported;
+ u32 cg_ulv_parameter;
+ u32 volt_change_delay;
+ struct ci_pl pl;
+};
+
+#define CISLANDS_MAX_LEAKAGE_COUNT 8
+
+struct ci_leakage_voltage {
+ u16 count;
+ u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT];
+ u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+struct ci_dpm_level_enable_mask {
+ u32 uvd_dpm_enable_mask;
+ u32 vce_dpm_enable_mask;
+ u32 acp_dpm_enable_mask;
+ u32 samu_dpm_enable_mask;
+ u32 sclk_dpm_enable_mask;
+ u32 mclk_dpm_enable_mask;
+ u32 pcie_dpm_enable_mask;
+};
+
+struct ci_vbios_boot_state {
+ u16 mvdd_bootup_value;
+ u16 vddc_bootup_value;
+ u16 vddci_bootup_value;
+ u32 sclk_bootup_value;
+ u32 mclk_bootup_value;
+ u16 pcie_gen_bootup_value;
+ u16 pcie_lane_bootup_value;
+};
+
+struct ci_clock_registers {
+ u32 cg_spll_func_cntl;
+ u32 cg_spll_func_cntl_2;
+ u32 cg_spll_func_cntl_3;
+ u32 cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2;
+ u32 dll_cntl;
+ u32 mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl;
+ u32 mpll_func_cntl;
+ u32 mpll_func_cntl_1;
+ u32 mpll_func_cntl_2;
+ u32 mpll_ss1;
+ u32 mpll_ss2;
+};
+
+struct ci_thermal_temperature_setting {
+ s32 temperature_low;
+ s32 temperature_high;
+ s32 temperature_shutdown;
+};
+
+struct ci_pcie_perf_range {
+ u16 max;
+ u16 min;
+};
+
+enum ci_pt_config_reg_type {
+ CISLANDS_CONFIGREG_MMR = 0,
+ CISLANDS_CONFIGREG_SMC_IND,
+ CISLANDS_CONFIGREG_DIDT_IND,
+ CISLANDS_CONFIGREG_CACHE,
+ CISLANDS_CONFIGREG_MAX
+};
+
+#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
+#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
+#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+
+struct ci_pt_config_reg {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 value;
+ enum ci_pt_config_reg_type type;
+};
+
+struct ci_pt_defaults {
+ u8 svi_load_line_en;
+ u8 svi_load_line_vddc;
+ u8 tdc_vddc_throttle_release_limit_perc;
+ u8 tdc_mawt;
+ u8 tdc_waterfall_ctl;
+ u8 dte_ambient_temp_base;
+ u32 display_cac;
+ u32 bapm_temp_gradient;
+ u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+ u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+};
+
+#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
+#define DPMTABLE_UPDATE_SCLK 0x00000004
+#define DPMTABLE_UPDATE_MCLK 0x00000008
+
+struct ci_power_info {
+ struct ci_dpm_table dpm_table;
+ u32 voltage_control;
+ u32 mvdd_control;
+ u32 vddci_control;
+ u32 active_auto_throttle_sources;
+ struct ci_clock_registers clock_registers;
+ u16 acpi_vddc;
+ u16 acpi_vddci;
+ enum radeon_pcie_gen force_pcie_gen;
+ enum radeon_pcie_gen acpi_pcie_gen;
+ struct ci_leakage_voltage vddc_leakage;
+ struct ci_leakage_voltage vddci_leakage;
+ u16 max_vddc_in_pp_table;
+ u16 min_vddc_in_pp_table;
+ u16 max_vddci_in_pp_table;
+ u16 min_vddci_in_pp_table;
+ u32 mclk_strobe_mode_threshold;
+ u32 mclk_stutter_mode_threshold;
+ u32 mclk_edc_enable_threshold;
+ u32 mclk_edc_wr_enable_threshold;
+ struct ci_vbios_boot_state vbios_boot_state;
+ /* smc offsets */
+ u32 sram_end;
+ u32 dpm_table_start;
+ u32 soft_regs_start;
+ u32 mc_reg_table_start;
+ u32 fan_table_start;
+ u32 arb_table_start;
+ /* smc tables */
+ SMU7_Discrete_DpmTable smc_state_table;
+ SMU7_Discrete_MCRegisters smc_mc_reg_table;
+ SMU7_Discrete_PmFuses smc_powertune_table;
+ /* other stuff */
+ struct ci_mc_reg_table mc_reg_table;
+ struct atom_voltage_table vddc_voltage_table;
+ struct atom_voltage_table vddci_voltage_table;
+ struct atom_voltage_table mvdd_voltage_table;
+ struct ci_ulv_parm ulv;
+ u32 power_containment_features;
+ const struct ci_pt_defaults *powertune_defaults;
+ u32 dte_tj_offset;
+ bool vddc_phase_shed_control;
+ struct ci_thermal_temperature_setting thermal_temp_setting;
+ struct ci_dpm_level_enable_mask dpm_level_enable_mask;
+ u32 need_update_smu7_dpm_table;
+ u32 sclk_dpm_key_disabled;
+ u32 mclk_dpm_key_disabled;
+ u32 pcie_dpm_key_disabled;
+ struct ci_pcie_perf_range pcie_gen_performance;
+ struct ci_pcie_perf_range pcie_lane_performance;
+ struct ci_pcie_perf_range pcie_gen_powersaving;
+ struct ci_pcie_perf_range pcie_lane_powersaving;
+ u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS];
+ u32 mclk_activity_target;
+ u32 low_sclk_interrupt_t;
+ u32 last_mclk_dpm_enable_mask;
+ u32 sys_pcie_mask;
+ /* caps */
+ bool caps_power_containment;
+ bool caps_cac;
+ bool caps_sq_ramping;
+ bool caps_db_ramping;
+ bool caps_td_ramping;
+ bool caps_tcp_ramping;
+ bool caps_fps;
+ bool caps_sclk_ds;
+ bool caps_sclk_ss_support;
+ bool caps_mclk_ss_support;
+ bool caps_uvd_dpm;
+ bool caps_vce_dpm;
+ bool caps_samu_dpm;
+ bool caps_acp_dpm;
+ bool caps_automatic_dc_transition;
+ bool caps_sclk_throttle_low_notification;
+ bool caps_dynamic_ac_timing;
+ /* flags */
+ bool thermal_protection;
+ bool pcie_performance_request;
+ bool dynamic_ss;
+ bool dll_default_on;
+ bool cac_enabled;
+ bool uvd_enabled;
+ bool battery_state;
+ bool pspp_notify_required;
+ bool mem_gddr5;
+ bool enable_bapm_feature;
+ bool enable_tdc_limit_feature;
+ bool enable_pkg_pwr_tracking_feature;
+ bool use_pcie_performance_levels;
+ bool use_pcie_powersaving_levels;
+ bool uvd_power_gated;
+ /* driver states */
+ struct radeon_ps current_rps;
+ struct ci_ps current_ps;
+ struct radeon_ps requested_rps;
+ struct ci_ps requested_ps;
+};
+
+#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
+#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1
+#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2
+
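+/* Q8.8 fixed point: multiply by 256 (2^8) to convert to/from the SMC representation */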
+#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256
+
+#define CISLANDS_VRC_DFLT0 0x3FFFC000
+#define CISLANDS_VRC_DFLT1 0x000400
+#define CISLANDS_VRC_DFLT2 0xC00080
+#define CISLANDS_VRC_DFLT3 0xC00200
+#define CISLANDS_VRC_DFLT4 0xC01680
+#define CISLANDS_VRC_DFLT5 0xC00033
+#define CISLANDS_VRC_DFLT6 0xC00033
+#define CISLANDS_VRC_DFLT7 0x3FFFC000
+
+#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035
+#define CISLAND_TARGETACTIVITY_DFLT 30
+#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10
+
+#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
+#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
+#define PCIE_PERF_REQ_PECI_GEN1 2
+#define PCIE_PERF_REQ_PECI_GEN2 3
+#define PCIE_PERF_REQ_PECI_GEN3 4
+
+int ci_copy_bytes_to_smc(struct radeon_device *rdev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit);
+void ci_start_smc(struct radeon_device *rdev);
+void ci_reset_smc(struct radeon_device *rdev);
+int ci_program_jump_on_start(struct radeon_device *rdev);
+void ci_stop_smc_clock(struct radeon_device *rdev);
+void ci_start_smc_clock(struct radeon_device *rdev);
+bool ci_is_smc_running(struct radeon_device *rdev);
+PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
+PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev);
+int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit);
+int ci_read_smc_sram_dword(struct radeon_device *rdev,
+ u32 smc_address, u32 *value, u32 limit);
+int ci_write_smc_sram_dword(struct radeon_device *rdev,
+ u32 smc_address, u32 value, u32 limit);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
new file mode 100644
index 000000000000..53b43dd3cf1e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "radeon.h"
+#include "cikd.h"
+#include "ppsmc.h"
+#include "radeon_ucode.h"
+
+static int ci_set_smc_sram_address(struct radeon_device *rdev,
+ u32 smc_address, u32 limit)
+{
+ if (smc_address & 3)
+ return -EINVAL;
+ if ((smc_address + 3) > limit)
+ return -EINVAL;
+
+ WREG32(SMC_IND_INDEX_0, smc_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+ return 0;
+}
+
+int ci_copy_bytes_to_smc(struct radeon_device *rdev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit)
+{
+ u32 data, original_data;
+ u32 addr;
+ u32 extra_shift;
+ int ret;
+
+ if (smc_start_address & 3)
+ return -EINVAL;
+ if ((smc_start_address + byte_count) > limit)
+ return -EINVAL;
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+ ret = ci_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ /* RMW for the final bytes */
+ if (byte_count > 0) {
+ data = 0;
+
+ ret = ci_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ original_data = RREG32(SMC_IND_DATA_0);
+
+ extra_shift = 8 * (4 - byte_count);
+
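+ /* shift the leftover bytes into the high end of the dword; the low extra_shift bits keep their original register contents */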
+ while (byte_count > 0) {
+ data = (data << 8) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ ret = ci_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+ }
+ return 0;
+}
+
+void ci_start_smc(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+ tmp &= ~RST_REG;
+ WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+void ci_reset_smc(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+ tmp |= RST_REG;
+ WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+int ci_program_jump_on_start(struct radeon_device *rdev)
+{
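+ /* presumably a branch instruction written at SMC address 0 so the firmware jumps to its entry point out of reset (byte values assumed) */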
+ static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
+
+ return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
+}
+
+void ci_stop_smc_clock(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+ tmp |= CK_DISABLE;
+
+ WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+void ci_start_smc_clock(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+ tmp &= ~CK_DISABLE;
+
+ WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool ci_is_smc_running(struct radeon_device *rdev)
+{
+ u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+ u32 pc_c = RREG32_SMC(SMC_PC_C);
+
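+ /* the SMC is considered running when its clock is ungated and the program counter is past the boot ROM region (threshold assumed) */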
+ if (!(clk & CK_DISABLE) && (0x20100 <= pc_c))
+ return true;
+
+ return false;
+}
+
+PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
+{
+ u32 tmp;
+ int i;
+
+ if (!ci_is_smc_running(rdev))
+ return PPSMC_Result_Failed;
+
+ WREG32(SMC_MESSAGE_0, msg);
+
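+ /* poll the response register; the SMC writes a non-zero PPSMC_Result when the message completes */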
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(SMC_RESP_0);
+ if (tmp != 0)
+ break;
+ udelay(1);
+ }
+ tmp = RREG32(SMC_RESP_0);
+
+ return (PPSMC_Result)tmp;
+}
+
+PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ if (!ci_is_smc_running(rdev))
+ return PPSMC_Result_OK;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+ if ((tmp & CKEN) == 0)
+ break;
+ udelay(1);
+ }
+
+ return PPSMC_Result_OK;
+}
+
+int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
+{
+ u32 ucode_start_address;
+ u32 ucode_size;
+ const u8 *src;
+ u32 data;
+
+ if (!rdev->smc_fw)
+ return -EINVAL;
+
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ ucode_start_address = BONAIRE_SMC_UCODE_START;
+ ucode_size = BONAIRE_SMC_UCODE_SIZE;
+ break;
+ default:
+ DRM_ERROR("unknown asic in smc ucode loader\n");
+ BUG();
+ }
+
+ if (ucode_size & 3)
+ return -EINVAL;
+
+ src = (const u8 *)rdev->smc_fw->data;
+ WREG32(SMC_IND_INDEX_0, ucode_start_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+ while (ucode_size >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ ucode_size -= 4;
+ }
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+ return 0;
+}
+
+int ci_read_smc_sram_dword(struct radeon_device *rdev,
+ u32 smc_address, u32 *value, u32 limit)
+{
+ int ret;
+
+ ret = ci_set_smc_sram_address(rdev, smc_address, limit);
+ if (ret)
+ return ret;
+
+ *value = RREG32(SMC_IND_DATA_0);
+ return 0;
+}
+
+int ci_write_smc_sram_dword(struct radeon_device *rdev,
+ u32 smc_address, u32 value, u32 limit)
+{
+ int ret;
+
+ ret = ci_set_smc_sram_address(rdev, smc_address, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, value);
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 6adbc998349e..a3bba0587276 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -30,22 +30,8 @@
#include "cikd.h"
#include "atom.h"
#include "cik_blit_shaders.h"
-
-/* GFX */
-#define CIK_PFP_UCODE_SIZE 2144
-#define CIK_ME_UCODE_SIZE 2144
-#define CIK_CE_UCODE_SIZE 2144
-/* compute */
-#define CIK_MEC_UCODE_SIZE 4192
-/* interrupts */
-#define BONAIRE_RLC_UCODE_SIZE 2048
-#define KB_RLC_UCODE_SIZE 2560
-#define KV_RLC_UCODE_SIZE 2560
-/* gddr controller */
-#define CIK_MC_UCODE_SIZE 7866
-/* sdma */
-#define CIK_SDMA_UCODE_SIZE 1050
-#define CIK_SDMA_UCODE_VERSION 64
+#include "radeon_ucode.h"
+#include "clearstate_ci.h"
MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
@@ -54,6 +40,7 @@ MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
+MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
@@ -72,10 +59,61 @@ extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void sumo_rlc_fini(struct radeon_device *rdev);
+extern int sumo_rlc_init(struct radeon_device *rdev);
extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern void si_rlc_fini(struct radeon_device *rdev);
-extern int si_rlc_init(struct radeon_device *rdev);
+extern void si_rlc_reset(struct radeon_device *rdev);
+extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
+extern int cik_sdma_resume(struct radeon_device *rdev);
+extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
+extern void cik_sdma_fini(struct radeon_device *rdev);
+extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
static void cik_rlc_stop(struct radeon_device *rdev);
+static void cik_pcie_gen3_enable(struct radeon_device *rdev);
+static void cik_program_aspm(struct radeon_device *rdev);
+static void cik_init_pg(struct radeon_device *rdev);
+static void cik_init_cg(struct radeon_device *rdev);
+
+/* get temperature in millidegrees */
+int ci_get_temp(struct radeon_device *rdev)
+{
+ u32 temp;
+ int actual_temp = 0;
+
+ temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+ CTF_TEMP_SHIFT;
+
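+ /* 9-bit temperature field; bit 9 appears to flag an out-of-range reading, clamped to 255 C */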
+ if (temp & 0x200)
+ actual_temp = 255;
+ else
+ actual_temp = temp & 0x1ff;
+
+ actual_temp = actual_temp * 1000;
+
+ return actual_temp;
+}
+
+/* get temperature in millidegrees */
+int kv_get_temp(struct radeon_device *rdev)
+{
+ u32 temp;
+ int actual_temp = 0;
+
+ temp = RREG32_SMC(0xC0300E0C);
+
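+ /* raw value appears to be in 1/8 degree units with a -49 C offset */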
+ if (temp)
+ actual_temp = (temp / 8) - 49;
+ else
+ actual_temp = 0;
+
+ actual_temp = actual_temp * 1000;
+
+ return actual_temp;
+}
/*
* Indirect registers accessor
@@ -98,6 +136,778 @@ void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
(void)RREG32(PCIE_DATA);
}
+static const u32 spectre_rlc_save_restore_register_list[] =
+{
+ (0x0e00 << 16) | (0xc12c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc140 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc150 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc15c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc168 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc170 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc178 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc204 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8228 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x829c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x869c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x98f4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x98f8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc260 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x90e8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c000 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c00c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c1c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8900 >> 2),
+ 0x00000000,
+ 0x3,
+ (0x0e00 << 16) | (0xc130 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc134 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc1fc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc208 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc264 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc268 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc26c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc270 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc274 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc278 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc27c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc280 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc284 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc288 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc28c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc290 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc294 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc298 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc29c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2ac >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x301d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30238 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30250 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30254 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30258 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3025c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc99c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9834 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f00 >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f00 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f04 >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f04 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f08 >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f08 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f0c >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f0c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x9b7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bf0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bcc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8b24 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30a04 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a10 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a14 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a2c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc704 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc708 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc768 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc770 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc774 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc778 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc77c >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc780 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc784 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc788 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc78c >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc798 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc79c >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7a0 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7a4 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7a8 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7ac >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7b0 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9100 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c010 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92a8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92ac >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92c4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92c8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92cc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c04 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c38 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c3c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xae00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9604 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac08 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac0c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac58 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac68 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac6c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac70 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac74 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac78 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac80 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac84 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac88 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac8c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x970c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9714 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9718 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x971c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88bc >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0x89c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8980 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30938 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3093c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30940 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30904 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c210 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c214 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c218 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8904 >> 2),
+ 0x00000000,
+ 0x5,
+ (0x0e00 << 16) | (0x8c28 >> 2),
+ (0x0e00 << 16) | (0x8c2c >> 2),
+ (0x0e00 << 16) | (0x8c30 >> 2),
+ (0x0e00 << 16) | (0x8c34 >> 2),
+ (0x0e00 << 16) | (0x9600 >> 2),
+};
+
+static const u32 kalindi_rlc_save_restore_register_list[] =
+{
+ (0x0e00 << 16) | (0xc12c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc140 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc150 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc15c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc168 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc170 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc204 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8228 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x829c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x869c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x98f4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x98f8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc260 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x90e8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c000 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c00c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c1c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8900 >> 2),
+ 0x00000000,
+ 0x3,
+ (0x0e00 << 16) | (0xc130 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc134 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc1fc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc208 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc264 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc268 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc26c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc270 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc274 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc28c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc290 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc294 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc298 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2ac >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x301d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30238 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30250 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30254 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30258 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3025c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc99c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9834 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f00 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f04 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f08 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f0c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x9b7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bf0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bcc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8b24 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30a04 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a10 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a14 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a2c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc704 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc708 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc768 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc770 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc774 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc798 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc79c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9100 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c010 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c04 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c38 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c3c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xae00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9604 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac08 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac0c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac58 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac68 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac6c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac70 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac74 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac78 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac80 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac84 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac88 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac8c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x970c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9714 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9718 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x971c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88bc >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0x89c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8980 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30938 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3093c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30940 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30904 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3e1fc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c210 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c214 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c218 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8904 >> 2),
+ 0x00000000,
+ 0x5,
+ (0x0e00 << 16) | (0x8c28 >> 2),
+ (0x0e00 << 16) | (0x8c2c >> 2),
+ (0x0e00 << 16) | (0x8c30 >> 2),
+ (0x0e00 << 16) | (0x8c34 >> 2),
+ (0x0e00 << 16) | (0x9600 >> 2),
+};
+
static const u32 bonaire_golden_spm_registers[] =
{
0x30800, 0xe0ffffff, 0xe0000000
@@ -744,7 +1554,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
const char *chip_name;
size_t pfp_req_size, me_req_size, ce_req_size,
mec_req_size, rlc_req_size, mc_req_size,
- sdma_req_size;
+ sdma_req_size, smc_req_size;
char fw_name[30];
int err;
@@ -760,6 +1570,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
mc_req_size = CIK_MC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
break;
case CHIP_KAVERI:
chip_name = "KAVERI";
@@ -851,7 +1662,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
err = -EINVAL;
}
- /* No MC ucode on APUs */
+ /* No SMC, MC ucode on APUs */
if (!(rdev->flags & RADEON_IS_IGP)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
@@ -863,6 +1674,21 @@ static int cik_init_microcode(struct radeon_device *rdev)
rdev->mc_fw->size, fw_name);
err = -EINVAL;
}
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ printk(KERN_ERR
+ "cik_smc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
}
out:
@@ -881,6 +1707,8 @@ out:
rdev->rlc_fw = NULL;
release_firmware(rdev->mc_fw);
rdev->mc_fw = NULL;
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
}
return err;
}
@@ -1880,7 +2708,46 @@ static void cik_gpu_init(struct radeon_device *rdev)
gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_KAVERI:
- /* TODO */
+ rdev->config.cik.max_shader_engines = 1;
+ rdev->config.cik.max_tile_pipes = 4;
+ if ((rdev->pdev->device == 0x1304) ||
+ (rdev->pdev->device == 0x1305) ||
+ (rdev->pdev->device == 0x130C) ||
+ (rdev->pdev->device == 0x130F) ||
+ (rdev->pdev->device == 0x1310) ||
+ (rdev->pdev->device == 0x1311) ||
+ (rdev->pdev->device == 0x131C)) {
+ rdev->config.cik.max_cu_per_sh = 8;
+ rdev->config.cik.max_backends_per_se = 2;
+ } else if ((rdev->pdev->device == 0x1309) ||
+ (rdev->pdev->device == 0x130A) ||
+ (rdev->pdev->device == 0x130D) ||
+ (rdev->pdev->device == 0x1313)) {
+ rdev->config.cik.max_cu_per_sh = 6;
+ rdev->config.cik.max_backends_per_se = 2;
+ } else if ((rdev->pdev->device == 0x1306) ||
+ (rdev->pdev->device == 0x1307) ||
+ (rdev->pdev->device == 0x130B) ||
+ (rdev->pdev->device == 0x130E) ||
+ (rdev->pdev->device == 0x1315) ||
+ (rdev->pdev->device == 0x131B)) {
+ rdev->config.cik.max_cu_per_sh = 4;
+ rdev->config.cik.max_backends_per_se = 1;
+ } else {
+ rdev->config.cik.max_cu_per_sh = 3;
+ rdev->config.cik.max_backends_per_se = 1;
+ }
+ rdev->config.cik.max_sh_per_se = 1;
+ rdev->config.cik.max_texture_channel_caches = 4;
+ rdev->config.cik.max_gprs = 256;
+ rdev->config.cik.max_gs_threads = 16;
+ rdev->config.cik.max_hw_contexts = 8;
+
+ rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
+ rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_KABINI:
default:
@@ -2587,11 +3454,12 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
if (rdev->wb.enabled) {
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
} else {
+ mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
rptr = RREG32(CP_HQD_PQ_RPTR);
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
}
- rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
return rptr;
}
@@ -2604,11 +3472,12 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
if (rdev->wb.enabled) {
wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
} else {
+ mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
wptr = RREG32(CP_HQD_PQ_WPTR);
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
}
- wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
return wptr;
}
@@ -2616,10 +3485,8 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
void cik_compute_ring_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- u32 wptr = (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask;
-
- rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(wptr);
- WDOORBELL32(ring->doorbell_offset, wptr);
+ rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
+ WDOORBELL32(ring->doorbell_offset, ring->wptr);
}
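Both accessors above, and the compute resume path below, share one pattern: cik_srbm_select() routes the banked CP_HQD_*/SH_MEM_* registers to a given ME/pipe/queue/VMID, so every select/access/restore sequence is now serialized with srbm_mutex. A minimal sketch of the pattern, assuming a hypothetical read_hqd_reg() helper:

/* Hedged sketch; read_hqd_reg() is hypothetical, not a driver function. */
static u32 read_hqd_reg(struct radeon_device *rdev,
			struct radeon_ring *ring, u32 reg)
{
	u32 val;

	mutex_lock(&rdev->srbm_mutex);
	cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
	val = RREG32(reg);			/* banked register access */
	cik_srbm_select(rdev, 0, 0, 0, 0);	/* restore defaults */
	mutex_unlock(&rdev->srbm_mutex);
	return val;
}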
/**
@@ -2897,6 +3764,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
WREG32(CP_CPF_DEBUG, tmp);
/* init the pipes */
+ mutex_lock(&rdev->srbm_mutex);
for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
int me = (i < 4) ? 1 : 2;
int pipe = (i < 4) ? i : (i - 4);
@@ -2919,6 +3787,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
WREG32(CP_HPD_EOP_CONTROL, tmp);
}
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
/* init the queues. Just two for now. */
for (i = 0; i < 2; i++) {
@@ -2972,6 +3841,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
mqd->static_thread_mgmt23[0] = 0xffffffff;
mqd->static_thread_mgmt23[1] = 0xffffffff;
+ mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, rdev->ring[idx].me,
rdev->ring[idx].pipe,
rdev->ring[idx].queue, 0);
@@ -3099,6 +3969,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
@@ -3142,13 +4013,6 @@ static int cik_cp_resume(struct radeon_device *rdev)
{
int r;
- /* Reset all cp blocks */
- WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
- RREG32(GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(GRBM_SOFT_RESET, 0);
- RREG32(GRBM_SOFT_RESET);
-
r = cik_cp_load_microcode(rdev);
if (r)
return r;
@@ -3163,579 +4027,6 @@ static int cik_cp_resume(struct radeon_device *rdev)
return 0;
}
-/*
- * sDMA - System DMA
- * Starting with CIK, the GPU has new asynchronous
- * DMA engines. These engines are used for compute
- * and gfx. There are two DMA engines (SDMA0, SDMA1)
- * and each one supports 1 ring buffer used for gfx
- * and 2 queues used for compute.
- *
- * The programming model is very similar to the CP
- * (ring buffer, IBs, etc.), but sDMA has its own
- * packet format that is different from the PM4 format
- * used by the CP. sDMA supports copying data, writing
- * embedded data, solid fills, and a number of other
- * things. It also has support for tiling/detiling of
- * buffers.
- */
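For reference, the SDMA_PACKET() macro used throughout this code builds the one-dword sDMA packet header. Quoted from memory of cikd.h, so treat the exact field widths as an assumption:

/* Assumed header layout: extra bits [31:16], sub-opcode [15:8], opcode [7:0]. */
#define SDMA_PACKET(op, sub_op, e)	((((e) & 0xFFFF) << 16) |	\
					 (((sub_op) & 0xFF) << 8) |	\
					 (((op) & 0xFF) << 0))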
-/**
- * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (CIK).
- */
-void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
- struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
- u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 5;
- while ((next_rptr & 7) != 4)
- next_rptr++;
- next_rptr += 4;
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- radeon_ring_write(ring, 1); /* number of DWs to follow */
- radeon_ring_write(ring, next_rptr);
- }
-
-	/* IB packet must end on an 8 DW boundary */
- while ((ring->wptr & 7) != 4)
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
- radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
- radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
- radeon_ring_write(ring, ib->length_dw);
-
-}
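A worked example of the padding above: the INDIRECT_BUFFER packet is 4 dwords, so it must start at (wptr & 7) == 4 for its last dword to land on an 8-dword boundary. If wptr is 10 (10 & 7 == 2), two NOPs advance it to 12 (12 & 7 == 4); the packet then fills dwords 12 through 15 and ends exactly at 16.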
-
-/**
- * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write the fence
- * seq number, and a DMA trap packet to generate an
- * interrupt if needed (CIK).
- */
-void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
- u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
- SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
- u32 ref_and_mask;
-
- if (fence->ring == R600_RING_TYPE_DMA_INDEX)
- ref_and_mask = SDMA0;
- else
- ref_and_mask = SDMA1;
-
- /* write the fence */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
- radeon_ring_write(ring, addr & 0xffffffff);
- radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
- radeon_ring_write(ring, fence->seq);
- /* generate an interrupt */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
- /* flush HDP */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
- radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
- radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
- radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
- radeon_ring_write(ring, ref_and_mask); /* MASK */
- radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
-}
-
-/**
- * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- * @semaphore: radeon semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring to wait on or
- * signal other rings (CIK).
- */
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- u64 addr = semaphore->gpu_addr;
- u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
- radeon_ring_write(ring, addr & 0xfffffff8);
- radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
-}
-
-/**
- * cik_sdma_gfx_stop - stop the gfx async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the gfx async dma ring buffers (CIK).
- */
-static void cik_sdma_gfx_stop(struct radeon_device *rdev)
-{
- u32 rb_cntl, reg_offset;
- int i;
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-
- for (i = 0; i < 2; i++) {
- if (i == 0)
- reg_offset = SDMA0_REGISTER_OFFSET;
- else
- reg_offset = SDMA1_REGISTER_OFFSET;
- rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
- rb_cntl &= ~SDMA_RB_ENABLE;
- WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
- WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
- }
-}
-
-/**
- * cik_sdma_rlc_stop - stop the compute async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the compute async dma queues (CIK).
- */
-static void cik_sdma_rlc_stop(struct radeon_device *rdev)
-{
- /* XXX todo */
-}
-
-/**
- * cik_sdma_enable - halt or unhalt the async dma engines
- *
- * @rdev: radeon_device pointer
- * @enable: enable/disable the DMA MEs.
- *
- * Halt or unhalt the async dma engines (CIK).
- */
-static void cik_sdma_enable(struct radeon_device *rdev, bool enable)
-{
- u32 me_cntl, reg_offset;
- int i;
-
- for (i = 0; i < 2; i++) {
- if (i == 0)
- reg_offset = SDMA0_REGISTER_OFFSET;
- else
- reg_offset = SDMA1_REGISTER_OFFSET;
- me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
- if (enable)
- me_cntl &= ~SDMA_HALT;
- else
- me_cntl |= SDMA_HALT;
- WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
- }
-}
-
-/**
- * cik_sdma_gfx_resume - setup and start the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Set up the gfx DMA ring buffers and enable them (CIK).
- * Returns 0 for success, error for failure.
- */
-static int cik_sdma_gfx_resume(struct radeon_device *rdev)
-{
- struct radeon_ring *ring;
- u32 rb_cntl, ib_cntl;
- u32 rb_bufsz;
- u32 reg_offset, wb_offset;
- int i, r;
-
- for (i = 0; i < 2; i++) {
- if (i == 0) {
- ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- reg_offset = SDMA0_REGISTER_OFFSET;
- wb_offset = R600_WB_DMA_RPTR_OFFSET;
- } else {
- ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
- reg_offset = SDMA1_REGISTER_OFFSET;
- wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
- }
-
- WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
- WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
-
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = rb_bufsz << 1;
-#ifdef __BIG_ENDIAN
- rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
-#endif
- WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
- WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
-
- /* set the wb address whether it's enabled or not */
- WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
- upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
- WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
- ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
-
- if (rdev->wb.enabled)
- rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
-
- WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
- WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
-
- ring->wptr = 0;
- WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
-
- ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
-
- /* enable DMA RB */
- WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
-
- ib_cntl = SDMA_IB_ENABLE;
-#ifdef __BIG_ENDIAN
- ib_cntl |= SDMA_IB_SWAP_ENABLE;
-#endif
- /* enable DMA IBs */
- WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
-
- ring->ready = true;
-
- r = radeon_ring_test(rdev, ring->idx, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
- }
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
- return 0;
-}
-
-/**
- * cik_sdma_rlc_resume - setup and start the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Set up the compute DMA queues and enable them (CIK).
- * Returns 0 for success, error for failure.
- */
-static int cik_sdma_rlc_resume(struct radeon_device *rdev)
-{
- /* XXX todo */
- return 0;
-}
-
-/**
- * cik_sdma_load_microcode - load the sDMA ME ucode
- *
- * @rdev: radeon_device pointer
- *
- * Loads the sDMA0/1 ucode.
- * Returns 0 for success, -EINVAL if the ucode is not available.
- */
-static int cik_sdma_load_microcode(struct radeon_device *rdev)
-{
- const __be32 *fw_data;
- int i;
-
- if (!rdev->sdma_fw)
- return -EINVAL;
-
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
-
- /* halt the MEs */
- cik_sdma_enable(rdev, false);
-
- /* sdma0 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
-
- /* sdma1 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
-
- WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
- WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
- return 0;
-}
-
-/**
- * cik_sdma_resume - setup and start the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Set up the DMA engines and enable them (CIK).
- * Returns 0 for success, error for failure.
- */
-static int cik_sdma_resume(struct radeon_device *rdev)
-{
- int r;
-
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
- RREG32(SRBM_SOFT_RESET);
-
- r = cik_sdma_load_microcode(rdev);
- if (r)
- return r;
-
- /* unhalt the MEs */
- cik_sdma_enable(rdev, true);
-
- /* start the gfx rings and rlc compute queues */
- r = cik_sdma_gfx_resume(rdev);
- if (r)
- return r;
- r = cik_sdma_rlc_resume(rdev);
- if (r)
- return r;
-
- return 0;
-}
-
-/**
- * cik_sdma_fini - tear down the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engines and free the rings (CIK).
- */
-static void cik_sdma_fini(struct radeon_device *rdev)
-{
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
- /* halt the MEs */
- cik_sdma_enable(rdev, false);
- radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
- radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
- /* XXX - compute dma queue tear down */
-}
-
-/**
- * cik_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU pages using the DMA engine (CIK).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int cik_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_bytes, cur_size_in_bytes;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
- num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_bytes = size_in_bytes;
- if (cur_size_in_bytes > 0x1fffff)
- cur_size_in_bytes = 0x1fffff;
- size_in_bytes -= cur_size_in_bytes;
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
- radeon_ring_write(ring, cur_size_in_bytes);
- radeon_ring_write(ring, 0); /* src/dst endian swap */
- radeon_ring_write(ring, src_offset & 0xffffffff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
- src_offset += cur_size_in_bytes;
- dst_offset += cur_size_in_bytes;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
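The chunking arithmetic above caps each COPY_LINEAR packet at 0x1fffff bytes (just under 2 MiB) and reserves 7 ring dwords per packet plus 14 for the semaphore sync and fence. For example, a 64 MiB move (16384 GPU pages of 4 KiB) gives size_in_bytes = 0x4000000, num_loops = DIV_ROUND_UP(0x4000000, 0x1fffff) = 33, and a ring reservation of 33 * 7 + 14 = 245 dwords.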
-
-/**
- * cik_sdma_ring_test - simple async dma engine test
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test the DMA engine by using it to write a value to
- * memory (CIK).
- * Returns 0 for success, error for failure.
- */
-int cik_sdma_ring_test(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ring_lock(rdev, ring, 4);
- if (r) {
- DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
- return r;
- }
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
- radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
- radeon_ring_write(ring, 1); /* number of DWs to follow */
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
-
- if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- return r;
-}
-
-/**
- * cik_sdma_ib_test - test an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test a simple IB in the DMA ring (CIK).
- * Returns 0 on success, error on failure.
- */
-int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- struct radeon_ib ib;
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp = 0;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
- if (r) {
- DRM_ERROR("radeon: failed to get ib (%d).\n", r);
- return r;
- }
-
- ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
- ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
- ib.ptr[3] = 1;
- ib.ptr[4] = 0xDEADBEEF;
- ib.length_dw = 5;
-
- r = radeon_ib_schedule(rdev, &ib, NULL);
- if (r) {
- radeon_ib_free(rdev, &ib);
- DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
- return r;
- }
- r = radeon_fence_wait(ib.fence, false);
- if (r) {
- DRM_ERROR("radeon: fence wait failed (%d).\n", r);
- return r;
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
- } else {
- DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
- r = -EINVAL;
- }
- radeon_ib_free(rdev, &ib);
- return r;
-}
-
-
static void cik_print_gpu_status_regs(struct radeon_device *rdev)
{
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
@@ -3785,7 +4076,7 @@ static void cik_print_gpu_status_regs(struct radeon_device *rdev)
* mask to be used by cik_gpu_soft_reset().
* Returns a mask of the blocks to be reset.
*/
-static u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -4036,34 +4327,6 @@ bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * cik_sdma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up (CIK).
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = cik_gpu_check_soft_reset(rdev);
- u32 mask;
-
- if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- mask = RADEON_RESET_DMA;
- else
- mask = RADEON_RESET_DMA1;
-
- if (!(reset_mask & mask)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
/* MC */
/**
* cik_mc_program - program the GPU memory controller
@@ -4320,6 +4583,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
+ mutex_lock(&rdev->srbm_mutex);
for (i = 0; i < 16; i++) {
cik_srbm_select(rdev, 0, 0, 0, i);
/* CP and shaders */
@@ -4335,6 +4599,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
/* XXX SDMA RLC - todo */
}
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
cik_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -4598,131 +4863,8 @@ void cik_vm_set_page(struct radeon_device *rdev,
}
} else {
/* DMA */
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
- }
- }
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
- }
-}
-
-/**
- * cik_dma_vm_flush - cik vm flush using sDMA
- *
- * @rdev: radeon_device pointer
- *
- * Update the page table base and flush the VM TLB
- * using sDMA (CIK).
- */
-void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-{
- struct radeon_ring *ring = &rdev->ring[ridx];
- u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
- SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
- u32 ref_and_mask;
-
- if (vm == NULL)
- return;
-
- if (ridx == R600_RING_TYPE_DMA_INDEX)
- ref_and_mask = SDMA0;
- else
- ref_and_mask = SDMA1;
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- if (vm->id < 8) {
- radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
- } else {
- radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
}
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
-
- /* update SH_MEM_* regs */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
- radeon_ring_write(ring, VMID(vm->id));
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SH_MEM_BASES >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
- radeon_ring_write(ring, 1);
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
- radeon_ring_write(ring, VMID(0));
-
- /* flush HDP */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
- radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
- radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
- radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
- radeon_ring_write(ring, ref_and_mask); /* MASK */
- radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
-
- /* flush TLB */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
- radeon_ring_write(ring, 1 << vm->id);
}
/*
@@ -4731,31 +4873,34 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
* variety of functions, the most important of which is
* the interrupt controller.
*/
-/**
- * cik_rlc_stop - stop the RLC ME
- *
- * @rdev: radeon_device pointer
- *
- * Halt the RLC ME (MicroEngine) (CIK).
- */
-static void cik_rlc_stop(struct radeon_device *rdev)
+static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
+ bool enable)
{
- int i, j, k;
- u32 mask, tmp;
+ u32 tmp = RREG32(CP_INT_CNTL_RING0);
- tmp = RREG32(CP_INT_CNTL_RING0);
- tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ if (enable)
+ tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ else
+ tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING0, tmp);
+}
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
+static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
+{
+ u32 tmp;
- tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
- WREG32(RLC_CGCG_CGLS_CTRL, tmp);
+ tmp = RREG32(RLC_LB_CNTL);
+ if (enable)
+ tmp |= LOAD_BALANCE_ENABLE;
+ else
+ tmp &= ~LOAD_BALANCE_ENABLE;
+ WREG32(RLC_LB_CNTL, tmp);
+}
- WREG32(RLC_CNTL, 0);
+static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
+{
+ u32 i, j, k;
+ u32 mask;
for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
@@ -4777,6 +4922,84 @@ static void cik_rlc_stop(struct radeon_device *rdev)
}
}
+static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
+{
+ u32 tmp;
+
+ tmp = RREG32(RLC_CNTL);
+ if (tmp != rlc)
+ WREG32(RLC_CNTL, rlc);
+}
+
+static u32 cik_halt_rlc(struct radeon_device *rdev)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_CNTL);
+
+ if (data & RLC_ENABLE) {
+ u32 i;
+
+ data &= ~RLC_ENABLE;
+ WREG32(RLC_CNTL, data);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
+ break;
+ udelay(1);
+ }
+
+ cik_wait_for_rlc_serdes(rdev);
+ }
+
+ return orig;
+}
+
+void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
+{
+ u32 tmp, i, mask;
+
+ tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
+ WREG32(RLC_GPR_REG2, tmp);
+
+ mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if ((RREG32(RLC_GPM_STAT) & mask) == mask)
+ break;
+ udelay(1);
+ }
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
+ WREG32(RLC_GPR_REG2, tmp);
+}
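These two helpers are meant to bracket register updates the RLC must not race with: enter posts MSG_ENTER_RLC_SAFE_MODE and polls until the RLC acks the request, exit posts the matching message. A minimal usage sketch, where SOME_GFX_REG and new_value are placeholders rather than real driver symbols:

/* Hedged sketch; SOME_GFX_REG/new_value are placeholders. */
cik_enter_rlc_safe_mode(rdev);
WREG32(SOME_GFX_REG, new_value);	/* RLC quiesced across this write */
cik_exit_rlc_safe_mode(rdev);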
+
+/**
+ * cik_rlc_stop - stop the RLC ME
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Halt the RLC ME (MicroEngine) (CIK).
+ */
+static void cik_rlc_stop(struct radeon_device *rdev)
+{
+ WREG32(RLC_CNTL, 0);
+
+ cik_enable_gui_idle_interrupt(rdev, false);
+
+ cik_wait_for_rlc_serdes(rdev);
+}
+
/**
* cik_rlc_start - start the RLC ME
*
@@ -4786,13 +5009,9 @@ static void cik_rlc_stop(struct radeon_device *rdev)
*/
static void cik_rlc_start(struct radeon_device *rdev)
{
- u32 tmp;
-
WREG32(RLC_CNTL, RLC_ENABLE);
- tmp = RREG32(CP_INT_CNTL_RING0);
- tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
- WREG32(CP_INT_CNTL_RING0, tmp);
+ cik_enable_gui_idle_interrupt(rdev, true);
udelay(50);
}
@@ -4808,8 +5027,7 @@ static void cik_rlc_start(struct radeon_device *rdev)
*/
static int cik_rlc_resume(struct radeon_device *rdev)
{
- u32 i, size;
- u32 clear_state_info[3];
+ u32 i, size, tmp;
const __be32 *fw_data;
if (!rdev->rlc_fw)
@@ -4830,12 +5048,15 @@ static int cik_rlc_resume(struct radeon_device *rdev)
cik_rlc_stop(rdev);
- WREG32(GRBM_SOFT_RESET, SOFT_RESET_RLC);
- RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- RREG32(GRBM_SOFT_RESET);
- udelay(50);
+ /* disable CG */
+ tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
+ WREG32(RLC_CGCG_CGLS_CTRL, tmp);
+
+ si_rlc_reset(rdev);
+
+ cik_init_pg(rdev);
+
+ cik_init_cg(rdev);
WREG32(RLC_LB_CNTR_INIT, 0);
WREG32(RLC_LB_CNTR_MAX, 0x00008000);
@@ -4854,20 +5075,757 @@ static int cik_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(RLC_GPM_UCODE_ADDR, 0);
- /* XXX */
- clear_state_info[0] = 0;//upper_32_bits(rdev->rlc.save_restore_gpu_addr);
- clear_state_info[1] = 0;//rdev->rlc.save_restore_gpu_addr;
- clear_state_info[2] = 0;//cik_default_size;
- WREG32(RLC_GPM_SCRATCH_ADDR, 0x3d);
- for (i = 0; i < 3; i++)
- WREG32(RLC_GPM_SCRATCH_DATA, clear_state_info[i]);
- WREG32(RLC_DRIVER_DMA_STATUS, 0);
+ /* XXX - find out what chips support lbpw */
+ cik_enable_lbpw(rdev, false);
+
+ if (rdev->family == CHIP_BONAIRE)
+ WREG32(RLC_DRIVER_DMA_STATUS, 0);
cik_rlc_start(rdev);
return 0;
}
+static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
+{
+ u32 data, orig, tmp, tmp2;
+
+ orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
+ cik_enable_gui_idle_interrupt(rdev, true);
+
+ tmp = cik_halt_rlc(rdev);
+
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+ WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+ tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
+ WREG32(RLC_SERDES_WR_CTRL, tmp2);
+
+ cik_update_rlc(rdev, tmp);
+
+ data |= CGCG_EN | CGLS_EN;
+ } else {
+ cik_enable_gui_idle_interrupt(rdev, false);
+
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+
+ data &= ~(CGCG_EN | CGLS_EN);
+ }
+
+ if (orig != data)
+ WREG32(RLC_CGCG_CGLS_CTRL, data);
+
+}
+
+static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
+{
+ u32 data, orig, tmp = 0;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
+ if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
+ if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
+ orig = data = RREG32(CP_MEM_SLP_CNTL);
+ data |= CP_MEM_LS_EN;
+ if (orig != data)
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
+ }
+
+ orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data &= 0xfffffffd;
+ if (orig != data)
+ WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+ tmp = cik_halt_rlc(rdev);
+
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+ WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+ data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
+ WREG32(RLC_SERDES_WR_CTRL, data);
+
+ cik_update_rlc(rdev, tmp);
+
+ if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
+ orig = data = RREG32(CGTS_SM_CTRL_REG);
+ data &= ~SM_MODE_MASK;
+ data |= SM_MODE(0x2);
+ data |= SM_MODE_ENABLE;
+ data &= ~CGTS_OVERRIDE;
+ if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
+ (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
+ data &= ~CGTS_LS_OVERRIDE;
+ data &= ~ON_MONITOR_ADD_MASK;
+ data |= ON_MONITOR_ADD_EN;
+ data |= ON_MONITOR_ADD(0x96);
+ if (orig != data)
+ WREG32(CGTS_SM_CTRL_REG, data);
+ }
+ } else {
+ orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data |= 0x00000002;
+ if (orig != data)
+ WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+ data = RREG32(RLC_MEM_SLP_CNTL);
+ if (data & RLC_MEM_LS_EN) {
+ data &= ~RLC_MEM_LS_EN;
+ WREG32(RLC_MEM_SLP_CNTL, data);
+ }
+
+ data = RREG32(CP_MEM_SLP_CNTL);
+ if (data & CP_MEM_LS_EN) {
+ data &= ~CP_MEM_LS_EN;
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
+
+ orig = data = RREG32(CGTS_SM_CTRL_REG);
+ data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
+ if (orig != data)
+ WREG32(CGTS_SM_CTRL_REG, data);
+
+ tmp = cik_halt_rlc(rdev);
+
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+ WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+ data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
+ WREG32(RLC_SERDES_WR_CTRL, data);
+
+ cik_update_rlc(rdev, tmp);
+ }
+}
+
+static const u32 mc_cg_registers[] =
+{
+ MC_HUB_MISC_HUB_CG,
+ MC_HUB_MISC_SIP_CG,
+ MC_HUB_MISC_VM_CG,
+ MC_XPB_CLK_GAT,
+ ATC_MISC_CG,
+ MC_CITF_MISC_WR_CG,
+ MC_CITF_MISC_RD_CG,
+ MC_CITF_MISC_VM_CG,
+ VM_L2_CG,
+};
+
+static void cik_enable_mc_ls(struct radeon_device *rdev,
+ bool enable)
+{
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
+ data |= MC_LS_ENABLE;
+ else
+ data &= ~MC_LS_ENABLE;
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
+ }
+}
+
+static void cik_enable_mc_mgcg(struct radeon_device *rdev,
+ bool enable)
+{
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
+ data |= MC_CG_ENABLE;
+ else
+ data &= ~MC_CG_ENABLE;
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
+ }
+}
+
+static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
+ WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
+ WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
+ } else {
+ orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
+ data |= 0xff000000;
+ if (data != orig)
+ WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
+
+ orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
+ data |= 0xff000000;
+ if (data != orig)
+ WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
+ }
+}
+
+static void cik_enable_sdma_mgls(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
+ orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
+ data |= 0x100;
+ if (orig != data)
+ WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
+
+ orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
+ data |= 0x100;
+ if (orig != data)
+ WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
+ } else {
+ orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
+ data &= ~0x100;
+ if (orig != data)
+ WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
+
+ orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
+ data &= ~0x100;
+ if (orig != data)
+ WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
+ }
+}
+
+static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
+ data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
+ data = 0xfff;
+ WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(UVD_CGC_CTRL);
+ data |= DCM;
+ if (orig != data)
+ WREG32(UVD_CGC_CTRL, data);
+ } else {
+ data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
+ data &= ~0xfff;
+ WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(UVD_CGC_CTRL);
+ data &= ~DCM;
+ if (orig != data)
+ WREG32(UVD_CGC_CTRL, data);
+ }
+}
+
+static void cik_enable_bif_mgls(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
+ data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
+ REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
+ else
+ data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
+ REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
+
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_CNTL2, data);
+}
+
+static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32(HDP_HOST_PATH_CNTL);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
+ data &= ~CLOCK_GATING_DIS;
+ else
+ data |= CLOCK_GATING_DIS;
+
+ if (orig != data)
+ WREG32(HDP_HOST_PATH_CNTL, data);
+}
+
+static void cik_enable_hdp_ls(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32(HDP_MEM_POWER_LS);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
+ data |= HDP_LS_ENABLE;
+ else
+ data &= ~HDP_LS_ENABLE;
+
+ if (orig != data)
+ WREG32(HDP_MEM_POWER_LS, data);
+}
+
+void cik_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable)
+{
+ if (block & RADEON_CG_BLOCK_GFX) {
+ /* order matters! */
+ if (enable) {
+ cik_enable_mgcg(rdev, true);
+ cik_enable_cgcg(rdev, true);
+ } else {
+ cik_enable_cgcg(rdev, false);
+ cik_enable_mgcg(rdev, false);
+ }
+ }
+
+ if (block & RADEON_CG_BLOCK_MC) {
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ cik_enable_mc_mgcg(rdev, enable);
+ cik_enable_mc_ls(rdev, enable);
+ }
+ }
+
+ if (block & RADEON_CG_BLOCK_SDMA) {
+ cik_enable_sdma_mgcg(rdev, enable);
+ cik_enable_sdma_mgls(rdev, enable);
+ }
+
+ if (block & RADEON_CG_BLOCK_BIF) {
+ cik_enable_bif_mgls(rdev, enable);
+ }
+
+ if (block & RADEON_CG_BLOCK_UVD) {
+ if (rdev->has_uvd)
+ cik_enable_uvd_mgcg(rdev, enable);
+ }
+
+ if (block & RADEON_CG_BLOCK_HDP) {
+ cik_enable_hdp_mgcg(rdev, enable);
+ cik_enable_hdp_ls(rdev, enable);
+ }
+}
+
+static void cik_init_cg(struct radeon_device *rdev)
+{
+
+ cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);
+
+ if (rdev->has_uvd)
+ si_init_uvd_internal_cg(rdev);
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+}
+
+static void cik_fini_cg(struct radeon_device *rdev)
+{
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
+ cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
+}
+
+static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
+ data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
+ else
+ data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
+ data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
+ else
+ data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
+ data &= ~DISABLE_CP_PG;
+ else
+ data |= DISABLE_CP_PG;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
+ data &= ~DISABLE_GDS_PG;
+ else
+ data |= DISABLE_GDS_PG;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+#define CP_ME_TABLE_SIZE 96
+#define CP_ME_TABLE_OFFSET 2048
+#define CP_MEC_TABLE_OFFSET 4096
+
+void cik_init_cp_pg_table(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me = 4;
+ u32 bo_offset = 0;
+ u32 table_offset;
+
+ if (rdev->family == CHIP_KAVERI)
+ max_me = 5;
+
+ if (rdev->rlc.cp_table_ptr == NULL)
+ return;
+
+ /* write the cp table buffer */
+ dst_ptr = rdev->rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 1) {
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 2) {
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else {
+ fw_data = (const __be32 *)rdev->mec_fw->data;
+ table_offset = CP_MEC_TABLE_OFFSET;
+ }
+
+ for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
+ dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]);
+ }
+ bo_offset += CP_ME_TABLE_SIZE;
+ }
+}
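The layout written here is max_me consecutive 96-dword slices: CE at dword offset 0, PFP at 96, ME at 192, and one MEC slice after that (two on Kaveri, hence max_me = 5). That matches the allocation made in cik_startup() further down, which sizes the buffer as CP_ME_TABLE_SIZE * 5 * 4 = 1920 bytes, enough for the five-engine Kaveri case.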
+
+static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+ orig = data = RREG32(RLC_PG_CNTL);
+ data |= GFX_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+
+ orig = data = RREG32(RLC_AUTO_PG_CTRL);
+ data |= AUTO_PG_EN;
+ if (orig != data)
+ WREG32(RLC_AUTO_PG_CTRL, data);
+ } else {
+ orig = data = RREG32(RLC_PG_CNTL);
+ data &= ~GFX_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+
+ orig = data = RREG32(RLC_AUTO_PG_CTRL);
+ data &= ~AUTO_PG_EN;
+ if (orig != data)
+ WREG32(RLC_AUTO_PG_CTRL, data);
+
+ data = RREG32(DB_RENDER_CONTROL);
+ }
+}
+
+static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
+{
+ u32 mask = 0, tmp, tmp1;
+ int i;
+
+ cik_select_se_sh(rdev, se, sh);
+ tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+ tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+ tmp &= 0xffff0000;
+
+ tmp |= tmp1;
+ tmp >>= 16;
+
+ for (i = 0; i < rdev->config.cik.max_cu_per_sh; i ++) {
+ mask <<= 1;
+ mask |= 1;
+ }
+
+ return (~tmp) & mask;
+}
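A worked example of the mask logic: with max_cu_per_sh = 8 the loop builds mask = 0xff. The register fields are disable bitmaps, so after merging the hard-fused (CC_) and user (GC_USER_) bits into tmp and shifting them down, (~tmp) & mask inverts them into an active-CU bitmap; if tmp reads 0x03, CUs 0 and 1 are fused off and the function returns 0xfc (CUs 2 through 7 active).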
+
+static void cik_init_ao_cu_mask(struct radeon_device *rdev)
+{
+ u32 i, j, k, active_cu_number = 0;
+ u32 mask, counter, cu_bitmap;
+ u32 tmp = 0;
+
+ for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
+ for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
+ mask = 1;
+ cu_bitmap = 0;
+ counter = 0;
+ for (k = 0; k < rdev->config.cik.max_cu_per_sh; k ++) {
+ if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
+ if (counter < 2)
+ cu_bitmap |= mask;
+ counter ++;
+ }
+ mask <<= 1;
+ }
+
+ active_cu_number += counter;
+ tmp |= (cu_bitmap << (i * 16 + j * 8));
+ }
+ }
+
+ WREG32(RLC_PG_AO_CU_MASK, tmp);
+
+ tmp = RREG32(RLC_MAX_PG_CU);
+ tmp &= ~MAX_PU_CU_MASK;
+ tmp |= MAX_PU_CU(active_cu_number);
+ WREG32(RLC_MAX_PG_CU, tmp);
+}
+
+static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
+ data |= STATIC_PER_CU_PG_ENABLE;
+ else
+ data &= ~STATIC_PER_CU_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
+ data |= DYN_PER_CU_PG_ENABLE;
+ else
+ data &= ~DYN_PER_CU_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
+
+static void cik_init_gfx_cgpg(struct radeon_device *rdev)
+{
+ u32 data, orig;
+ u32 i;
+
+ if (rdev->rlc.cs_data) {
+ WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
+ WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
+ WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
+ WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
+ } else {
+ WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
+ for (i = 0; i < 3; i++)
+ WREG32(RLC_GPM_SCRATCH_DATA, 0);
+ }
+ if (rdev->rlc.reg_list) {
+ WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
+ for (i = 0; i < rdev->rlc.reg_list_size; i++)
+ WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
+ }
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ data |= GFX_PG_SRC;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);
+
+ data = RREG32(CP_RB_WPTR_POLL_CNTL);
+ data &= ~IDLE_POLL_COUNT_MASK;
+ data |= IDLE_POLL_COUNT(0x60);
+ WREG32(CP_RB_WPTR_POLL_CNTL, data);
+
+ data = 0x10101010;
+ WREG32(RLC_PG_DELAY, data);
+
+ data = RREG32(RLC_PG_DELAY_2);
+ data &= ~0xff;
+ data |= 0x3;
+ WREG32(RLC_PG_DELAY_2, data);
+
+ data = RREG32(RLC_AUTO_PG_CTRL);
+ data &= ~GRBM_REG_SGIT_MASK;
+ data |= GRBM_REG_SGIT(0x700);
+ WREG32(RLC_AUTO_PG_CTRL, data);
+
+}
+
+static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
+{
+ cik_enable_gfx_cgpg(rdev, enable);
+ cik_enable_gfx_static_mgpg(rdev, enable);
+ cik_enable_gfx_dynamic_mgpg(rdev, enable);
+}
+
+u32 cik_get_csb_size(struct radeon_device *rdev)
+{
+ u32 count = 0;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (rdev->rlc.cs_data == NULL)
+ return 0;
+
+ /* begin clear state */
+ count += 2;
+ /* context control state */
+ count += 3;
+
+ for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT)
+ count += 2 + ext->reg_count;
+ else
+ return 0;
+ }
+ }
+ /* pa_sc_raster_config/pa_sc_raster_config1 */
+ count += 4;
+ /* end clear state */
+ count += 2;
+ /* clear state */
+ count += 2;
+
+ return count;
+}
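The accounting above is: 2 dwords for the begin-clear-state preamble, 3 for context control, (2 + reg_count) for each SECT_CONTEXT extent, 4 for the pa_sc_raster_config pair, 2 for the end preamble, and 2 for the final clear-state packet. For a hypothetical cs_data table with three context extents of 10, 4, and 1 registers, that is 2 + 3 + 12 + 6 + 3 + 4 + 2 + 2 = 34 dwords.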
+
+void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
+{
+ u32 count = 0, i;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (rdev->rlc.cs_data == NULL)
+ return;
+ if (buffer == NULL)
+ return;
+
+ buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
+ buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+
+ buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
+ buffer[count++] = 0x80000000;
+ buffer[count++] = 0x80000000;
+
+ for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
+ buffer[count++] = ext->reg_index - 0xa000;
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = ext->extent[i];
+ } else {
+ return;
+ }
+ }
+ }
+
+ buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
+ buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ buffer[count++] = 0x16000012;
+ buffer[count++] = 0x00000000;
+ break;
+ case CHIP_KAVERI:
+ buffer[count++] = 0x00000000; /* XXX */
+ buffer[count++] = 0x00000000;
+ break;
+ case CHIP_KABINI:
+ buffer[count++] = 0x00000000; /* XXX */
+ buffer[count++] = 0x00000000;
+ break;
+ default:
+ buffer[count++] = 0x00000000;
+ buffer[count++] = 0x00000000;
+ break;
+ }
+
+ buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
+ buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+
+ buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
+ buffer[count++] = 0;
+}
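cik_get_csb_size() and cik_get_csb_buffer() are designed to be used as a pair: size the clear-state buffer first, then fill it dword-for-dword. A minimal caller sketch, with the BO allocation and mapping that would normally back 'buffer' elided:

/* Hedged sketch; obtaining 'buffer' (a mapped BO of dws dwords) is elided. */
u32 dws = cik_get_csb_size(rdev);
if (dws)
	cik_get_csb_buffer(rdev, buffer);	/* writes exactly dws dwords */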
+
+static void cik_init_pg(struct radeon_device *rdev)
+{
+ if (rdev->pg_flags) {
+ cik_enable_sck_slowdown_on_pu(rdev, true);
+ cik_enable_sck_slowdown_on_pd(rdev, true);
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ cik_init_gfx_cgpg(rdev);
+ cik_enable_cp_pg(rdev, true);
+ cik_enable_gds_pg(rdev, true);
+ }
+ cik_init_ao_cu_mask(rdev);
+ cik_update_gfx_pg(rdev, true);
+ }
+}
+
+static void cik_fini_pg(struct radeon_device *rdev)
+{
+ if (rdev->pg_flags) {
+ cik_update_gfx_pg(rdev, false);
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ cik_enable_cp_pg(rdev, false);
+ cik_enable_gds_pg(rdev, false);
+ }
+ }
+}
+
/*
* Interrupts
* Starting with r6xx, interrupts are handled via a ring buffer.
@@ -5086,6 +6044,7 @@ int cik_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 dma_cntl, dma_cntl1;
+ u32 thermal_int;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -5118,6 +6077,13 @@ int cik_irq_set(struct radeon_device *rdev)
cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ if (rdev->flags & RADEON_IS_IGP)
+ thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
+ ~(THERM_INTH_MASK | THERM_INTL_MASK);
+ else
+ thermal_int = RREG32_SMC(CG_THERMAL_INT) &
+ ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("cik_irq_set: sw int gfx\n");
@@ -5275,6 +6241,14 @@ int cik_irq_set(struct radeon_device *rdev)
hpd6 |= DC_HPDx_INT_EN;
}
+ if (rdev->irq.dpm_thermal) {
+ DRM_DEBUG("dpm thermal\n");
+ if (rdev->flags & RADEON_IS_IGP)
+ thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
+ else
+ thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+ }
+
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
@@ -5309,6 +6283,11 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ if (rdev->flags & RADEON_IS_IGP)
+ WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
+ else
+ WREG32_SMC(CG_THERMAL_INT, thermal_int);
+
return 0;
}
@@ -5520,6 +6499,7 @@ int cik_irq_process(struct radeon_device *rdev)
bool queue_hotplug = false;
bool queue_reset = false;
u32 addr, status, mc_client;
+ bool queue_thermal = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -5753,6 +6733,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 146:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
@@ -5870,6 +6854,19 @@ restart_ih:
break;
}
break;
+ case 230: /* thermal low to high */
+ DRM_DEBUG("IH: thermal low to high\n");
+ rdev->pm.dpm.thermal.high_to_low = false;
+ queue_thermal = true;
+ break;
+ case 231: /* thermal high to low */
+ DRM_DEBUG("IH: thermal high to low\n");
+ rdev->pm.dpm.thermal.high_to_low = true;
+ queue_thermal = true;
+ break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: GUI idle\n");
+ break;
case 241: /* SDMA Privileged inst */
case 247: /* SDMA Privileged inst */
DRM_ERROR("Illegal instruction in SDMA command stream\n");
@@ -5909,9 +6906,6 @@ restart_ih:
break;
}
break;
- case 233: /* GUI IDLE */
- DRM_DEBUG("IH: GUI idle\n");
- break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -5925,6 +6919,8 @@ restart_ih:
schedule_work(&rdev->hotplug_work);
if (queue_reset)
schedule_work(&rdev->reset_work);
+ if (queue_thermal)
+ schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
@@ -5954,6 +6950,18 @@ static int cik_startup(struct radeon_device *rdev)
struct radeon_ring *ring;
int r;
+ /* enable pcie gen2/3 link */
+ cik_pcie_gen3_enable(rdev);
+ /* enable aspm */
+ cik_program_aspm(rdev);
+
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ cik_mc_program(rdev);
+
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
!rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
@@ -5981,18 +6989,26 @@ static int cik_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
- cik_mc_program(rdev);
r = cik_pcie_gart_enable(rdev);
if (r)
return r;
cik_gpu_init(rdev);
/* allocate rlc buffers */
- r = si_rlc_init(rdev);
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (rdev->family == CHIP_KAVERI) {
+ rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
+ } else {
+ rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
+ }
+ }
+ rdev->rlc.cs_data = ci_cs_data;
+ rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+ r = sumo_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -6040,12 +7056,15 @@ static int cik_startup(struct radeon_device *rdev)
return r;
}
- r = cik_uvd_resume(rdev);
+ r = radeon_uvd_resume(rdev);
if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
- if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ r = uvd_v4_2_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
}
if (r)
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
@@ -6068,7 +7087,7 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
@@ -6077,7 +7096,7 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
- 0, 0xfffff, PACKET3(PACKET3_NOP, 0x3FFF));
+ PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
ring->me = 1; /* first MEC */
@@ -6089,7 +7108,7 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
- 0, 0xffffffff, PACKET3(PACKET3_NOP, 0x3FFF));
+ PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
/* dGPU only have 1 MEC */
@@ -6102,7 +7121,7 @@ static int cik_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
- 2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+ SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
if (r)
return r;
@@ -6110,7 +7129,7 @@ static int cik_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
- 2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+ SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
if (r)
return r;
@@ -6124,12 +7143,11 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
@@ -6146,6 +7164,10 @@ static int cik_startup(struct radeon_device *rdev)
return r;
}
+ r = dce6_audio_init(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -6191,11 +7213,14 @@ int cik_resume(struct radeon_device *rdev)
*/
int cik_suspend(struct radeon_device *rdev)
{
+ dce6_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
cik_cp_enable(rdev, false);
cik_sdma_enable(rdev, false);
- r600_uvd_rbc_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
+ cik_fini_pg(rdev);
+ cik_fini_cg(rdev);
cik_irq_suspend(rdev);
radeon_wb_disable(rdev);
cik_pcie_gart_disable(rdev);
@@ -6316,7 +7341,7 @@ int cik_init(struct radeon_device *rdev)
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
cik_irq_fini(rdev);
- si_rlc_fini(rdev);
+ sumo_rlc_fini(rdev);
cik_mec_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -6351,13 +7376,16 @@ void cik_fini(struct radeon_device *rdev)
{
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
+ cik_fini_pg(rdev);
+ cik_fini_cg(rdev);
cik_irq_fini(rdev);
- si_rlc_fini(rdev);
+ sumo_rlc_fini(rdev);
cik_mec_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
cik_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -6386,8 +7414,8 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
struct radeon_crtc *radeon_crtc,
struct drm_display_mode *mode)
{
- u32 tmp;
-
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
/*
* Line Buffer Setup
* There are 6 line buffers, one for each display controller.
@@ -6397,22 +7425,37 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
* them using the stereo blender.
*/
if (radeon_crtc->base.enabled && mode) {
- if (mode->crtc_hdisplay < 1920)
+ if (mode->crtc_hdisplay < 1920) {
tmp = 1;
- else if (mode->crtc_hdisplay < 2560)
+ buffer_alloc = 2;
+ } else if (mode->crtc_hdisplay < 2560) {
tmp = 2;
- else if (mode->crtc_hdisplay < 4096)
+ buffer_alloc = 2;
+ } else if (mode->crtc_hdisplay < 4096) {
tmp = 0;
- else {
+ buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
+ } else {
DRM_DEBUG_KMS("Mode too big for LB!\n");
tmp = 0;
+ buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
}
- } else
+ } else {
tmp = 1;
+ buffer_alloc = 0;
+ }
WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
@@ -6814,7 +7857,7 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
u32 lb_size, u32 num_heads)
{
struct drm_display_mode *mode = &radeon_crtc->base.mode;
- struct dce8_wm_params wm;
+ struct dce8_wm_params wm_low, wm_high;
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
@@ -6824,35 +7867,82 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
pixel_period = 1000000 / (u32)mode->clock;
line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
- wm.yclk = rdev->pm.current_mclk * 10;
- wm.sclk = rdev->pm.current_sclk * 10;
- wm.disp_clk = mode->clock;
- wm.src_width = mode->crtc_hdisplay;
- wm.active_time = mode->crtc_hdisplay * pixel_period;
- wm.blank_time = line_time - wm.active_time;
- wm.interlaced = false;
+ /* watermark for high clocks */
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+ rdev->pm.dpm_enabled) {
+ wm_high.yclk =
+ radeon_dpm_get_mclk(rdev, false) * 10;
+ wm_high.sclk =
+ radeon_dpm_get_sclk(rdev, false) * 10;
+ } else {
+ wm_high.yclk = rdev->pm.current_mclk * 10;
+ wm_high.sclk = rdev->pm.current_sclk * 10;
+ }
+
+ wm_high.disp_clk = mode->clock;
+ wm_high.src_width = mode->crtc_hdisplay;
+ wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.blank_time = line_time - wm_high.active_time;
+ wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm.interlaced = true;
- wm.vsc = radeon_crtc->vsc;
- wm.vtaps = 1;
+ wm_high.interlaced = true;
+ wm_high.vsc = radeon_crtc->vsc;
+ wm_high.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
- wm.vtaps = 2;
- wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm.lb_size = lb_size;
- wm.dram_channels = cik_get_number_of_dram_channels(rdev);
- wm.num_heads = num_heads;
+ wm_high.vtaps = 2;
+ wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm_high.lb_size = lb_size;
+ wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
+ wm_high.num_heads = num_heads;
/* set for high clocks */
- latency_watermark_a = min(dce8_latency_watermark(&wm), (u32)65535);
+ latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);
+
+ /* possibly force display priority to high */
+ /* should really do this at mode validation time... */
+ if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+ !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+ !dce8_check_latency_hiding(&wm_high) ||
+ (rdev->disp_priority == 2)) {
+ DRM_DEBUG_KMS("force priority to high\n");
+ }
+
+ /* watermark for low clocks */
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+ rdev->pm.dpm_enabled) {
+ wm_low.yclk =
+ radeon_dpm_get_mclk(rdev, true) * 10;
+ wm_low.sclk =
+ radeon_dpm_get_sclk(rdev, true) * 10;
+ } else {
+ wm_low.yclk = rdev->pm.current_mclk * 10;
+ wm_low.sclk = rdev->pm.current_sclk * 10;
+ }
+
+ wm_low.disp_clk = mode->clock;
+ wm_low.src_width = mode->crtc_hdisplay;
+ wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.blank_time = line_time - wm_low.active_time;
+ wm_low.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm_low.interlaced = true;
+ wm_low.vsc = radeon_crtc->vsc;
+ wm_low.vtaps = 1;
+ if (radeon_crtc->rmx_type != RMX_OFF)
+ wm_low.vtaps = 2;
+ wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm_low.lb_size = lb_size;
+ wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
+ wm_low.num_heads = num_heads;
+
/* set for low clocks */
- /* wm.yclk = low clk; wm.sclk = low clk */
- latency_watermark_b = min(dce8_latency_watermark(&wm), (u32)65535);
+ latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
- if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
- !dce8_average_bandwidth_vs_available_bandwidth(&wm) ||
- !dce8_check_latency_hiding(&wm) ||
+ if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+ !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+ !dce8_check_latency_hiding(&wm_low) ||
(rdev->disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
@@ -6877,6 +7967,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
LATENCY_HIGH_WATERMARK(line_time)));
/* restore original selection */
WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);
+
+ /* save values for DPM */
+ radeon_crtc->line_time = line_time;
+ radeon_crtc->wm_high = latency_watermark_a;
+ radeon_crtc->wm_low = latency_watermark_b;
}
/**
@@ -6966,39 +8061,307 @@ int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
return r;
}
-int cik_uvd_resume(struct radeon_device *rdev)
+static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
- uint64_t addr;
- uint32_t size;
- int r;
+ struct pci_dev *root = rdev->pdev->bus->self;
+ int bridge_pos, gpu_pos;
+ u32 speed_cntl, mask, current_data_rate;
+ int ret, i;
+ u16 tmp16;
- r = radeon_uvd_resume(rdev);
- if (r)
- return r;
+ if (radeon_pcie_gen2 == 0)
+ return;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
- /* programm the VCPU memory controller bits 0-27 */
- addr = rdev->uvd.gpu_addr >> 3;
- size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
- WREG32(UVD_VCPU_CACHE_SIZE0, size);
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret != 0)
+ return;
- addr += size;
- size = RADEON_UVD_STACK_SIZE >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
- WREG32(UVD_VCPU_CACHE_SIZE1, size);
+ if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ return;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
+ LC_CURRENT_DATA_RATE_SHIFT;
+ if (mask & DRM_PCIE_SPEED_80) {
+ if (current_data_rate == 2) {
+ DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
+ } else if (mask & DRM_PCIE_SPEED_50) {
+ if (current_data_rate == 1) {
+ DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+ }
- addr += size;
- size = RADEON_UVD_HEAP_SIZE >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
- WREG32(UVD_VCPU_CACHE_SIZE2, size);
+ bridge_pos = pci_pcie_cap(root);
+ if (!bridge_pos)
+ return;
- /* bits 28-31 */
- addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
- WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+ gpu_pos = pci_pcie_cap(rdev->pdev);
+ if (!gpu_pos)
+ return;
- /* bits 32-39 */
- addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
- WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+ if (mask & DRM_PCIE_SPEED_80) {
+ /* re-try equalization if gen3 is not already enabled */
+ if (current_data_rate != 2) {
+ u16 bridge_cfg, gpu_cfg;
+ u16 bridge_cfg2, gpu_cfg2;
+ u32 max_lw, current_lw, tmp;
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+ tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+ tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+ pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
+ max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+ current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+
+ if (current_lw < max_lw) {
+ tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ if (tmp & LC_RENEGOTIATION_SUPPORT) {
+ tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
+ tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
+ tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+ }
+ }
- return 0;
+ for (i = 0; i < 10; i++) {
+ /* check status */
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+ break;
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp |= LC_SET_QUIESCE;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp |= LC_REDO_EQ;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+ mdelay(100);
+
+ /* linkctl */
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+ tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+ tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+ pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+ /* linkctl2 */
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~((1 << 4) | (7 << 9));
+ tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~((1 << 4) | (7 << 9));
+ tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+ pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp &= ~LC_SET_QUIESCE;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ }
+ }
+ }
+
+ /* set the link speed */
+ speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
+ speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~0xf;
+ if (mask & DRM_PCIE_SPEED_80)
+ tmp16 |= 3; /* gen3 */
+ else if (mask & DRM_PCIE_SPEED_50)
+ tmp16 |= 2; /* gen2 */
+ else
+ tmp16 |= 1; /* gen1 */
+ pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+static void cik_program_aspm(struct radeon_device *rdev)
+{
+ u32 data, orig;
+ bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+ bool disable_clkreq = false;
+
+ if (radeon_aspm == 0)
+ return;
+
+ /* XXX double check IGPs */
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+ data &= ~LC_XMIT_N_FTS_MASK;
+ data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
+ data |= LC_GO_TO_RECOVERY;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
+ data |= P_IGNORE_EDB_ERR;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_P_CNTL, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+ data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+ data |= LC_PMI_TO_L1_DIS;
+ if (!disable_l0s)
+ data |= LC_L0S_INACTIVITY(7);
+
+ if (!disable_l1) {
+ data |= LC_L1_INACTIVITY(7);
+ data &= ~LC_PMI_TO_L1_DIS;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+
+ if (!disable_plloff_in_l1) {
+ bool clk_req_support;
+
+ orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
+ data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ if (orig != data)
+ WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);
+
+ orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
+ data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ if (orig != data)
+ WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);
+
+ orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
+ data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ if (orig != data)
+ WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);
+
+ orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
+ data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ if (orig != data)
+ WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+ data |= LC_DYN_LANES_PWR_STATE(3);
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+ if (!disable_clkreq) {
+ struct pci_dev *root = rdev->pdev->bus->self;
+ u32 lnkcap;
+
+ clk_req_support = false;
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+ if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+ clk_req_support = true;
+ } else {
+ clk_req_support = false;
+ }
+
+ if (clk_req_support) {
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
+ data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+
+ orig = data = RREG32_SMC(THM_CLK_CNTL);
+ data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
+ data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+ if (orig != data)
+ WREG32_SMC(THM_CLK_CNTL, data);
+
+ orig = data = RREG32_SMC(MISC_CLK_CTRL);
+ data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
+ data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+ if (orig != data)
+ WREG32_SMC(MISC_CLK_CTRL, data);
+
+ orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
+ data &= ~BCLK_AS_XCLK;
+ if (orig != data)
+ WREG32_SMC(CG_CLKPIN_CNTL, data);
+
+ orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
+ data &= ~FORCE_BIF_REFCLK_EN;
+ if (orig != data)
+ WREG32_SMC(CG_CLKPIN_CNTL_2, data);
+
+ orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_CLKOUT_SEL_MASK;
+ data |= MPLL_CLKOUT_SEL(4);
+ if (orig != data)
+ WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
+ }
+ }
+ } else {
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ }
+
+ orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
+ data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_CNTL2, data);
+
+ if (!disable_l0s) {
+ data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+ if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
+ data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
+ if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+ data &= ~LC_L0S_INACTIVITY_MASK;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ }
+ }
+ }
}
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index d71e46d571f5..ca1bb6133580 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -24,6 +24,9 @@
#ifndef __CIK_REG_H__
#define __CIK_REG_H__
+#define CIK_DIDT_IND_INDEX 0xca00
+#define CIK_DIDT_IND_DATA 0xca04
+
#define CIK_DC_GPIO_HPD_MASK 0x65b0
#define CIK_DC_GPIO_HPD_A 0x65b4
#define CIK_DC_GPIO_HPD_EN 0x65b8
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
new file mode 100644
index 000000000000..b6286068e111
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -0,0 +1,785 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "cikd.h"
+
+/* sdma */
+#define CIK_SDMA_UCODE_SIZE 1050
+#define CIK_SDMA_UCODE_VERSION 64
+
+u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/*
+ * sDMA - System DMA
+ * Starting with CIK, the GPU has new asynchronous
+ * DMA engines. These engines are used for compute
+ * and gfx. There are two DMA engines (SDMA0, SDMA1)
+ * and each one supports 1 ring buffer used for gfx
+ * and 2 queues used for compute.
+ *
+ * The programming model is very similar to the CP
+ * (ring buffer, IBs, etc.), but sDMA has its own
+ * packet format that is different from the PM4 format
+ * used by the CP. sDMA supports copying data, writing
+ * embedded data, solid fills, and a number of other
+ * things. It also has support for tiling/detiling of
+ * buffers.
+ */
+
+/**
+ * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (CIK).
+ */
+void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 5;
+ while ((next_rptr & 7) != 4)
+ next_rptr++;
+ next_rptr += 4;
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, 1); /* number of DWs to follow */
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* IB packet must end on an 8 DW boundary */
+ while ((ring->wptr & 7) != 4)
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
+ radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, ib->length_dw);
+
+}
+
+/**
+ * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and a DMA trap packet to generate
+ * an interrupt if needed (CIK).
+ */
+void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+ u32 ref_and_mask;
+
+ if (fence->ring == R600_RING_TYPE_DMA_INDEX)
+ ref_and_mask = SDMA0;
+ else
+ ref_and_mask = SDMA1;
+
+ /* write the fence */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ radeon_ring_write(ring, fence->seq);
+ /* generate an interrupt */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
+ /* flush HDP */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+ radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
+ radeon_ring_write(ring, ref_and_mask); /* MASK */
+ radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+}
+
+/**
+ * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (CIK).
+ */
+void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
+ u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
+ radeon_ring_write(ring, addr & 0xfffffff8);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+}
+
+/**
+ * cik_sdma_gfx_stop - stop the gfx async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the gfx async dma ring buffers (CIK).
+ */
+static void cik_sdma_gfx_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl, reg_offset;
+ int i;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ reg_offset = SDMA0_REGISTER_OFFSET;
+ else
+ reg_offset = SDMA1_REGISTER_OFFSET;
+ rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
+ rb_cntl &= ~SDMA_RB_ENABLE;
+ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
+ WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
+ }
+}
+
+/**
+ * cik_sdma_rlc_stop - stop the compute async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the compute async dma queues (CIK).
+ */
+static void cik_sdma_rlc_stop(struct radeon_device *rdev)
+{
+ /* XXX todo */
+}
+
+/**
+ * cik_sdma_enable - halt or unhalt the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ * @enable: enable/disable the DMA MEs.
+ *
+ * Halt or unhalt the async dma engines (CIK).
+ */
+void cik_sdma_enable(struct radeon_device *rdev, bool enable)
+{
+ u32 me_cntl, reg_offset;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ reg_offset = SDMA0_REGISTER_OFFSET;
+ else
+ reg_offset = SDMA1_REGISTER_OFFSET;
+ me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
+ if (enable)
+ me_cntl &= ~SDMA_HALT;
+ else
+ me_cntl |= SDMA_HALT;
+ WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
+ }
+}
+
+/**
+ * cik_sdma_gfx_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_gfx_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 rb_cntl, ib_cntl;
+ u32 rb_bufsz;
+ u32 reg_offset, wb_offset;
+ int i, r;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ reg_offset = SDMA0_REGISTER_OFFSET;
+ wb_offset = R600_WB_DMA_RPTR_OFFSET;
+ } else {
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ reg_offset = SDMA1_REGISTER_OFFSET;
+ wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+ }
+
+ WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+ WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
+ WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
+ upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
+ ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+ WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
+
+ ring->wptr = 0;
+ WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
+
+ ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
+
+ /* enable DMA RB */
+ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
+
+ ib_cntl = SDMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= SDMA_IB_SWAP_ENABLE;
+#endif
+ /* enable DMA IBs */
+ WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, ring->idx, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * cik_sdma_rlc_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the compute DMA queues and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_rlc_resume(struct radeon_device *rdev)
+{
+ /* XXX todo */
+ return 0;
+}
+
+/**
+ * cik_sdma_load_microcode - load the sDMA ME ucode
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Loads the sDMA0/1 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int cik_sdma_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->sdma_fw)
+ return -EINVAL;
+
+ /* stop the gfx rings and rlc compute queues */
+ cik_sdma_gfx_stop(rdev);
+ cik_sdma_rlc_stop(rdev);
+
+ /* halt the MEs */
+ cik_sdma_enable(rdev, false);
+
+ /* sdma0 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ /* sdma1 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ return 0;
+}
+
+/**
+ * cik_sdma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA engines and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+int cik_sdma_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+ RREG32(SRBM_SOFT_RESET);
+
+ r = cik_sdma_load_microcode(rdev);
+ if (r)
+ return r;
+
+ /* unhalt the MEs */
+ cik_sdma_enable(rdev, true);
+
+ /* start the gfx rings and rlc compute queues */
+ r = cik_sdma_gfx_resume(rdev);
+ if (r)
+ return r;
+ r = cik_sdma_rlc_resume(rdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/**
+ * cik_sdma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (CIK).
+ */
+void cik_sdma_fini(struct radeon_device *rdev)
+{
+ /* stop the gfx rings and rlc compute queues */
+ cik_sdma_gfx_stop(rdev);
+ cik_sdma_rlc_stop(rdev);
+ /* halt the MEs */
+ cik_sdma_enable(rdev, false);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+ radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+ /* XXX - compute dma queue tear down */
+}
+
+/**
+ * cik_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (CIK).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int cik_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0x1fffff)
+ cur_size_in_bytes = 0x1fffff;
+ size_in_bytes -= cur_size_in_bytes;
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
+ radeon_ring_write(ring, cur_size_in_bytes);
+ radeon_ring_write(ring, 0); /* src/dst endian swap */
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
+/**
+ * cik_sdma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (CIK).
+ * Returns 0 for success, error for failure.
+ */
+int cik_sdma_ring_test(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+ radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, 1); /* number of DWs to follow */
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/**
+ * cik_sdma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (CIK).
+ * Returns 0 on success, error on failure.
+ */
+int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp = 0;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
+ ib.ptr[3] = 1;
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+/**
+ * cik_sdma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (CIK).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = cik_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * cik_sdma_vm_set_page - update the page tables using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+void cik_sdma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count;
+ if (ndw > 0x7FFFF)
+ ndw = 0x7FFFF;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+ pe += ndw * 8;
+ addr += ndw * incr;
+ count -= ndw;
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+}
+
+/**
+ * cik_dma_vm_flush - cik vm flush using sDMA
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (CIK).
+ */
+void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+ u32 ref_and_mask;
+
+ if (vm == NULL)
+ return;
+
+ if (ridx == R600_RING_TYPE_DMA_INDEX)
+ ref_and_mask = SDMA0;
+ else
+ ref_and_mask = SDMA1;
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ if (vm->id < 8) {
+ radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+ } else {
+ radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ }
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* update SH_MEM_* regs */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
+ radeon_ring_write(ring, VMID(vm->id));
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SH_MEM_BASES >> 2);
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
+ radeon_ring_write(ring, 1);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
+ radeon_ring_write(ring, VMID(0));
+
+ /* flush HDP */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+ radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
+ radeon_ring_write(ring, ref_and_mask); /* MASK */
+ radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+
+ /* flush TLB */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 7e9275eaef80..203d2a09a1f5 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -28,21 +28,375 @@
#define CIK_RB_BITMAP_WIDTH_PER_SH 2
+/* DIDT IND registers */
+#define DIDT_SQ_CTRL0 0x0
+# define DIDT_CTRL_EN (1 << 0)
+#define DIDT_DB_CTRL0 0x20
+#define DIDT_TD_CTRL0 0x40
+#define DIDT_TCP_CTRL0 0x60
+
/* SMC IND registers */
+#define DPM_TABLE_475 0x3F768
+# define SamuBootLevel(x) ((x) << 0)
+# define SamuBootLevel_MASK 0x000000ff
+# define SamuBootLevel_SHIFT 0
+# define AcpBootLevel(x) ((x) << 8)
+# define AcpBootLevel_MASK 0x0000ff00
+# define AcpBootLevel_SHIFT 8
+# define VceBootLevel(x) ((x) << 16)
+# define VceBootLevel_MASK 0x00ff0000
+# define VceBootLevel_SHIFT 16
+# define UvdBootLevel(x) ((x) << 24)
+# define UvdBootLevel_MASK 0xff000000
+# define UvdBootLevel_SHIFT 24
+
+#define FIRMWARE_FLAGS 0x3F800
+# define INTERRUPTS_ENABLED (1 << 0)
+
+#define NB_DPM_CONFIG_1 0x3F9E8
+# define Dpm0PgNbPsLo(x) ((x) << 0)
+# define Dpm0PgNbPsLo_MASK 0x000000ff
+# define Dpm0PgNbPsLo_SHIFT 0
+# define Dpm0PgNbPsHi(x) ((x) << 8)
+# define Dpm0PgNbPsHi_MASK 0x0000ff00
+# define Dpm0PgNbPsHi_SHIFT 8
+# define DpmXNbPsLo(x) ((x) << 16)
+# define DpmXNbPsLo_MASK 0x00ff0000
+# define DpmXNbPsLo_SHIFT 16
+# define DpmXNbPsHi(x) ((x) << 24)
+# define DpmXNbPsHi_MASK 0xff000000
+# define DpmXNbPsHi_SHIFT 24
+
+#define SMC_SYSCON_RESET_CNTL 0x80000000
+# define RST_REG (1 << 0)
+#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004
+# define CK_DISABLE (1 << 0)
+# define CKEN (1 << 24)
+
+#define SMC_SYSCON_MISC_CNTL 0x80000010
+
+#define SMC_SYSCON_MSG_ARG_0 0x80000068
+
+#define SMC_PC_C 0x80000370
+
+#define SMC_SCRATCH9 0x80000424
+
+#define RCU_UC_EVENTS 0xC0000004
+# define BOOT_SEQ_DONE (1 << 7)
+
#define GENERAL_PWRMGT 0xC0200000
+# define GLOBAL_PWRMGT_EN (1 << 0)
+# define STATIC_PM_EN (1 << 1)
+# define THERMAL_PROTECTION_DIS (1 << 2)
+# define THERMAL_PROTECTION_TYPE (1 << 3)
+# define SW_SMIO_INDEX(x) ((x) << 6)
+# define SW_SMIO_INDEX_MASK (1 << 6)
+# define SW_SMIO_INDEX_SHIFT 6
+# define VOLT_PWRMGT_EN (1 << 10)
# define GPU_COUNTER_CLK (1 << 15)
-
+# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
+
+#define CNB_PWRMGT_CNTL 0xC0200004
+# define GNB_SLOW_MODE(x) ((x) << 0)
+# define GNB_SLOW_MODE_MASK (3 << 0)
+# define GNB_SLOW_MODE_SHIFT 0
+# define GNB_SLOW (1 << 2)
+# define FORCE_NB_PS1 (1 << 3)
+# define DPM_ENABLED (1 << 4)
+
+#define SCLK_PWRMGT_CNTL 0xC0200008
+# define SCLK_PWRMGT_OFF (1 << 0)
+# define RESET_BUSY_CNT (1 << 4)
+# define RESET_SCLK_CNT (1 << 5)
+# define DYNAMIC_PM_EN (1 << 21)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX 0xC0200014
+# define CURRENT_STATE_MASK (0xf << 4)
+# define CURRENT_STATE_SHIFT 4
+# define CURR_MCLK_INDEX_MASK (0xf << 8)
+# define CURR_MCLK_INDEX_SHIFT 8
+# define CURR_SCLK_INDEX_MASK (0x1f << 16)
+# define CURR_SCLK_INDEX_SHIFT 16
+
+#define CG_SSP 0xC0200044
+# define SST(x) ((x) << 0)
+# define SST_MASK (0xffff << 0)
+# define SSTU(x) ((x) << 16)
+# define SSTU_MASK (0xf << 16)
+
+#define CG_DISPLAY_GAP_CNTL 0xC0200060
+# define DISP_GAP(x) ((x) << 0)
+# define DISP_GAP_MASK (3 << 0)
+# define VBI_TIMER_COUNT(x) ((x) << 4)
+# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
+# define VBI_TIMER_UNIT(x) ((x) << 20)
+# define VBI_TIMER_UNIT_MASK (7 << 20)
+# define DISP_GAP_MCHG(x) ((x) << 24)
+# define DISP_GAP_MCHG_MASK (3 << 24)
+
+#define SMU_VOLTAGE_STATUS 0xC0200094
+# define SMU_VOLTAGE_CURRENT_LEVEL_MASK (0xff << 1)
+# define SMU_VOLTAGE_CURRENT_LEVEL_SHIFT 1
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1 0xC02000F0
+# define CURR_PCIE_INDEX_MASK (0xf << 24)
+# define CURR_PCIE_INDEX_SHIFT 24
+
+#define CG_ULV_PARAMETER 0xC0200158
+
+#define CG_FTV_0 0xC02001A8
+#define CG_FTV_1 0xC02001AC
+#define CG_FTV_2 0xC02001B0
+#define CG_FTV_3 0xC02001B4
+#define CG_FTV_4 0xC02001B8
+#define CG_FTV_5 0xC02001BC
+#define CG_FTV_6 0xC02001C0
+#define CG_FTV_7 0xC02001C4
+
+#define CG_DISPLAY_GAP_CNTL2 0xC0200230
+
+#define LCAC_SX0_OVR_SEL 0xC0400D04
+#define LCAC_SX0_OVR_VAL 0xC0400D08
+
+#define LCAC_MC0_CNTL 0xC0400D30
+#define LCAC_MC0_OVR_SEL 0xC0400D34
+#define LCAC_MC0_OVR_VAL 0xC0400D38
+#define LCAC_MC1_CNTL 0xC0400D3C
+#define LCAC_MC1_OVR_SEL 0xC0400D40
+#define LCAC_MC1_OVR_VAL 0xC0400D44
+
+#define LCAC_MC2_OVR_SEL 0xC0400D4C
+#define LCAC_MC2_OVR_VAL 0xC0400D50
+
+#define LCAC_MC3_OVR_SEL 0xC0400D58
+#define LCAC_MC3_OVR_VAL 0xC0400D5C
+
+#define LCAC_CPL_CNTL 0xC0400D80
+#define LCAC_CPL_OVR_SEL 0xC0400D84
+#define LCAC_CPL_OVR_VAL 0xC0400D88
+
+/* dGPU */
+#define CG_THERMAL_CTRL 0xC0300004
+#define DPM_EVENT_SRC(x) ((x) << 0)
+#define DPM_EVENT_SRC_MASK (7 << 0)
+#define DIG_THERM_DPM(x) ((x) << 14)
+#define DIG_THERM_DPM_MASK 0x003FC000
+#define DIG_THERM_DPM_SHIFT 14
+
+#define CG_THERMAL_INT 0xC030000C
+#define CI_DIG_THERM_INTH(x) ((x) << 8)
+#define CI_DIG_THERM_INTH_MASK 0x0000FF00
+#define CI_DIG_THERM_INTH_SHIFT 8
+#define CI_DIG_THERM_INTL(x) ((x) << 16)
+#define CI_DIG_THERM_INTL_MASK 0x00FF0000
+#define CI_DIG_THERM_INTL_SHIFT 16
+#define THERM_INT_MASK_HIGH (1 << 24)
+#define THERM_INT_MASK_LOW (1 << 25)
+
+#define CG_MULT_THERMAL_STATUS 0xC0300014
+#define ASIC_MAX_TEMP(x) ((x) << 0)
+#define ASIC_MAX_TEMP_MASK 0x000001ff
+#define ASIC_MAX_TEMP_SHIFT 0
+#define CTF_TEMP(x) ((x) << 9)
+#define CTF_TEMP_MASK 0x0003fe00
+#define CTF_TEMP_SHIFT 9
+
+#define CG_SPLL_FUNC_CNTL 0xC0500140
+#define SPLL_RESET (1 << 0)
+#define SPLL_PWRON (1 << 1)
+#define SPLL_BYPASS_EN (1 << 3)
+#define SPLL_REF_DIV(x) ((x) << 5)
+#define SPLL_REF_DIV_MASK (0x3f << 5)
+#define SPLL_PDIV_A(x) ((x) << 20)
+#define SPLL_PDIV_A_MASK (0x7f << 20)
+#define SPLL_PDIV_A_SHIFT 20
+#define CG_SPLL_FUNC_CNTL_2 0xC0500144
+#define SCLK_MUX_SEL(x) ((x) << 0)
+#define SCLK_MUX_SEL_MASK (0x1ff << 0)
+#define CG_SPLL_FUNC_CNTL_3 0xC0500148
+#define SPLL_FB_DIV(x) ((x) << 0)
+#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
+#define SPLL_FB_DIV_SHIFT 0
+#define SPLL_DITHEN (1 << 28)
+#define CG_SPLL_FUNC_CNTL_4 0xC050014C
+
+#define CG_SPLL_SPREAD_SPECTRUM 0xC0500164
+#define SSEN (1 << 0)
+#define CLK_S(x) ((x) << 4)
+#define CLK_S_MASK (0xfff << 4)
+#define CLK_S_SHIFT 4
+#define CG_SPLL_SPREAD_SPECTRUM_2 0xC0500168
+#define CLK_V(x) ((x) << 0)
+#define CLK_V_MASK (0x3ffffff << 0)
+#define CLK_V_SHIFT 0
+
+#define MPLL_BYPASSCLK_SEL 0xC050019C
+# define MPLL_CLKOUT_SEL(x) ((x) << 8)
+# define MPLL_CLKOUT_SEL_MASK 0xFF00
#define CG_CLKPIN_CNTL 0xC05001A0
# define XTALIN_DIVIDE (1 << 1)
-
+# define BCLK_AS_XCLK (1 << 2)
+#define CG_CLKPIN_CNTL_2 0xC05001A4
+# define FORCE_BIF_REFCLK_EN (1 << 3)
+# define MUX_TCLK_TO_XCLK (1 << 8)
+#define THM_CLK_CNTL 0xC05001A8
+# define CMON_CLK_SEL(x) ((x) << 0)
+# define CMON_CLK_SEL_MASK 0xFF
+# define TMON_CLK_SEL(x) ((x) << 8)
+# define TMON_CLK_SEL_MASK 0xFF00
+#define MISC_CLK_CTRL 0xC05001AC
+# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
+# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
+# define ZCLK_SEL(x) ((x) << 8)
+# define ZCLK_SEL_MASK 0xFF00
+
+/* KV/KB */
+#define CG_THERMAL_INT_CTRL 0xC2100028
+#define DIG_THERM_INTH(x) ((x) << 0)
+#define DIG_THERM_INTH_MASK 0x000000FF
+#define DIG_THERM_INTH_SHIFT 0
+#define DIG_THERM_INTL(x) ((x) << 8)
+#define DIG_THERM_INTL_MASK 0x0000FF00
+#define DIG_THERM_INTL_SHIFT 8
+#define THERM_INTH_MASK (1 << 24)
+#define THERM_INTL_MASK (1 << 25)
+
+/* PCIE registers idx/data 0x38/0x3c */
+#define PB0_PIF_PWRDOWN_0 0x1100012 /* PCIE */
+# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
+# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_0_SHIFT 24
+#define PB0_PIF_PWRDOWN_1 0x1100013 /* PCIE */
+# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
+# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_1_SHIFT 24
+
+#define PCIE_CNTL2 0x1001001c /* PCIE */
+# define SLV_MEM_LS_EN (1 << 16)
+# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
+# define MST_MEM_LS_EN (1 << 18)
+# define REPLAY_MEM_LS_EN (1 << 19)
+
+#define PCIE_LC_STATUS1 0x1400028 /* PCIE */
+# define LC_REVERSE_RCVR (1 << 0)
+# define LC_REVERSE_XMIT (1 << 1)
+# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
+# define LC_OPERATING_LINK_WIDTH_SHIFT 2
+# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
+# define LC_DETECTED_LINK_WIDTH_SHIFT 5
+
+#define PCIE_P_CNTL 0x1400040 /* PCIE */
+# define P_IGNORE_EDB_ERR (1 << 6)
+
+#define PB1_PIF_PWRDOWN_0 0x2100012 /* PCIE */
+#define PB1_PIF_PWRDOWN_1 0x2100013 /* PCIE */
+
+#define PCIE_LC_CNTL 0x100100A0 /* PCIE */
+# define LC_L0S_INACTIVITY(x) ((x) << 8)
+# define LC_L0S_INACTIVITY_MASK (0xf << 8)
+# define LC_L0S_INACTIVITY_SHIFT 8
+# define LC_L1_INACTIVITY(x) ((x) << 12)
+# define LC_L1_INACTIVITY_MASK (0xf << 12)
+# define LC_L1_INACTIVITY_SHIFT 12
+# define LC_PMI_TO_L1_DIS (1 << 16)
+# define LC_ASPM_TO_L1_DIS (1 << 24)
+
+#define PCIE_LC_LINK_WIDTH_CNTL 0x100100A2 /* PCIE */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
+# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
+# define LC_DYN_LANES_PWR_STATE_SHIFT 21
+#define PCIE_LC_N_FTS_CNTL 0x100100a3 /* PCIE */
+# define LC_XMIT_N_FTS(x) ((x) << 0)
+# define LC_XMIT_N_FTS_MASK (0xff << 0)
+# define LC_XMIT_N_FTS_SHIFT 0
+# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
+# define LC_N_FTS_MASK (0xff << 24)
+#define PCIE_LC_SPEED_CNTL 0x100100A4 /* PCIE */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_GEN3_EN_STRAP (1 << 1)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
+# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
+# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
+# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
+# define LC_CURRENT_DATA_RATE_SHIFT 13
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
+# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
+# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
+
+#define PCIE_LC_CNTL2 0x100100B1 /* PCIE */
+# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
+# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
+
+#define PCIE_LC_CNTL3 0x100100B5 /* PCIE */
+# define LC_GO_TO_RECOVERY (1 << 30)
+#define PCIE_LC_CNTL4 0x100100B6 /* PCIE */
+# define LC_REDO_EQ (1 << 5)
+# define LC_SET_QUIESCE (1 << 13)
+
+/* direct registers */
#define PCIE_INDEX 0x38
#define PCIE_DATA 0x3C
+#define SMC_IND_INDEX_0 0x200
+#define SMC_IND_DATA_0 0x204
+
+#define SMC_IND_ACCESS_CNTL 0x240
+#define AUTO_INCREMENT_IND_0 (1 << 0)
+
+#define SMC_MESSAGE_0 0x250
+#define SMC_MSG_MASK 0xffff
+#define SMC_RESP_0 0x254
+#define SMC_RESP_MASK 0xffff
+
+#define SMC_MSG_ARG_0 0x290
+
#define VGA_HDP_CONTROL 0x328
#define VGA_MEMORY_DISABLE (1 << 4)
#define DMIF_ADDR_CALC 0xC00
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
#define SRBM_GFX_CNTL 0xE44
#define PIPEID(x) ((x) << 0)
#define MEID(x) ((x) << 2)
@@ -172,6 +526,10 @@
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
+#define VM_L2_CG 0x15c0
+#define MC_CG_ENABLE (1 << 18)
+#define MC_LS_ENABLE (1 << 19)
+
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x0000f000
@@ -201,6 +559,17 @@
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define MC_HUB_MISC_HUB_CG 0x20b8
+#define MC_HUB_MISC_VM_CG 0x20bc
+
+#define MC_HUB_MISC_SIP_CG 0x20c0
+
+#define MC_XPB_CLK_GAT 0x2478
+
+#define MC_CITF_MISC_RD_CG 0x2648
+#define MC_CITF_MISC_WR_CG 0x264c
+#define MC_CITF_MISC_VM_CG 0x2650
+
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
@@ -215,9 +584,37 @@
#define NOOFGROUPS_SHIFT 12
#define NOOFGROUPS_MASK 0x00001000
+#define MC_ARB_DRAM_TIMING 0x2774
+#define MC_ARB_DRAM_TIMING2 0x2778
+
+#define MC_ARB_BURST_TIME 0x2808
+#define STATE0(x) ((x) << 0)
+#define STATE0_MASK (0x1f << 0)
+#define STATE0_SHIFT 0
+#define STATE1(x) ((x) << 5)
+#define STATE1_MASK (0x1f << 5)
+#define STATE1_SHIFT 5
+#define STATE2(x) ((x) << 10)
+#define STATE2_MASK (0x1f << 10)
+#define STATE2_SHIFT 10
+#define STATE3(x) ((x) << 15)
+#define STATE3_MASK (0x1f << 15)
+#define STATE3_SHIFT 15
+
+#define MC_SEQ_RAS_TIMING 0x28a0
+#define MC_SEQ_CAS_TIMING 0x28a4
+#define MC_SEQ_MISC_TIMING 0x28a8
+#define MC_SEQ_MISC_TIMING2 0x28ac
+#define MC_SEQ_PMG_TIMING 0x28b0
+#define MC_SEQ_RD_CTL_D0 0x28b4
+#define MC_SEQ_RD_CTL_D1 0x28b8
+#define MC_SEQ_WR_CTL_D0 0x28bc
+#define MC_SEQ_WR_CTL_D1 0x28c0
+
#define MC_SEQ_SUP_CNTL 0x28c8
#define RUN_MASK (1 << 0)
#define MC_SEQ_SUP_PGM 0x28cc
+#define MC_PMG_AUTO_CMD 0x28d0
#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
#define TRAIN_DONE_D0 (1 << 30)
@@ -226,10 +623,92 @@
#define MC_IO_PAD_CNTL_D0 0x29d0
#define MEM_FALL_OUT_CMD (1 << 8)
+#define MC_SEQ_MISC0 0x2a00
+#define MC_SEQ_MISC0_VEN_ID_SHIFT 8
+#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00
+#define MC_SEQ_MISC0_VEN_ID_VALUE 3
+#define MC_SEQ_MISC0_REV_ID_SHIFT 12
+#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000
+#define MC_SEQ_MISC0_REV_ID_VALUE 1
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+#define MC_SEQ_MISC1 0x2a04
+#define MC_SEQ_RESERVE_M 0x2a08
+#define MC_PMG_CMD_EMRS 0x2a0c
+
#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
#define MC_SEQ_IO_DEBUG_DATA 0x2a48
+#define MC_SEQ_MISC5 0x2a54
+#define MC_SEQ_MISC6 0x2a58
+
+#define MC_SEQ_MISC7 0x2a64
+
+#define MC_SEQ_RAS_TIMING_LP 0x2a6c
+#define MC_SEQ_CAS_TIMING_LP 0x2a70
+#define MC_SEQ_MISC_TIMING_LP 0x2a74
+#define MC_SEQ_MISC_TIMING2_LP 0x2a78
+#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
+#define MC_SEQ_WR_CTL_D1_LP 0x2a80
+#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
+#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
+
+#define MC_PMG_CMD_MRS 0x2aac
+
+#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
+#define MC_SEQ_RD_CTL_D1_LP 0x2b20
+
+#define MC_PMG_CMD_MRS1 0x2b44
+#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
+#define MC_SEQ_PMG_TIMING_LP 0x2b4c
+
+#define MC_SEQ_WR_CTL_2 0x2b54
+#define MC_SEQ_WR_CTL_2_LP 0x2b58
+#define MC_PMG_CMD_MRS2 0x2b5c
+#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
+
+#define MCLK_PWRMGT_CNTL 0x2ba0
+# define DLL_SPEED(x) ((x) << 0)
+# define DLL_SPEED_MASK (0x1f << 0)
+# define DLL_READY (1 << 6)
+# define MC_INT_CNTL (1 << 7)
+# define MRDCK0_PDNB (1 << 8)
+# define MRDCK1_PDNB (1 << 9)
+# define MRDCK0_RESET (1 << 16)
+# define MRDCK1_RESET (1 << 17)
+# define DLL_READY_READ (1 << 24)
+#define DLL_CNTL 0x2ba4
+# define MRDCK0_BYPASS (1 << 24)
+# define MRDCK1_BYPASS (1 << 25)
+
+#define MPLL_FUNC_CNTL 0x2bb4
+#define BWCTRL(x) ((x) << 20)
+#define BWCTRL_MASK (0xff << 20)
+#define MPLL_FUNC_CNTL_1 0x2bb8
+#define VCO_MODE(x) ((x) << 0)
+#define VCO_MODE_MASK (3 << 0)
+#define CLKFRAC(x) ((x) << 4)
+#define CLKFRAC_MASK (0xfff << 4)
+#define CLKF(x) ((x) << 16)
+#define CLKF_MASK (0xfff << 16)
+#define MPLL_FUNC_CNTL_2 0x2bbc
+#define MPLL_AD_FUNC_CNTL 0x2bc0
+#define YCLK_POST_DIV(x) ((x) << 0)
+#define YCLK_POST_DIV_MASK (7 << 0)
+#define MPLL_DQ_FUNC_CNTL 0x2bc4
+#define YCLK_SEL(x) ((x) << 4)
+#define YCLK_SEL_MASK (1 << 4)
+
+#define MPLL_SS1 0x2bcc
+#define CLKV(x) ((x) << 0)
+#define CLKV_MASK (0x3ffffff << 0)
+#define MPLL_SS2 0x2bd0
+#define CLKS(x) ((x) << 0)
+#define CLKS_MASK (0xfff << 0)
+
#define HDP_HOST_PATH_CNTL 0x2C00
+#define CLOCK_GATING_DIS (1 << 23)
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
@@ -237,6 +716,26 @@
#define HDP_ADDR_CONFIG 0x2F48
#define HDP_MISC_CNTL 0x2F4C
#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+#define HDP_MEM_POWER_LS 0x2F50
+#define HDP_LS_ENABLE (1 << 0)
+
+#define ATC_MISC_CG 0x3350
+
+#define MC_SEQ_CNTL_3 0x3600
+# define CAC_EN (1 << 31)
+#define MC_SEQ_G5PDX_CTRL 0x3604
+#define MC_SEQ_G5PDX_CTRL_LP 0x3608
+#define MC_SEQ_G5PDX_CMD0 0x360c
+#define MC_SEQ_G5PDX_CMD0_LP 0x3610
+#define MC_SEQ_G5PDX_CMD1 0x3614
+#define MC_SEQ_G5PDX_CMD1_LP 0x3618
+
+#define MC_SEQ_PMG_DVS_CTL 0x3628
+#define MC_SEQ_PMG_DVS_CTL_LP 0x362c
+#define MC_SEQ_PMG_DVS_CMD 0x3630
+#define MC_SEQ_PMG_DVS_CMD_LP 0x3634
+#define MC_SEQ_DLL_STBY 0x3638
+#define MC_SEQ_DLL_STBY_LP 0x363c
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
@@ -265,6 +764,9 @@
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
# define MC_VMID(x) ((x) << 25)
+#define BIF_LNCNT_RESET 0x5220
+# define RESET_LNCNT_EN (1 << 0)
+
#define CONFIG_MEMSIZE 0x5428
#define INTERRUPT_CNTL 0x5468
@@ -401,6 +903,9 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
+# define STUTTER_ENABLE (1 << 0)
+
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -504,6 +1009,9 @@
#define CP_RB0_RPTR 0x8700
#define CP_RB_WPTR_DELAY 0x8704
+#define CP_RB_WPTR_POLL_CNTL 0x8708
+#define IDLE_POLL_COUNT(x) ((x) << 16)
+#define IDLE_POLL_COUNT_MASK (0xffff << 16)
#define CP_MEQ_THRESHOLDS 0x8764
#define MEQ1_START(x) ((x) << 0)
@@ -730,6 +1238,9 @@
# define CP_RINGID1_INT_STAT (1 << 30)
# define CP_RINGID0_INT_STAT (1 << 31)
+#define CP_MEM_SLP_CNTL 0xC1E4
+# define CP_MEM_LS_EN (1 << 0)
+
#define CP_CPF_DEBUG 0xC200
#define CP_PQ_WPTR_POLL_CNTL 0xC20C
@@ -775,14 +1286,20 @@
#define RLC_MC_CNTL 0xC30C
+#define RLC_MEM_SLP_CNTL 0xC318
+# define RLC_MEM_LS_EN (1 << 0)
+
#define RLC_LB_CNTR_MAX 0xC348
#define RLC_LB_CNTL 0xC364
+# define LOAD_BALANCE_ENABLE (1 << 0)
#define RLC_LB_CNTR_INIT 0xC36C
#define RLC_SAVE_AND_RESTORE_BASE 0xC374
-#define RLC_DRIVER_DMA_STATUS 0xC378
+#define RLC_DRIVER_DMA_STATUS 0xC378 /* dGPU */
+#define RLC_CP_TABLE_RESTORE 0xC378 /* APU */
+#define RLC_PG_DELAY_2 0xC37C
#define RLC_GPM_UCODE_ADDR 0xC388
#define RLC_GPM_UCODE_DATA 0xC38C
@@ -791,12 +1308,52 @@
#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC398
#define RLC_UCODE_CNTL 0xC39C
+#define RLC_GPM_STAT 0xC400
+# define RLC_GPM_BUSY (1 << 0)
+# define GFX_POWER_STATUS (1 << 1)
+# define GFX_CLOCK_STATUS (1 << 2)
+
+#define RLC_PG_CNTL 0xC40C
+# define GFX_PG_ENABLE (1 << 0)
+# define GFX_PG_SRC (1 << 1)
+# define DYN_PER_CU_PG_ENABLE (1 << 2)
+# define STATIC_PER_CU_PG_ENABLE (1 << 3)
+# define DISABLE_GDS_PG (1 << 13)
+# define DISABLE_CP_PG (1 << 15)
+# define SMU_CLK_SLOWDOWN_ON_PU_ENABLE (1 << 17)
+# define SMU_CLK_SLOWDOWN_ON_PD_ENABLE (1 << 18)
+
+#define RLC_CGTT_MGCG_OVERRIDE 0xC420
#define RLC_CGCG_CGLS_CTRL 0xC424
+# define CGCG_EN (1 << 0)
+# define CGLS_EN (1 << 1)
+
+#define RLC_PG_DELAY 0xC434
#define RLC_LB_INIT_CU_MASK 0xC43C
#define RLC_LB_PARAMS 0xC444
+#define RLC_PG_AO_CU_MASK 0xC44C
+
+#define RLC_MAX_PG_CU 0xC450
+# define MAX_PU_CU(x) ((x) << 0)
+# define MAX_PU_CU_MASK (0xff << 0)
+#define RLC_AUTO_PG_CTRL 0xC454
+# define AUTO_PG_EN (1 << 0)
+# define GRBM_REG_SGIT(x) ((x) << 3)
+# define GRBM_REG_SGIT_MASK (0xffff << 3)
+
+#define RLC_SERDES_WR_CU_MASTER_MASK 0xC474
+#define RLC_SERDES_WR_NONCU_MASTER_MASK 0xC478
+#define RLC_SERDES_WR_CTRL 0xC47C
+#define BPM_ADDR(x) ((x) << 0)
+#define BPM_ADDR_MASK (0xff << 0)
+#define CGLS_ENABLE (1 << 16)
+#define CGCG_OVERRIDE_0 (1 << 20)
+#define MGCG_OVERRIDE_0 (1 << 22)
+#define MGCG_OVERRIDE_1 (1 << 23)
+
#define RLC_SERDES_CU_MASTER_BUSY 0xC484
#define RLC_SERDES_NONCU_MASTER_BUSY 0xC488
# define SE_MASTER_BUSY_MASK 0x0000ffff
@@ -807,6 +1364,13 @@
#define RLC_GPM_SCRATCH_ADDR 0xC4B0
#define RLC_GPM_SCRATCH_DATA 0xC4B4
+#define RLC_GPR_REG2 0xC4E8
+#define REQ 0x00000001
+#define MESSAGE(x) ((x) << 1)
+#define MESSAGE_MASK 0x0000001e
+#define MSG_ENTER_RLC_SAFE_MODE 1
+#define MSG_EXIT_RLC_SAFE_MODE 0
+
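RLC_GPR_REG2 carries a request bit plus a small message field, and the two message codes above ask the RLC to enter or leave safe mode. A hedged sketch of the enter side follows; the handshake/polling for the RLC's acknowledgement is omitted and the function name is hypothetical:

/* Illustrative only: request RLC safe mode by encoding a message into
 * RLC_GPR_REG2 with the REQ bit set. Real code must also poll for the
 * RLC to acknowledge before touching protected registers.
 */
static void example_request_rlc_safe_mode(struct radeon_device *rdev)
{
	WREG32(RLC_GPR_REG2, REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE));
}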
#define CP_HPD_EOP_BASE_ADDR 0xC904
#define CP_HPD_EOP_BASE_ADDR_HI 0xC908
#define CP_HPD_EOP_VMID 0xC90C
@@ -851,6 +1415,8 @@
#define MQD_VMID(x) ((x) << 0)
#define MQD_VMID_MASK (0xf << 0)
+#define DB_RENDER_CONTROL 0x28000
+
#define PA_SC_RASTER_CONFIG 0x28350
# define RASTER_CONFIG_RB_MAP_0 0
# define RASTER_CONFIG_RB_MAP_1 1
@@ -944,6 +1510,16 @@
#define CP_PERFMON_CNTL 0x36020
+#define CGTS_SM_CTRL_REG 0x3c000
+#define SM_MODE(x) ((x) << 17)
+#define SM_MODE_MASK (0x7 << 17)
+#define SM_MODE_ENABLE (1 << 20)
+#define CGTS_OVERRIDE (1 << 21)
+#define CGTS_LS_OVERRIDE (1 << 22)
+#define ON_MONITOR_ADD_EN (1 << 23)
+#define ON_MONITOR_ADD(x) ((x) << 24)
+#define ON_MONITOR_ADD_MASK (0xff << 24)
+
#define CGTS_TCC_DISABLE 0x3c00c
#define CGTS_USER_TCC_DISABLE 0x3c010
#define TCC_DISABLE_MASK 0xFFFF0000
@@ -1176,6 +1752,8 @@
#define SDMA0_UCODE_ADDR 0xD000
#define SDMA0_UCODE_DATA 0xD004
+#define SDMA0_POWER_CNTL 0xD008
+#define SDMA0_CLK_CTRL 0xD00C
#define SDMA0_CNTL 0xD010
# define TRAP_ENABLE (1 << 0)
@@ -1300,6 +1878,13 @@
#define UVD_RBC_RB_RPTR 0xf690
#define UVD_RBC_RB_WPTR 0xf694
+#define UVD_CGC_CTRL 0xF4B0
+# define DCM (1 << 0)
+# define CG_DT(x) ((x) << 2)
+# define CG_DT_MASK (0xf << 2)
+# define CLK_OD(x) ((x) << 6)
+# define CLK_OD_MASK (0x1f << 6)
+
/* UVD clocks */
#define CG_DCLK_CNTL 0xC050009C
@@ -1310,4 +1895,7 @@
#define CG_VCLK_CNTL 0xC05000A4
#define CG_VCLK_STATUS 0xC05000A8
+/* UVD CTX indirect */
+#define UVD_CGC_MEM_CTRL 0xC0
+
#endif
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
index c00339440c5e..aa908c55a513 100644
--- a/drivers/gpu/drm/radeon/clearstate_cayman.h
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -1073,7 +1073,7 @@ static const struct cs_extent_def SECT_CTRLCONST_defs[] =
{SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
{ 0, 0, 0 }
};
-struct cs_section_def cayman_cs_data[] = {
+static const struct cs_section_def cayman_cs_data[] = {
{ SECT_CONTEXT_defs, SECT_CONTEXT },
{ SECT_CLEAR_defs, SECT_CLEAR },
{ SECT_CTRLCONST_defs, SECT_CTRLCONST },
diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h
new file mode 100644
index 000000000000..c3982f9475fb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_ci.h
@@ -0,0 +1,944 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const unsigned int ci_SECT_CONTEXT_def_1[] =
+{
+ 0x00000000, // DB_RENDER_CONTROL
+ 0x00000000, // DB_COUNT_CONTROL
+ 0x00000000, // DB_DEPTH_VIEW
+ 0x00000000, // DB_RENDER_OVERRIDE
+ 0x00000000, // DB_RENDER_OVERRIDE2
+ 0x00000000, // DB_HTILE_DATA_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_BOUNDS_MIN
+ 0x00000000, // DB_DEPTH_BOUNDS_MAX
+ 0x00000000, // DB_STENCIL_CLEAR
+ 0x00000000, // DB_DEPTH_CLEAR
+ 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+ 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_INFO
+ 0x00000000, // DB_Z_INFO
+ 0x00000000, // DB_STENCIL_INFO
+ 0x00000000, // DB_Z_READ_BASE
+ 0x00000000, // DB_STENCIL_READ_BASE
+ 0x00000000, // DB_Z_WRITE_BASE
+ 0x00000000, // DB_STENCIL_WRITE_BASE
+ 0x00000000, // DB_DEPTH_SIZE
+ 0x00000000, // DB_DEPTH_SLICE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // TA_BC_BASE_ADDR
+ 0x00000000, // TA_BC_BASE_ADDR_HI
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // COHER_DEST_BASE_HI_0
+ 0x00000000, // COHER_DEST_BASE_HI_1
+ 0x00000000, // COHER_DEST_BASE_HI_2
+ 0x00000000, // COHER_DEST_BASE_HI_3
+ 0x00000000, // COHER_DEST_BASE_2
+ 0x00000000, // COHER_DEST_BASE_3
+ 0x00000000, // PA_SC_WINDOW_OFFSET
+ 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+ 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+ 0x0000ffff, // PA_SC_CLIPRECT_RULE
+ 0x00000000, // PA_SC_CLIPRECT_0_TL
+ 0x40004000, // PA_SC_CLIPRECT_0_BR
+ 0x00000000, // PA_SC_CLIPRECT_1_TL
+ 0x40004000, // PA_SC_CLIPRECT_1_BR
+ 0x00000000, // PA_SC_CLIPRECT_2_TL
+ 0x40004000, // PA_SC_CLIPRECT_2_BR
+ 0x00000000, // PA_SC_CLIPRECT_3_TL
+ 0x40004000, // PA_SC_CLIPRECT_3_BR
+ 0xaa99aaaa, // PA_SC_EDGERULE
+ 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+ 0xffffffff, // CB_TARGET_MASK
+ 0xffffffff, // CB_SHADER_MASK
+ 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+ 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+ 0x00000000, // COHER_DEST_BASE_0
+ 0x00000000, // COHER_DEST_BASE_1
+ 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+ 0x00000000, // PA_SC_VPORT_ZMIN_0
+ 0x3f800000, // PA_SC_VPORT_ZMAX_0
+ 0x00000000, // PA_SC_VPORT_ZMIN_1
+ 0x3f800000, // PA_SC_VPORT_ZMAX_1
+ 0x00000000, // PA_SC_VPORT_ZMIN_2
+ 0x3f800000, // PA_SC_VPORT_ZMAX_2
+ 0x00000000, // PA_SC_VPORT_ZMIN_3
+ 0x3f800000, // PA_SC_VPORT_ZMAX_3
+ 0x00000000, // PA_SC_VPORT_ZMIN_4
+ 0x3f800000, // PA_SC_VPORT_ZMAX_4
+ 0x00000000, // PA_SC_VPORT_ZMIN_5
+ 0x3f800000, // PA_SC_VPORT_ZMAX_5
+ 0x00000000, // PA_SC_VPORT_ZMIN_6
+ 0x3f800000, // PA_SC_VPORT_ZMAX_6
+ 0x00000000, // PA_SC_VPORT_ZMIN_7
+ 0x3f800000, // PA_SC_VPORT_ZMAX_7
+ 0x00000000, // PA_SC_VPORT_ZMIN_8
+ 0x3f800000, // PA_SC_VPORT_ZMAX_8
+ 0x00000000, // PA_SC_VPORT_ZMIN_9
+ 0x3f800000, // PA_SC_VPORT_ZMAX_9
+ 0x00000000, // PA_SC_VPORT_ZMIN_10
+ 0x3f800000, // PA_SC_VPORT_ZMAX_10
+ 0x00000000, // PA_SC_VPORT_ZMIN_11
+ 0x3f800000, // PA_SC_VPORT_ZMAX_11
+ 0x00000000, // PA_SC_VPORT_ZMIN_12
+ 0x3f800000, // PA_SC_VPORT_ZMAX_12
+ 0x00000000, // PA_SC_VPORT_ZMIN_13
+ 0x3f800000, // PA_SC_VPORT_ZMAX_13
+ 0x00000000, // PA_SC_VPORT_ZMIN_14
+ 0x3f800000, // PA_SC_VPORT_ZMAX_14
+ 0x00000000, // PA_SC_VPORT_ZMIN_15
+ 0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const unsigned int ci_SECT_CONTEXT_def_2[] =
+{
+ 0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
+ 0, // HOLE
+ 0x00000000, // CP_PERFMON_CNTX_CNTL
+ 0x00000000, // CP_RINGID
+ 0x00000000, // CP_VMID
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0xffffffff, // VGT_MAX_VTX_INDX
+ 0x00000000, // VGT_MIN_VTX_INDX
+ 0x00000000, // VGT_INDX_OFFSET
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+ 0, // HOLE
+ 0x00000000, // CB_BLEND_RED
+ 0x00000000, // CB_BLEND_GREEN
+ 0x00000000, // CB_BLEND_BLUE
+ 0x00000000, // CB_BLEND_ALPHA
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // DB_STENCIL_CONTROL
+ 0x00000000, // DB_STENCILREFMASK
+ 0x00000000, // DB_STENCILREFMASK_BF
+ 0, // HOLE
+ 0x00000000, // PA_CL_VPORT_XSCALE
+ 0x00000000, // PA_CL_VPORT_XOFFSET
+ 0x00000000, // PA_CL_VPORT_YSCALE
+ 0x00000000, // PA_CL_VPORT_YOFFSET
+ 0x00000000, // PA_CL_VPORT_ZSCALE
+ 0x00000000, // PA_CL_VPORT_ZOFFSET
+ 0x00000000, // PA_CL_VPORT_XSCALE_1
+ 0x00000000, // PA_CL_VPORT_XOFFSET_1
+ 0x00000000, // PA_CL_VPORT_YSCALE_1
+ 0x00000000, // PA_CL_VPORT_YOFFSET_1
+ 0x00000000, // PA_CL_VPORT_ZSCALE_1
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_1
+ 0x00000000, // PA_CL_VPORT_XSCALE_2
+ 0x00000000, // PA_CL_VPORT_XOFFSET_2
+ 0x00000000, // PA_CL_VPORT_YSCALE_2
+ 0x00000000, // PA_CL_VPORT_YOFFSET_2
+ 0x00000000, // PA_CL_VPORT_ZSCALE_2
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_2
+ 0x00000000, // PA_CL_VPORT_XSCALE_3
+ 0x00000000, // PA_CL_VPORT_XOFFSET_3
+ 0x00000000, // PA_CL_VPORT_YSCALE_3
+ 0x00000000, // PA_CL_VPORT_YOFFSET_3
+ 0x00000000, // PA_CL_VPORT_ZSCALE_3
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_3
+ 0x00000000, // PA_CL_VPORT_XSCALE_4
+ 0x00000000, // PA_CL_VPORT_XOFFSET_4
+ 0x00000000, // PA_CL_VPORT_YSCALE_4
+ 0x00000000, // PA_CL_VPORT_YOFFSET_4
+ 0x00000000, // PA_CL_VPORT_ZSCALE_4
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_4
+ 0x00000000, // PA_CL_VPORT_XSCALE_5
+ 0x00000000, // PA_CL_VPORT_XOFFSET_5
+ 0x00000000, // PA_CL_VPORT_YSCALE_5
+ 0x00000000, // PA_CL_VPORT_YOFFSET_5
+ 0x00000000, // PA_CL_VPORT_ZSCALE_5
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_5
+ 0x00000000, // PA_CL_VPORT_XSCALE_6
+ 0x00000000, // PA_CL_VPORT_XOFFSET_6
+ 0x00000000, // PA_CL_VPORT_YSCALE_6
+ 0x00000000, // PA_CL_VPORT_YOFFSET_6
+ 0x00000000, // PA_CL_VPORT_ZSCALE_6
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_6
+ 0x00000000, // PA_CL_VPORT_XSCALE_7
+ 0x00000000, // PA_CL_VPORT_XOFFSET_7
+ 0x00000000, // PA_CL_VPORT_YSCALE_7
+ 0x00000000, // PA_CL_VPORT_YOFFSET_7
+ 0x00000000, // PA_CL_VPORT_ZSCALE_7
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_7
+ 0x00000000, // PA_CL_VPORT_XSCALE_8
+ 0x00000000, // PA_CL_VPORT_XOFFSET_8
+ 0x00000000, // PA_CL_VPORT_YSCALE_8
+ 0x00000000, // PA_CL_VPORT_YOFFSET_8
+ 0x00000000, // PA_CL_VPORT_ZSCALE_8
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_8
+ 0x00000000, // PA_CL_VPORT_XSCALE_9
+ 0x00000000, // PA_CL_VPORT_XOFFSET_9
+ 0x00000000, // PA_CL_VPORT_YSCALE_9
+ 0x00000000, // PA_CL_VPORT_YOFFSET_9
+ 0x00000000, // PA_CL_VPORT_ZSCALE_9
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_9
+ 0x00000000, // PA_CL_VPORT_XSCALE_10
+ 0x00000000, // PA_CL_VPORT_XOFFSET_10
+ 0x00000000, // PA_CL_VPORT_YSCALE_10
+ 0x00000000, // PA_CL_VPORT_YOFFSET_10
+ 0x00000000, // PA_CL_VPORT_ZSCALE_10
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_10
+ 0x00000000, // PA_CL_VPORT_XSCALE_11
+ 0x00000000, // PA_CL_VPORT_XOFFSET_11
+ 0x00000000, // PA_CL_VPORT_YSCALE_11
+ 0x00000000, // PA_CL_VPORT_YOFFSET_11
+ 0x00000000, // PA_CL_VPORT_ZSCALE_11
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_11
+ 0x00000000, // PA_CL_VPORT_XSCALE_12
+ 0x00000000, // PA_CL_VPORT_XOFFSET_12
+ 0x00000000, // PA_CL_VPORT_YSCALE_12
+ 0x00000000, // PA_CL_VPORT_YOFFSET_12
+ 0x00000000, // PA_CL_VPORT_ZSCALE_12
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_12
+ 0x00000000, // PA_CL_VPORT_XSCALE_13
+ 0x00000000, // PA_CL_VPORT_XOFFSET_13
+ 0x00000000, // PA_CL_VPORT_YSCALE_13
+ 0x00000000, // PA_CL_VPORT_YOFFSET_13
+ 0x00000000, // PA_CL_VPORT_ZSCALE_13
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_13
+ 0x00000000, // PA_CL_VPORT_XSCALE_14
+ 0x00000000, // PA_CL_VPORT_XOFFSET_14
+ 0x00000000, // PA_CL_VPORT_YSCALE_14
+ 0x00000000, // PA_CL_VPORT_YOFFSET_14
+ 0x00000000, // PA_CL_VPORT_ZSCALE_14
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_14
+ 0x00000000, // PA_CL_VPORT_XSCALE_15
+ 0x00000000, // PA_CL_VPORT_XOFFSET_15
+ 0x00000000, // PA_CL_VPORT_YSCALE_15
+ 0x00000000, // PA_CL_VPORT_YOFFSET_15
+ 0x00000000, // PA_CL_VPORT_ZSCALE_15
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_15
+ 0x00000000, // PA_CL_UCP_0_X
+ 0x00000000, // PA_CL_UCP_0_Y
+ 0x00000000, // PA_CL_UCP_0_Z
+ 0x00000000, // PA_CL_UCP_0_W
+ 0x00000000, // PA_CL_UCP_1_X
+ 0x00000000, // PA_CL_UCP_1_Y
+ 0x00000000, // PA_CL_UCP_1_Z
+ 0x00000000, // PA_CL_UCP_1_W
+ 0x00000000, // PA_CL_UCP_2_X
+ 0x00000000, // PA_CL_UCP_2_Y
+ 0x00000000, // PA_CL_UCP_2_Z
+ 0x00000000, // PA_CL_UCP_2_W
+ 0x00000000, // PA_CL_UCP_3_X
+ 0x00000000, // PA_CL_UCP_3_Y
+ 0x00000000, // PA_CL_UCP_3_Z
+ 0x00000000, // PA_CL_UCP_3_W
+ 0x00000000, // PA_CL_UCP_4_X
+ 0x00000000, // PA_CL_UCP_4_Y
+ 0x00000000, // PA_CL_UCP_4_Z
+ 0x00000000, // PA_CL_UCP_4_W
+ 0x00000000, // PA_CL_UCP_5_X
+ 0x00000000, // PA_CL_UCP_5_Y
+ 0x00000000, // PA_CL_UCP_5_Z
+ 0x00000000, // PA_CL_UCP_5_W
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_CNTL_0
+ 0x00000000, // SPI_PS_INPUT_CNTL_1
+ 0x00000000, // SPI_PS_INPUT_CNTL_2
+ 0x00000000, // SPI_PS_INPUT_CNTL_3
+ 0x00000000, // SPI_PS_INPUT_CNTL_4
+ 0x00000000, // SPI_PS_INPUT_CNTL_5
+ 0x00000000, // SPI_PS_INPUT_CNTL_6
+ 0x00000000, // SPI_PS_INPUT_CNTL_7
+ 0x00000000, // SPI_PS_INPUT_CNTL_8
+ 0x00000000, // SPI_PS_INPUT_CNTL_9
+ 0x00000000, // SPI_PS_INPUT_CNTL_10
+ 0x00000000, // SPI_PS_INPUT_CNTL_11
+ 0x00000000, // SPI_PS_INPUT_CNTL_12
+ 0x00000000, // SPI_PS_INPUT_CNTL_13
+ 0x00000000, // SPI_PS_INPUT_CNTL_14
+ 0x00000000, // SPI_PS_INPUT_CNTL_15
+ 0x00000000, // SPI_PS_INPUT_CNTL_16
+ 0x00000000, // SPI_PS_INPUT_CNTL_17
+ 0x00000000, // SPI_PS_INPUT_CNTL_18
+ 0x00000000, // SPI_PS_INPUT_CNTL_19
+ 0x00000000, // SPI_PS_INPUT_CNTL_20
+ 0x00000000, // SPI_PS_INPUT_CNTL_21
+ 0x00000000, // SPI_PS_INPUT_CNTL_22
+ 0x00000000, // SPI_PS_INPUT_CNTL_23
+ 0x00000000, // SPI_PS_INPUT_CNTL_24
+ 0x00000000, // SPI_PS_INPUT_CNTL_25
+ 0x00000000, // SPI_PS_INPUT_CNTL_26
+ 0x00000000, // SPI_PS_INPUT_CNTL_27
+ 0x00000000, // SPI_PS_INPUT_CNTL_28
+ 0x00000000, // SPI_PS_INPUT_CNTL_29
+ 0x00000000, // SPI_PS_INPUT_CNTL_30
+ 0x00000000, // SPI_PS_INPUT_CNTL_31
+ 0x00000000, // SPI_VS_OUT_CONFIG
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_ENA
+ 0x00000000, // SPI_PS_INPUT_ADDR
+ 0x00000000, // SPI_INTERP_CONTROL_0
+ 0x00000002, // SPI_PS_IN_CONTROL
+ 0, // HOLE
+ 0x00000000, // SPI_BARYC_CNTL
+ 0, // HOLE
+ 0x00000000, // SPI_TMPRING_SIZE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_SHADER_POS_FORMAT
+ 0x00000000, // SPI_SHADER_Z_FORMAT
+ 0x00000000, // SPI_SHADER_COL_FORMAT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_BLEND0_CONTROL
+ 0x00000000, // CB_BLEND1_CONTROL
+ 0x00000000, // CB_BLEND2_CONTROL
+ 0x00000000, // CB_BLEND3_CONTROL
+ 0x00000000, // CB_BLEND4_CONTROL
+ 0x00000000, // CB_BLEND5_CONTROL
+ 0x00000000, // CB_BLEND6_CONTROL
+ 0x00000000, // CB_BLEND7_CONTROL
+};
+static const unsigned int ci_SECT_CONTEXT_def_3[] =
+{
+ 0x00000000, // PA_CL_POINT_X_RAD
+ 0x00000000, // PA_CL_POINT_Y_RAD
+ 0x00000000, // PA_CL_POINT_SIZE
+ 0x00000000, // PA_CL_POINT_CULL_RAD
+ 0x00000000, // VGT_DMA_BASE_HI
+ 0x00000000, // VGT_DMA_BASE
+};
+static const unsigned int ci_SECT_CONTEXT_def_4[] =
+{
+ 0x00000000, // DB_DEPTH_CONTROL
+ 0x00000000, // DB_EQAA
+ 0x00000000, // CB_COLOR_CONTROL
+ 0x00000000, // DB_SHADER_CONTROL
+ 0x00090000, // PA_CL_CLIP_CNTL
+ 0x00000004, // PA_SU_SC_MODE_CNTL
+ 0x00000000, // PA_CL_VTE_CNTL
+ 0x00000000, // PA_CL_VS_OUT_CNTL
+ 0x00000000, // PA_CL_NANINF_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+ 0x00000000, // PA_SU_PRIM_FILTER_CNTL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SU_POINT_SIZE
+ 0x00000000, // PA_SU_POINT_MINMAX
+ 0x00000000, // PA_SU_LINE_CNTL
+ 0x00000000, // PA_SC_LINE_STIPPLE
+ 0x00000000, // VGT_OUTPUT_PATH_CNTL
+ 0x00000000, // VGT_HOS_CNTL
+ 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+ 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+ 0x00000000, // VGT_HOS_REUSE_DEPTH
+ 0x00000000, // VGT_GROUP_PRIM_TYPE
+ 0x00000000, // VGT_GROUP_FIRST_DECR
+ 0x00000000, // VGT_GROUP_DECR
+ 0x00000000, // VGT_GROUP_VECT_0_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_CNTL
+ 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+ 0x00000000, // VGT_GS_MODE
+ 0x00000000, // VGT_GS_ONCHIP_CNTL
+ 0x00000000, // PA_SC_MODE_CNTL_0
+ 0x00000000, // PA_SC_MODE_CNTL_1
+ 0x00000000, // VGT_ENHANCE
+ 0x00000100, // VGT_GS_PER_ES
+ 0x00000080, // VGT_ES_PER_GS
+ 0x00000002, // VGT_GS_PER_VS
+ 0x00000000, // VGT_GSVS_RING_OFFSET_1
+ 0x00000000, // VGT_GSVS_RING_OFFSET_2
+ 0x00000000, // VGT_GSVS_RING_OFFSET_3
+ 0x00000000, // VGT_GS_OUT_PRIM_TYPE
+ 0x00000000, // IA_ENHANCE
+};
+static const unsigned int ci_SECT_CONTEXT_def_5[] =
+{
+ 0x00000000, // WD_ENHANCE
+ 0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const unsigned int ci_SECT_CONTEXT_def_6[] =
+{
+ 0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const unsigned int ci_SECT_CONTEXT_def_7[] =
+{
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_0
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_1
+ 0x000000ff, // IA_MULTI_VGT_PARAM
+ 0x00000000, // VGT_ESGS_RING_ITEMSIZE
+ 0x00000000, // VGT_GSVS_RING_ITEMSIZE
+ 0x00000000, // VGT_REUSE_OFF
+ 0x00000000, // VGT_VTX_CNT_EN
+ 0x00000000, // DB_HTILE_SURFACE
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE0
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE1
+ 0x00000000, // DB_PRELOAD_CONTROL
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+ 0, // HOLE
+ 0x00000000, // VGT_GS_MAX_VERT_OUT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_SHADER_STAGES_EN
+ 0x00000000, // VGT_LS_HS_CONFIG
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+ 0x00000000, // VGT_TF_PARAM
+ 0x00000000, // DB_ALPHA_TO_MASK
+ 0, // HOLE
+ 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+ 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+ 0x00000000, // VGT_GS_INSTANCE_CNT
+ 0x00000000, // VGT_STRMOUT_CONFIG
+ 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_0
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_1
+ 0x00001000, // PA_SC_LINE_CNTL
+ 0x00000000, // PA_SC_AA_CONFIG
+ 0x00000005, // PA_SU_VTX_CNTL
+ 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+ 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+ 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+ 0x00000010, // VGT_OUT_DEALLOC_CNTL
+ 0x00000000, // CB_COLOR0_BASE
+ 0x00000000, // CB_COLOR0_PITCH
+ 0x00000000, // CB_COLOR0_SLICE
+ 0x00000000, // CB_COLOR0_VIEW
+ 0x00000000, // CB_COLOR0_INFO
+ 0x00000000, // CB_COLOR0_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR0_CMASK
+ 0x00000000, // CB_COLOR0_CMASK_SLICE
+ 0x00000000, // CB_COLOR0_FMASK
+ 0x00000000, // CB_COLOR0_FMASK_SLICE
+ 0x00000000, // CB_COLOR0_CLEAR_WORD0
+ 0x00000000, // CB_COLOR0_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_BASE
+ 0x00000000, // CB_COLOR1_PITCH
+ 0x00000000, // CB_COLOR1_SLICE
+ 0x00000000, // CB_COLOR1_VIEW
+ 0x00000000, // CB_COLOR1_INFO
+ 0x00000000, // CB_COLOR1_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_CMASK
+ 0x00000000, // CB_COLOR1_CMASK_SLICE
+ 0x00000000, // CB_COLOR1_FMASK
+ 0x00000000, // CB_COLOR1_FMASK_SLICE
+ 0x00000000, // CB_COLOR1_CLEAR_WORD0
+ 0x00000000, // CB_COLOR1_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_BASE
+ 0x00000000, // CB_COLOR2_PITCH
+ 0x00000000, // CB_COLOR2_SLICE
+ 0x00000000, // CB_COLOR2_VIEW
+ 0x00000000, // CB_COLOR2_INFO
+ 0x00000000, // CB_COLOR2_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_CMASK
+ 0x00000000, // CB_COLOR2_CMASK_SLICE
+ 0x00000000, // CB_COLOR2_FMASK
+ 0x00000000, // CB_COLOR2_FMASK_SLICE
+ 0x00000000, // CB_COLOR2_CLEAR_WORD0
+ 0x00000000, // CB_COLOR2_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_BASE
+ 0x00000000, // CB_COLOR3_PITCH
+ 0x00000000, // CB_COLOR3_SLICE
+ 0x00000000, // CB_COLOR3_VIEW
+ 0x00000000, // CB_COLOR3_INFO
+ 0x00000000, // CB_COLOR3_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_CMASK
+ 0x00000000, // CB_COLOR3_CMASK_SLICE
+ 0x00000000, // CB_COLOR3_FMASK
+ 0x00000000, // CB_COLOR3_FMASK_SLICE
+ 0x00000000, // CB_COLOR3_CLEAR_WORD0
+ 0x00000000, // CB_COLOR3_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_BASE
+ 0x00000000, // CB_COLOR4_PITCH
+ 0x00000000, // CB_COLOR4_SLICE
+ 0x00000000, // CB_COLOR4_VIEW
+ 0x00000000, // CB_COLOR4_INFO
+ 0x00000000, // CB_COLOR4_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_CMASK
+ 0x00000000, // CB_COLOR4_CMASK_SLICE
+ 0x00000000, // CB_COLOR4_FMASK
+ 0x00000000, // CB_COLOR4_FMASK_SLICE
+ 0x00000000, // CB_COLOR4_CLEAR_WORD0
+ 0x00000000, // CB_COLOR4_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_BASE
+ 0x00000000, // CB_COLOR5_PITCH
+ 0x00000000, // CB_COLOR5_SLICE
+ 0x00000000, // CB_COLOR5_VIEW
+ 0x00000000, // CB_COLOR5_INFO
+ 0x00000000, // CB_COLOR5_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_CMASK
+ 0x00000000, // CB_COLOR5_CMASK_SLICE
+ 0x00000000, // CB_COLOR5_FMASK
+ 0x00000000, // CB_COLOR5_FMASK_SLICE
+ 0x00000000, // CB_COLOR5_CLEAR_WORD0
+ 0x00000000, // CB_COLOR5_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_BASE
+ 0x00000000, // CB_COLOR6_PITCH
+ 0x00000000, // CB_COLOR6_SLICE
+ 0x00000000, // CB_COLOR6_VIEW
+ 0x00000000, // CB_COLOR6_INFO
+ 0x00000000, // CB_COLOR6_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_CMASK
+ 0x00000000, // CB_COLOR6_CMASK_SLICE
+ 0x00000000, // CB_COLOR6_FMASK
+ 0x00000000, // CB_COLOR6_FMASK_SLICE
+ 0x00000000, // CB_COLOR6_CLEAR_WORD0
+ 0x00000000, // CB_COLOR6_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_BASE
+ 0x00000000, // CB_COLOR7_PITCH
+ 0x00000000, // CB_COLOR7_SLICE
+ 0x00000000, // CB_COLOR7_VIEW
+ 0x00000000, // CB_COLOR7_INFO
+ 0x00000000, // CB_COLOR7_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_CMASK
+ 0x00000000, // CB_COLOR7_CMASK_SLICE
+ 0x00000000, // CB_COLOR7_FMASK
+ 0x00000000, // CB_COLOR7_FMASK_SLICE
+ 0x00000000, // CB_COLOR7_CLEAR_WORD0
+ 0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def ci_SECT_CONTEXT_defs[] =
+{
+ {ci_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+ {ci_SECT_CONTEXT_def_2, 0x0000a0d6, 274 },
+ {ci_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+ {ci_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+ {ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
+ {ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+ {ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+ { 0, 0, 0 }
+};
+static const struct cs_section_def ci_cs_data[] = {
+ { ci_SECT_CONTEXT_defs, SECT_CONTEXT },
+ { 0, SECT_NONE }
+};
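The clear-state tables in this header are zero-terminated on two levels: ci_cs_data lists sections, each section lists extents, and each extent names a register array, its base offset, and a count. The sizing walk below, the same pattern sumo_rlc_init() uses later in this patch, shows how the driver consumes them; the helper name is illustrative:

/* Illustrative only: count the register dwords described by a
 * zero-terminated cs_section_def table such as ci_cs_data.
 */
static u32 example_cs_data_dw_count(const struct cs_section_def *cs_data)
{
	u32 dws = 0;
	int i, j;

	for (i = 0; cs_data[i].section != NULL; i++)
		for (j = 0; cs_data[i].section[j].extent != NULL; j++)
			dws += cs_data[i].section[j].reg_count;

	return dws;
}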
diff --git a/drivers/gpu/drm/radeon/clearstate_evergreen.h b/drivers/gpu/drm/radeon/clearstate_evergreen.h
index 4791d856b7fd..63a1ffbb3ced 100644
--- a/drivers/gpu/drm/radeon/clearstate_evergreen.h
+++ b/drivers/gpu/drm/radeon/clearstate_evergreen.h
@@ -1072,7 +1072,7 @@ static const struct cs_extent_def SECT_CTRLCONST_defs[] =
{SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
{ 0, 0, 0 }
};
-struct cs_section_def evergreen_cs_data[] = {
+static const struct cs_section_def evergreen_cs_data[] = {
{ SECT_CONTEXT_defs, SECT_CONTEXT },
{ SECT_CLEAR_defs, SECT_CLEAR },
{ SECT_CTRLCONST_defs, SECT_CTRLCONST },
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 9bcdd174780f..95a66db08d9b 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- uint16_t data_offset, size;
- uint8_t frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev)
eg_pi->vddci_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -2122,8 +2110,7 @@ int cypress_dpm_init(struct radeon_device *rdev)
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
@@ -2179,7 +2166,8 @@ bool cypress_dpm_vblank_too_short(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
- u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+ /* we never hit the non-gddr5 limit so disable it */
+ u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
if (vblank_time < switch_limit)
return true;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
new file mode 100644
index 000000000000..8953255e894b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "sid.h"
+
+static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
+ u32 block_offset, u32 reg)
+{
+ u32 r;
+
+ WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+ r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
+ return r;
+}
+
+static void dce6_endpoint_wreg(struct radeon_device *rdev,
+ u32 block_offset, u32 reg, u32 v)
+{
+ if (ASIC_IS_DCE8(rdev))
+ WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+ else
+ WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
+ AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
+ WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+}
+
+#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
+#define WREG32_ENDPOINT(block, reg, v) dce6_endpoint_wreg(rdev, (block), (reg), (v))
+
+
+static void dce6_afmt_get_connected_pins(struct radeon_device *rdev)
+{
+ int i;
+ u32 offset, tmp;
+
+ for (i = 0; i < rdev->audio.num_pins; i++) {
+ offset = rdev->audio.pin[i].offset;
+ tmp = RREG32_ENDPOINT(offset,
+ AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
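+ /* a port connectivity field of 1 reports no physical connection */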
+ if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
+ rdev->audio.pin[i].connected = false;
+ else
+ rdev->audio.pin[i].connected = true;
+ }
+}
+
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
+{
+ int i;
+
+ dce6_afmt_get_connected_pins(rdev);
+
+ for (i = 0; i < rdev->audio.num_pins; i++) {
+ if (rdev->audio.pin[i].connected)
+ return &rdev->audio.pin[i];
+ }
+ DRM_ERROR("No connected audio pins found!\n");
+ return NULL;
+}
+
+void dce6_afmt_select_pin(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u32 offset, id;
+
+ if (!dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->offset;
+ id = dig->afmt->pin->id;
+
+ WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id));
+}
+
+void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 offset, tmp;
+ u8 *sadb;
+ int sad_count;
+
+ if (!dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set HDMI mode */
+ tmp |= HDMI_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
+}
+
+void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u32 offset;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
+ static const u16 eld_reg_to_type[][2] = {
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
+
+ if (!dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+ BUG_ON(!sads);
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
+ break;
+ }
+ }
+ WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
+}
+
+static int dce6_audio_chipset_supported(struct radeon_device *rdev)
+{
+ return !ASIC_IS_NODCE(rdev);
+}
+
+static void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
+{
+ WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
+ enable ? AUDIO_ENABLED : 0);
+ DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
+}
+
+static const u32 pin_offsets[7] =
+{
+ (0x5e00 - 0x5e00),
+ (0x5e18 - 0x5e00),
+ (0x5e30 - 0x5e00),
+ (0x5e48 - 0x5e00),
+ (0x5e60 - 0x5e00),
+ (0x5e78 - 0x5e00),
+ (0x5e90 - 0x5e00),
+};
+
+int dce6_audio_init(struct radeon_device *rdev)
+{
+ int i;
+
+ if (!radeon_audio || !dce6_audio_chipset_supported(rdev))
+ return 0;
+
+ rdev->audio.enabled = true;
+
+ if (ASIC_IS_DCE8(rdev))
+ rdev->audio.num_pins = 7;
+ else
+ rdev->audio.num_pins = 6;
+
+ for (i = 0; i < rdev->audio.num_pins; i++) {
+ rdev->audio.pin[i].channels = -1;
+ rdev->audio.pin[i].rate = -1;
+ rdev->audio.pin[i].bits_per_sample = -1;
+ rdev->audio.pin[i].status_bits = 0;
+ rdev->audio.pin[i].category_code = 0;
+ rdev->audio.pin[i].connected = false;
+ rdev->audio.pin[i].offset = pin_offsets[i];
+ rdev->audio.pin[i].id = i;
+ dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
+ }
+
+ return 0;
+}
+
+void dce6_audio_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ if (!rdev->audio.enabled)
+ return;
+
+ for (i = 0; i < rdev->audio.num_pins; i++)
+ dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
+
+ rdev->audio.enabled = false;
+}
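The endpoint helpers at the top of this file are a standard index/data indirect-access pair: the target register number goes into AZ_F0_CODEC_ENDPOINT_INDEX (DCE6 additionally needs AZ_ENDPOINT_REG_WRITE_EN for writes) and the payload moves through AZ_F0_CODEC_ENDPOINT_DATA. A usage sketch with the wrappers and field names from this file; the function name is illustrative:

/* Illustrative only: read-modify-write an endpoint register through the
 * RREG32_ENDPOINT()/WREG32_ENDPOINT() wrappers defined in this file.
 */
static void example_mark_pin_hdmi(struct radeon_device *rdev,
				  u32 pin_offset)
{
	u32 tmp;

	tmp = RREG32_ENDPOINT(pin_offset,
			      AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp &= ~DP_CONNECTION;		/* clear the DP flag... */
	tmp |= HDMI_CONNECTION;		/* ...and mark the sink as HDMI */
	WREG32_ENDPOINT(pin_offset,
			AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}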
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b67c9ec7f690..555164e270a7 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -47,7 +47,7 @@ static const u32 crtc_offsets[6] =
#include "clearstate_evergreen.h"
-static u32 sumo_rlc_save_restore_register_list[] =
+static const u32 sumo_rlc_save_restore_register_list[] =
{
0x98fc,
0x9830,
@@ -131,7 +131,6 @@ static u32 sumo_rlc_save_restore_register_list[] =
0x9150,
0x802c,
};
-static u32 sumo_rlc_save_restore_register_list_size = ARRAY_SIZE(sumo_rlc_save_restore_register_list);
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
@@ -141,6 +140,12 @@ extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
extern void cayman_vm_decode_fault(struct radeon_device *rdev,
u32 status, u32 addr);
+void cik_init_cp_pg_table(struct radeon_device *rdev);
+
+extern u32 si_get_csb_size(struct radeon_device *rdev);
+extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
+extern u32 cik_get_csb_size(struct radeon_device *rdev);
+extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
static const u32 evergreen_golden_registers[] =
{
@@ -1807,7 +1812,8 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
- u32 tmp;
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
@@ -1830,18 +1836,34 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
* non-linked crtcs for maximum line buffer allocation.
*/
if (radeon_crtc->base.enabled && mode) {
- if (other_mode)
+ if (other_mode) {
tmp = 0; /* 1/2 */
- else
+ buffer_alloc = 1;
+ } else {
tmp = 2; /* whole */
- } else
+ buffer_alloc = 2;
+ }
+ } else {
tmp = 0;
+ buffer_alloc = 0;
+ }
/* second controller of the pair uses second half of the lb */
if (radeon_crtc->crtc_id % 2)
tmp += 4;
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+ }
+
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
@@ -3613,7 +3635,7 @@ bool evergreen_is_display_hung(struct radeon_device *rdev)
return true;
}
-static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -3839,28 +3861,6 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * evergreen_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
-
- if (!(reset_mask & RADEON_RESET_DMA)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
/*
* RLC
*/
@@ -3894,147 +3894,231 @@ void sumo_rlc_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->rlc.clear_state_obj);
rdev->rlc.clear_state_obj = NULL;
}
+
+ /* cp table block */
+ if (rdev->rlc.cp_table_obj) {
+ r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ radeon_bo_unpin(rdev->rlc.cp_table_obj);
+ radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+
+ radeon_bo_unref(&rdev->rlc.cp_table_obj);
+ rdev->rlc.cp_table_obj = NULL;
+ }
}
+#define CP_ME_TABLE_SIZE 96
+
int sumo_rlc_init(struct radeon_device *rdev)
{
- u32 *src_ptr;
+ const u32 *src_ptr;
volatile u32 *dst_ptr;
u32 dws, data, i, j, k, reg_num;
- u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index;
+ u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0;
u64 reg_list_mc_addr;
- struct cs_section_def *cs_data;
+ const struct cs_section_def *cs_data;
int r;
src_ptr = rdev->rlc.reg_list;
dws = rdev->rlc.reg_list_size;
+ if (rdev->family >= CHIP_BONAIRE) {
+ dws += (5 * 16) + 48 + 48 + 64;
+ }
cs_data = rdev->rlc.cs_data;
- /* save restore block */
- if (rdev->rlc.save_restore_obj == NULL) {
- r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+ if (src_ptr) {
+ /* save restore block */
+ if (rdev->rlc.save_restore_obj == NULL) {
+ r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+ if (unlikely(r != 0)) {
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.save_restore_gpu_addr);
if (r) {
- dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
+ radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+ dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
+ sumo_rlc_fini(rdev);
return r;
}
- }
- r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.save_restore_gpu_addr);
- if (r) {
+ r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ /* write the sr buffer */
+ dst_ptr = rdev->rlc.sr_ptr;
+ if (rdev->family >= CHIP_TAHITI) {
+ /* SI */
+ for (i = 0; i < rdev->rlc.reg_list_size; i++)
+ dst_ptr[i] = src_ptr[i];
+ } else {
+ /* ON/LN/TN */
+ /* format:
+ * dw0: (reg2 << 16) | reg1
+ * dw1: reg1 save space
+ * dw2: reg2 save space
+ */
+ for (i = 0; i < dws; i++) {
+ data = src_ptr[i] >> 2;
+ i++;
+ if (i < dws)
+ data |= (src_ptr[i] >> 2) << 16;
+ j = (((i - 1) * 3) / 2);
+ dst_ptr[j] = data;
+ }
+ j = ((i * 3) / 2);
+ dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
+ }
+ radeon_bo_kunmap(rdev->rlc.save_restore_obj);
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
}
- r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- /* write the sr buffer */
- dst_ptr = rdev->rlc.sr_ptr;
- /* format:
- * dw0: (reg2 << 16) | reg1
- * dw1: reg1 save space
- * dw2: reg2 save space
- */
- for (i = 0; i < dws; i++) {
- data = src_ptr[i] >> 2;
- i++;
- if (i < dws)
- data |= (src_ptr[i] >> 2) << 16;
- j = (((i - 1) * 3) / 2);
- dst_ptr[j] = data;
- }
- j = ((i * 3) / 2);
- dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
-
- radeon_bo_kunmap(rdev->rlc.save_restore_obj);
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- /* clear state block */
- reg_list_num = 0;
- dws = 0;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_list_num++;
- dws += cs_data[i].section[j].reg_count;
+ if (cs_data) {
+ /* clear state block */
+ if (rdev->family >= CHIP_BONAIRE) {
+ rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev);
+ } else if (rdev->family >= CHIP_TAHITI) {
+ rdev->rlc.clear_state_size = si_get_csb_size(rdev);
+ dws = rdev->rlc.clear_state_size + (256 / 4);
+ } else {
+ reg_list_num = 0;
+ dws = 0;
+ for (i = 0; cs_data[i].section != NULL; i++) {
+ for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
+ reg_list_num++;
+ dws += cs_data[i].section[j].reg_count;
+ }
+ }
+ reg_list_blk_index = (3 * reg_list_num + 2);
+ dws += reg_list_blk_index;
+ rdev->rlc.clear_state_size = dws;
}
- }
- reg_list_blk_index = (3 * reg_list_num + 2);
- dws += reg_list_blk_index;
- if (rdev->rlc.clear_state_obj == NULL) {
- r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+ if (rdev->rlc.clear_state_obj == NULL) {
+ r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ }
+ r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+ if (unlikely(r != 0)) {
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.clear_state_gpu_addr);
if (r) {
- dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+ radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
return r;
}
- }
- r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.clear_state_gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
- dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- /* set up the cs buffer */
- dst_ptr = rdev->rlc.cs_ptr;
- reg_list_hdr_blk_index = 0;
- reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
- data = upper_32_bits(reg_list_mc_addr);
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_num = cs_data[i].section[j].reg_count;
- data = reg_list_mc_addr & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- data = 0x08000000 | (reg_num * 4);
+ r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ /* set up the cs buffer */
+ dst_ptr = rdev->rlc.cs_ptr;
+ if (rdev->family >= CHIP_BONAIRE) {
+ cik_get_csb_buffer(rdev, dst_ptr);
+ } else if (rdev->family >= CHIP_TAHITI) {
+ reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
+ dst_ptr[0] = upper_32_bits(reg_list_mc_addr);
+ dst_ptr[1] = lower_32_bits(reg_list_mc_addr);
+ dst_ptr[2] = rdev->rlc.clear_state_size;
+ si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
+ } else {
+ reg_list_hdr_blk_index = 0;
+ reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
+ data = upper_32_bits(reg_list_mc_addr);
dst_ptr[reg_list_hdr_blk_index] = data;
reg_list_hdr_blk_index++;
-
- for (k = 0; k < reg_num; k++) {
- data = cs_data[i].section[j].extent[k];
- dst_ptr[reg_list_blk_index + k] = data;
+ for (i = 0; cs_data[i].section != NULL; i++) {
+ for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
+ reg_num = cs_data[i].section[j].reg_count;
+ data = reg_list_mc_addr & 0xffffffff;
+ dst_ptr[reg_list_hdr_blk_index] = data;
+ reg_list_hdr_blk_index++;
+
+ data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
+ dst_ptr[reg_list_hdr_blk_index] = data;
+ reg_list_hdr_blk_index++;
+
+ data = 0x08000000 | (reg_num * 4);
+ dst_ptr[reg_list_hdr_blk_index] = data;
+ reg_list_hdr_blk_index++;
+
+ for (k = 0; k < reg_num; k++) {
+ data = cs_data[i].section[j].extent[k];
+ dst_ptr[reg_list_blk_index + k] = data;
+ }
+ reg_list_mc_addr += reg_num * 4;
+ reg_list_blk_index += reg_num;
+ }
}
- reg_list_mc_addr += reg_num * 4;
- reg_list_blk_index += reg_num;
+ dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
}
+ radeon_bo_kunmap(rdev->rlc.clear_state_obj);
+ radeon_bo_unreserve(rdev->rlc.clear_state_obj);
}
- dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
- radeon_bo_kunmap(rdev->rlc.clear_state_obj);
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ if (rdev->rlc.cp_table_size) {
+ if (rdev->rlc.cp_table_obj == NULL) {
+ r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
+ if (unlikely(r != 0)) {
+ dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.cp_table_gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+ dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+
+ cik_init_cp_pg_table(rdev);
+
+ radeon_bo_kunmap(rdev->rlc.cp_table_obj);
+ radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+
+ }
return 0;
}
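A worked example of the pre-SI ("ON/LN/TN") save-restore packing described in sumo_rlc_init() above: each register offset in the list is divided by four to get a dword offset, and two entries are packed per header dword. The helper name is illustrative:

/* Illustrative only: pack two save-restore list entries the way
 * sumo_rlc_init() does for pre-SI parts. For the first two entries of
 * sumo_rlc_save_restore_register_list, 0x98fc and 0x9830, this yields
 * ((0x9830 >> 2) << 16) | (0x98fc >> 2) = 0x260c263f, followed in the
 * buffer by one save-space dword per register.
 */
static u32 example_pack_sr_pair(u32 reg1, u32 reg2)
{
	return ((reg2 >> 2) << 16) | (reg1 >> 2);
}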
@@ -4959,143 +5043,6 @@ restart_ih:
return IRQ_HANDLED;
}
-/**
- * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write
- * the fence seq number and DMA trap packet to generate
- * an interrupt if needed (evergreen-SI).
- */
-void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
- /* write the fence */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
- radeon_ring_write(ring, addr & 0xfffffffc);
- radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
- radeon_ring_write(ring, fence->seq);
- /* generate an interrupt */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
- /* flush HDP */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
- radeon_ring_write(ring, 1);
-}
-
-/**
- * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (evergreen).
- */
-void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
- struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 4;
- while ((next_rptr & 7) != 5)
- next_rptr++;
- next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
- radeon_ring_write(ring, next_rptr);
- }
-
- /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
- * Pad as necessary with NOPs.
- */
- while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
- radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
- radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-
-}
-
-/**
- * evergreen_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (evergreen-cayman).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int evergreen_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_dw, cur_size_in_dw;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
- num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_dw = size_in_dw;
- if (cur_size_in_dw > 0xFFFFF)
- cur_size_in_dw = 0xFFFFF;
- size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, src_offset & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
- src_offset += cur_size_in_dw * 4;
- dst_offset += cur_size_in_dw * 4;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
static int evergreen_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
@@ -5106,6 +5053,13 @@ static int evergreen_startup(struct radeon_device *rdev)
/* enable aspm */
evergreen_program_aspm(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ evergreen_mc_program(rdev);
+
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
@@ -5129,11 +5083,6 @@ static int evergreen_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
- evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
} else {
@@ -5143,17 +5092,11 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_gpu_init(rdev);
- r = evergreen_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
-
/* allocate rlc buffers */
if (rdev->flags & RADEON_IS_IGP) {
rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
- rdev->rlc.reg_list_size = sumo_rlc_save_restore_register_list_size;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list);
rdev->rlc.cs_data = evergreen_cs_data;
r = sumo_rlc_init(rdev);
if (r) {
@@ -5179,7 +5122,7 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
- r = rv770_uvd_resume(rdev);
+ r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
@@ -5208,14 +5151,14 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0));
if (r)
return r;
@@ -5231,12 +5174,11 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
@@ -5291,10 +5233,10 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
- r600_uvd_rbc_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
@@ -5419,7 +5361,6 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- r600_blit_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
@@ -5429,6 +5370,7 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
deleted file mode 100644
index 057c87b6515a..000000000000
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ /dev/null
@@ -1,729 +0,0 @@
-/*
- * Copyright 2010 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Alex Deucher <alexander.deucher@amd.com>
- */
-
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon.h"
-
-#include "evergreend.h"
-#include "evergreen_blit_shaders.h"
-#include "cayman_blit_shaders.h"
-#include "radeon_blit_common.h"
-
-/* emits 17 */
-static void
-set_render_target(struct radeon_device *rdev, int format,
- int w, int h, u64 gpu_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 cb_color_info;
- int pitch, slice;
-
- h = ALIGN(h, 8);
- if (h < 8)
- h = 8;
-
- cb_color_info = CB_FORMAT(format) |
- CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
- CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- pitch = (w / 8) - 1;
- slice = ((w * h) / 64) - 1;
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
- radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, pitch);
- radeon_ring_write(ring, slice);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, cb_color_info);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
-}
-
-/* emits 5dw */
-static void
-cp_set_surface_sync(struct radeon_device *rdev,
- u32 sync_type, u32 size,
- u64 mc_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 cp_coher_size;
-
- if (size == 0xffffffff)
- cp_coher_size = 0xffffffff;
- else
- cp_coher_size = ((size + 255) >> 8);
-
- if (rdev->family >= CHIP_CAYMAN) {
- /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
- * to the RB directly. For IBs, the CP programs this as part of the
- * surface_sync packet.
- */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
- }
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, sync_type);
- radeon_ring_write(ring, cp_coher_size);
- radeon_ring_write(ring, mc_addr >> 8);
- radeon_ring_write(ring, 10); /* poll interval */
-}
-
-/* emits 11dw + 1 surface sync = 16dw */
-static void
-set_shaders(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u64 gpu_addr;
-
- /* VS */
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
- radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, 2);
- radeon_ring_write(ring, 0);
-
- /* PS */
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
- radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, 1);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 2);
-
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
-}
-
-/* emits 10 + 1 sync (5) = 15 */
-static void
-set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
-
- /* high addr, stride */
- sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
- SQ_VTXC_STRIDE(16);
-#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
-#endif
- /* xyzw swizzles */
- sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
- SQ_VTCX_SEL_Y(SQ_SEL_Y) |
- SQ_VTCX_SEL_Z(SQ_SEL_Z) |
- SQ_VTCX_SEL_W(SQ_SEL_W);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(ring, 0x580);
- radeon_ring_write(ring, gpu_addr & 0xffffffff);
- radeon_ring_write(ring, 48 - 1); /* size */
- radeon_ring_write(ring, sq_vtx_constant_word2);
- radeon_ring_write(ring, sq_vtx_constant_word3);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
-
- if ((rdev->family == CHIP_CEDAR) ||
- (rdev->family == CHIP_PALM) ||
- (rdev->family == CHIP_SUMO) ||
- (rdev->family == CHIP_SUMO2) ||
- (rdev->family == CHIP_CAICOS))
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, 48, gpu_addr);
- else
- cp_set_surface_sync(rdev,
- PACKET3_VC_ACTION_ENA, 48, gpu_addr);
-
-}
-
-/* emits 10 */
-static void
-set_tex_resource(struct radeon_device *rdev,
- int format, int w, int h, int pitch,
- u64 gpu_addr, u32 size)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_tex_resource_word0, sq_tex_resource_word1;
- u32 sq_tex_resource_word4, sq_tex_resource_word7;
-
- if (h < 1)
- h = 1;
-
- sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
- sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
- ((w - 1) << 18));
- sq_tex_resource_word1 = ((h - 1) << 0) |
- TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- /* xyzw swizzles */
- sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
- TEX_DST_SEL_Y(SQ_SEL_Y) |
- TEX_DST_SEL_Z(SQ_SEL_Z) |
- TEX_DST_SEL_W(SQ_SEL_W);
-
- sq_tex_resource_word7 = format |
- S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
-
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, size, gpu_addr);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_tex_resource_word0);
- radeon_ring_write(ring, sq_tex_resource_word1);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, sq_tex_resource_word4);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_tex_resource_word7);
-}
-
-/* emits 12 */
-static void
-set_scissors(struct radeon_device *rdev, int x1, int y1,
- int x2, int y2)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- /* workaround some hw bugs */
- if (x2 == 0)
- x1 = 1;
- if (y2 == 0)
- y1 = 1;
- if (rdev->family >= CHIP_CAYMAN) {
- if ((x2 == 1) && (y2 == 1))
- x2 = 2;
- }
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-}
-
-/* emits 10 */
-static void
-draw_auto(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, DI_PT_RECTLIST);
-
- radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
- (2 << 2) |
-#endif
- DI_INDEX_SIZE_16_BIT);
-
- radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(ring, 1);
-
- radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(ring, 3);
- radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
-
-}
-
-/* emits 39 */
-static void
-set_default_state(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
- u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
- u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
- int num_ps_gprs, num_vs_gprs, num_temp_gprs;
- int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
- int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
- int num_hs_threads, num_ls_threads;
- int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
- int num_hs_stack_entries, num_ls_stack_entries;
- u64 gpu_addr;
- int dwords;
-
- /* set clear context state */
- radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(ring, 0);
-
- if (rdev->family < CHIP_CAYMAN) {
- switch (rdev->family) {
- case CHIP_CEDAR:
- default:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 16;
- num_gs_threads = 16;
- num_es_threads = 16;
- num_hs_threads = 16;
- num_ls_threads = 16;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_REDWOOD:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_JUNIPER:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_PALM:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 16;
- num_gs_threads = 16;
- num_es_threads = 16;
- num_hs_threads = 16;
- num_ls_threads = 16;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_SUMO:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 25;
- num_gs_threads = 25;
- num_es_threads = 25;
- num_hs_threads = 25;
- num_ls_threads = 25;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_SUMO2:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 25;
- num_gs_threads = 25;
- num_es_threads = 25;
- num_hs_threads = 25;
- num_ls_threads = 25;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_BARTS:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_TURKS:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_CAICOS:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 10;
- num_gs_threads = 10;
- num_es_threads = 10;
- num_hs_threads = 10;
- num_ls_threads = 10;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- }
-
- if ((rdev->family == CHIP_CEDAR) ||
- (rdev->family == CHIP_PALM) ||
- (rdev->family == CHIP_SUMO) ||
- (rdev->family == CHIP_SUMO2) ||
- (rdev->family == CHIP_CAICOS))
- sq_config = 0;
- else
- sq_config = VC_ENABLE;
-
- sq_config |= (EXPORT_SRC_C |
- CS_PRIO(0) |
- LS_PRIO(0) |
- HS_PRIO(0) |
- PS_PRIO(0) |
- VS_PRIO(1) |
- GS_PRIO(2) |
- ES_PRIO(3));
-
- sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
- NUM_VS_GPRS(num_vs_gprs) |
- NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
- sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
- NUM_ES_GPRS(num_es_gprs));
- sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
- NUM_LS_GPRS(num_ls_gprs));
- sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
- NUM_VS_THREADS(num_vs_threads) |
- NUM_GS_THREADS(num_gs_threads) |
- NUM_ES_THREADS(num_es_threads));
- sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
- NUM_LS_THREADS(num_ls_threads));
- sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
- NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
- sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
- NUM_ES_STACK_ENTRIES(num_es_stack_entries));
- sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
- NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
-
- /* disable dyn gprs */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0);
-
- /* setup LDS */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0x10001000);
-
- /* SQ config */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
- radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, sq_config);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_thread_resource_mgmt);
- radeon_ring_write(ring, sq_thread_resource_mgmt_2);
- radeon_ring_write(ring, sq_stack_resource_mgmt_1);
- radeon_ring_write(ring, sq_stack_resource_mgmt_2);
- radeon_ring_write(ring, sq_stack_resource_mgmt_3);
- }
-
- /* CONTEXT_CONTROL */
- radeon_ring_write(ring, 0xc0012800);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
-
- /* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(ring, 0xc0026f00);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
-
- /* SET_SAMPLER */
- radeon_ring_write(ring, 0xc0036e00);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000012);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
-
- /* set to DX10/11 mode */
- radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(ring, 1);
-
- /* emit an IB pointing at default state */
- dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
- radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(ring, dwords);
-
-}
-
-int evergreen_blit_init(struct radeon_device *rdev)
-{
- u32 obj_size;
- int i, r, dwords;
- void *ptr;
- u32 packet2s[16];
- int num_packet2s = 0;
-
- rdev->r600_blit.primitives.set_render_target = set_render_target;
- rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
- rdev->r600_blit.primitives.set_shaders = set_shaders;
- rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
- rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
- rdev->r600_blit.primitives.set_scissors = set_scissors;
- rdev->r600_blit.primitives.draw_auto = draw_auto;
- rdev->r600_blit.primitives.set_default_state = set_default_state;
-
- rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
- rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
- rdev->r600_blit.ring_size_common += 5; /* done copy */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
-
- rdev->r600_blit.ring_size_per_loop = 74;
- if (rdev->family >= CHIP_CAYMAN)
- rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
-
- rdev->r600_blit.max_dim = 16384;
-
- rdev->r600_blit.state_offset = 0;
-
- if (rdev->family < CHIP_CAYMAN)
- rdev->r600_blit.state_len = evergreen_default_size;
- else
- rdev->r600_blit.state_len = cayman_default_size;
-
- dwords = rdev->r600_blit.state_len;
- while (dwords & 0xf) {
- packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
- dwords++;
- }
-
- obj_size = dwords * 4;
- obj_size = ALIGN(obj_size, 256);
-
- rdev->r600_blit.vs_offset = obj_size;
- if (rdev->family < CHIP_CAYMAN)
- obj_size += evergreen_vs_size * 4;
- else
- obj_size += cayman_vs_size * 4;
- obj_size = ALIGN(obj_size, 256);
-
- rdev->r600_blit.ps_offset = obj_size;
- if (rdev->family < CHIP_CAYMAN)
- obj_size += evergreen_ps_size * 4;
- else
- obj_size += cayman_ps_size * 4;
- obj_size = ALIGN(obj_size, 256);
-
- /* pin copy shader into vram if not already initialized */
- if (!rdev->r600_blit.shader_obj) {
- r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("evergreen failed to allocate shader\n");
- return r;
- }
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
- }
-
- DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
- obj_size,
- rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
- if (r) {
- DRM_ERROR("failed to map blit object %d\n", r);
- return r;
- }
-
- if (rdev->family < CHIP_CAYMAN) {
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- evergreen_default_state, rdev->r600_blit.state_len * 4);
-
- if (num_packet2s)
- memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
- packet2s, num_packet2s * 4);
- for (i = 0; i < evergreen_vs_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
- for (i = 0; i < evergreen_ps_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
- } else {
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- cayman_default_state, rdev->r600_blit.state_len * 4);
-
- if (num_packet2s)
- memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
- packet2s, num_packet2s * 4);
- for (i = 0; i < cayman_vs_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
- for (i = 0; i < cayman_ps_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
- }
- radeon_bo_kunmap(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
- return 0;
-}
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index f85c0af115b5..d43383470cdf 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -300,58 +300,4 @@ const u32 evergreen_default_state[] =
0x00000010, /* */
};
-const u32 evergreen_vs[] =
-{
- 0x00000004,
- 0x80800400,
- 0x0000a03c,
- 0x95000688,
- 0x00004000,
- 0x15200688,
- 0x00000000,
- 0x00000000,
- 0x3c000000,
- 0x67961001,
-#ifdef __BIG_ENDIAN
- 0x000a0000,
-#else
- 0x00080000,
-#endif
- 0x00000000,
- 0x1c000000,
- 0x67961000,
-#ifdef __BIG_ENDIAN
- 0x00020008,
-#else
- 0x00000008,
-#endif
- 0x00000000,
-};
-
-const u32 evergreen_ps[] =
-{
- 0x00000003,
- 0xa00c0000,
- 0x00000008,
- 0x80400000,
- 0x00000000,
- 0x95200688,
- 0x00380400,
- 0x00146b10,
- 0x00380000,
- 0x20146b10,
- 0x00380400,
- 0x40146b00,
- 0x80380000,
- 0x60146b00,
- 0x00000000,
- 0x00000000,
- 0x00000010,
- 0x000d1000,
- 0xb0800000,
- 0x00000000,
-};
-
-const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
-const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
new file mode 100644
index 000000000000..6a0656d00ed0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+
+u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write the fence
+ * sequence number, and a DMA trap packet to generate an
+ * interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, fence->seq);
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
+ /* flush HDP */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+}
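
DMA packets carry 40-bit addresses split across two dwords, which is why the fence address above is masked twice. A small illustrative helper (not in the driver) showing the split, assuming the kernel's upper_32_bits() macro:

	/* Sketch: how the 40-bit DMA address is split across two dwords. */
	static void dma_split_addr(u64 addr, u32 *lo, u32 *hi)
	{
		*lo = addr & 0xfffffffc;		/* bits 31:2, dword aligned */
		*hi = upper_32_bits(addr) & 0xff;	/* bits 39:32 */
	}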
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
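
The padding loop above targets offset 5 mod 8 because the three-dword INDIRECT_BUFFER packet then ends exactly on an 8-dword boundary (5 + 3 == 8). A hypothetical helper computing the NOP count:

	/* Illustrative only: NOPs to emit before the IB packet. */
	static u32 dma_ib_pad_dwords(u32 wptr)
	{
		return (5 - (wptr & 7)) & 7;
	}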
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFFF)
+ cur_size_in_dw = 0xFFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
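
The ring reservation above budgets 5 dwords per COPY packet plus 11 for the fence and semaphore traffic. A worked sizing sketch, assuming 4 KiB GPU pages (RADEON_GPU_PAGE_SHIFT == 12); the helper is illustrative:

	/* Sketch: ring dwords needed to copy num_gpu_pages with the loop above. */
	static unsigned copy_dma_ring_dwords(unsigned num_gpu_pages)
	{
		unsigned size_in_dw = num_gpu_pages * (4096 / 4);	/* 1024 dw/page */
		unsigned num_loops = (size_in_dw + 0xfffff - 1) / 0xfffff;

		return num_loops * 5 + 11;	/* + fence/semaphore overhead */
	}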
+
+/**
+ * evergreen_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & RADEON_RESET_DMA)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index b0d3fb341417..f71ce390aebe 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -32,6 +32,10 @@
#include "evergreend.h"
#include "atom.h"
+extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
+extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
+extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
+
/*
* update the N and CTS parameters for a given pixel clock rate
*/
@@ -54,6 +58,45 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
}
+static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 tmp;
+ u8 *sadb;
+ int sad_count;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set HDMI mode */
+ tmp |= HDMI_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
+}
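
As an illustration of the field packing programmed above, a 5.1 layout (CEA speaker-allocation byte 0x0f, an assumed example value) would produce:

	/* Hypothetical example using the evergreend.h field macros: */
	static const u32 example_5_1 = HDMI_CONNECTION | SPEAKER_ALLOCATION(0x0f);
	/* = (1 << 16) | 0x0f = 0x0001000f */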
+
static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
@@ -148,18 +191,44 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
u32 base_rate = 24000;
+ u32 max_ratio = clock / base_rate;
+ u32 dto_phase;
+ u32 dto_modulo = clock;
+ u32 wallclock_ratio;
+ u32 dto_cntl;
if (!dig || !dig->afmt)
return;
+ if (ASIC_IS_DCE6(rdev)) {
+ dto_phase = 24 * 1000;
+ } else {
+ if (max_ratio >= 8) {
+ dto_phase = 192 * 1000;
+ wallclock_ratio = 3;
+ } else if (max_ratio >= 4) {
+ dto_phase = 96 * 1000;
+ wallclock_ratio = 2;
+ } else if (max_ratio >= 2) {
+ dto_phase = 48 * 1000;
+ wallclock_ratio = 1;
+ } else {
+ dto_phase = 24 * 1000;
+ wallclock_ratio = 0;
+ }
+ dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+ }
+
/* XXX two dtos; generally use dto0 for hdmi */
/* Express [24MHz / target pixel clock] as an exact rational
* number (the ratio of two integers): DCCG_AUDIO_DTOx_PHASE
* is the numerator and DCCG_AUDIO_DTOx_MODULE is the denominator
*/
- WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
+ WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
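
For a common 148.5 MHz mode (clock is passed in kHz, so clock = 148500), max_ratio = 148500 / 24000 = 6, selecting dto_phase = 96 * 1000 and wallclock_ratio = 2. The same selection as a hypothetical standalone helper:

	/* Sketch of the DCE4/5 branch selection above; not driver code. */
	static void dce4_dto_select(u32 clock, u32 *phase, u32 *ratio)
	{
		u32 max_ratio = clock / 24000;

		if (max_ratio >= 8) {
			*phase = 192 * 1000; *ratio = 3;
		} else if (max_ratio >= 4) {
			*phase = 96 * 1000; *ratio = 2;
		} else if (max_ratio >= 2) {
			*phase = 48 * 1000; *ratio = 1;
		} else {
			*phase = 24 * 1000; *ratio = 0;
		}
	}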
@@ -238,13 +307,23 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
AFMT_60958_CS_CHANNEL_NUMBER_7(8));
- /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */
+ if (ASIC_IS_DCE6(rdev)) {
+ dce6_afmt_write_speaker_allocation(encoder);
+ } else {
+ dce4_afmt_write_speaker_allocation(encoder);
+ }
WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
AFMT_AUDIO_CHANNEL_ENABLE(0xff));
/* fglrx sets 0x40 in 0x5f80 here */
- evergreen_hdmi_write_sad_regs(encoder);
+
+ if (ASIC_IS_DCE6(rdev)) {
+ dce6_afmt_select_pin(encoder);
+ dce6_afmt_write_sad_regs(encoder);
+ } else {
+ evergreen_hdmi_write_sad_regs(encoder);
+ }
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
@@ -280,6 +359,8 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -292,6 +373,15 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
+ if (enable) {
+ if (ASIC_IS_DCE6(rdev))
+ dig->afmt->pin = dce6_audio_get_pin(rdev);
+ else
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ } else {
+ dig->afmt->pin = NULL;
+ }
+
dig->afmt->enabled = enable;
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a7baf67aef6c..8768fd6a1e27 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -497,6 +497,9 @@
#define DCCG_AUDIO_DTO0_MODULE 0x05b4
#define DCCG_AUDIO_DTO0_LOAD 0x05b8
#define DCCG_AUDIO_DTO0_CNTL 0x05bc
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4
@@ -711,6 +714,13 @@
#define AFMT_GENERIC0_7 0x7138
/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x5f78
+#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
+#define SPEAKER_ALLOCATION_SHIFT 0
+#define HDMI_CONNECTION (1 << 16)
+#define DP_CONNECTION (1 << 17)
+
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
@@ -1150,6 +1160,10 @@
# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
new file mode 100644
index 000000000000..ecd60809db4e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -0,0 +1,2645 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "radeon.h"
+#include "cikd.h"
+#include "r600_dpm.h"
+#include "kv_dpm.h"
+#include "radeon_asic.h"
+#include <linux/seq_file.h>
+
+#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define KV_MINIMUM_ENGINE_CLOCK 800
+#define SMC_RAM_END 0x40000
+
+static void kv_init_graphics_levels(struct radeon_device *rdev);
+static int kv_calculate_ds_divider(struct radeon_device *rdev);
+static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
+static int kv_calculate_dpm_settings(struct radeon_device *rdev);
+static void kv_enable_new_levels(struct radeon_device *rdev);
+static void kv_program_nbps_index_settings(struct radeon_device *rdev,
+ struct radeon_ps *new_rps);
+static int kv_set_enabled_levels(struct radeon_device *rdev);
+static int kv_force_dpm_highest(struct radeon_device *rdev);
+static int kv_force_dpm_lowest(struct radeon_device *rdev);
+static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
+ struct radeon_ps *new_rps,
+ struct radeon_ps *old_rps);
+static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp);
+static int kv_init_fps_limits(struct radeon_device *rdev);
+
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
+static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
+static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);
+
+extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable);
+
+static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 1, 4, 1 },
+ { 2, 5, 1 },
+ { 3, 4, 2 },
+ { 4, 1, 1 },
+ { 5, 5, 2 },
+ { 6, 6, 1 },
+ { 7, 9, 2 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 1, 4, 1 },
+ { 2, 5, 1 },
+ { 3, 4, 1 },
+ { 4, 1, 1 },
+ { 5, 5, 1 },
+ { 6, 6, 1 },
+ { 7, 9, 1 },
+ { 8, 4, 1 },
+ { 9, 2, 1 },
+ { 10, 3, 1 },
+ { 11, 6, 1 },
+ { 12, 8, 2 },
+ { 13, 1, 1 },
+ { 14, 2, 1 },
+ { 15, 3, 1 },
+ { 16, 1, 1 },
+ { 17, 4, 1 },
+ { 18, 3, 1 },
+ { 19, 1, 1 },
+ { 20, 8, 1 },
+ { 21, 5, 1 },
+ { 22, 1, 1 },
+ { 23, 1, 1 },
+ { 24, 4, 1 },
+ { 27, 6, 1 },
+ { 28, 1, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
+{
+ { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
+{
+ { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
+{
+ { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
+{
+ { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
+{
+ { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
+{
+ { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_pt_config_reg didt_config_kv[] =
+{
+ { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0xFFFFFFFF }
+};
+
+static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
+{
+ struct kv_ps *ps = rps->ps_priv;
+
+ return ps;
+}
+
+static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = rdev->pm.dpm.priv;
+
+ return pi;
+}
+
+#if 0
+static void kv_program_local_cac_table(struct radeon_device *rdev,
+ const struct kv_lcac_config_values *local_cac_table,
+ const struct kv_lcac_config_reg *local_cac_reg)
+{
+ u32 i, count, data;
+ const struct kv_lcac_config_values *values = local_cac_table;
+
+ while (values->block_id != 0xffffffff) {
+ count = values->signal_id;
+ for (i = 0; i < count; i++) {
+ data = ((values->block_id << local_cac_reg->block_shift) &
+ local_cac_reg->block_mask);
+ data |= ((i << local_cac_reg->signal_shift) &
+ local_cac_reg->signal_mask);
+ data |= ((values->t << local_cac_reg->t_shift) &
+ local_cac_reg->t_mask);
+ data |= ((1 << local_cac_reg->enable_shift) &
+ local_cac_reg->enable_mask);
+ WREG32_SMC(local_cac_reg->cntl, data);
+ }
+ values++;
+ }
+}
+#endif
+
+static int kv_program_pt_config_registers(struct radeon_device *rdev,
+ const struct kv_pt_config_reg *cac_config_regs)
+{
+ const struct kv_pt_config_reg *config_regs = cac_config_regs;
+ u32 data;
+ u32 cache = 0;
+
+ if (config_regs == NULL)
+ return -EINVAL;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ if (config_regs->type == KV_CONFIGREG_CACHE) {
+ cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ } else {
+ switch (config_regs->type) {
+ case KV_CONFIGREG_SMC_IND:
+ data = RREG32_SMC(config_regs->offset);
+ break;
+ case KV_CONFIGREG_DIDT_IND:
+ data = RREG32_DIDT(config_regs->offset);
+ break;
+ default:
+ data = RREG32(config_regs->offset << 2);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ data |= cache;
+ cache = 0;
+
+ switch (config_regs->type) {
+ case KV_CONFIGREG_SMC_IND:
+ WREG32_SMC(config_regs->offset, data);
+ break;
+ case KV_CONFIGREG_DIDT_IND:
+ WREG32_DIDT(config_regs->offset, data);
+ break;
+ default:
+ WREG32(config_regs->offset << 2, data);
+ break;
+ }
+ }
+ config_regs++;
+ }
+
+ return 0;
+}
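
The KV_CONFIGREG_CACHE path above only accumulates a shifted field into "cache"; the accumulated bits are then OR-ed into the register written by the next non-cache entry. A hypothetical two-field table exercising that path (example values only):

	/* Illustrative table: entry 1 is cached, entry 2 writes 0x3412
	 * composed bits into DIDT register 0x10. */
	static const struct kv_pt_config_reg example_didt_regs[] =
	{
		{ 0x10, 0x000000ff, 0, 0x12, KV_CONFIGREG_CACHE },
		{ 0x10, 0x0000ff00, 8, 0x34, KV_CONFIGREG_DIDT_IND },
		{ 0xFFFFFFFF }
	};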
+
+static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 data;
+
+ if (pi->caps_sq_ramping) {
+ data = RREG32_DIDT(DIDT_SQ_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_SQ_CTRL0, data);
+ }
+
+ if (pi->caps_db_ramping) {
+ data = RREG32_DIDT(DIDT_DB_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_DB_CTRL0, data);
+ }
+
+ if (pi->caps_td_ramping) {
+ data = RREG32_DIDT(DIDT_TD_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_TD_CTRL0, data);
+ }
+
+ if (pi->caps_tcp_ramping) {
+ data = RREG32_DIDT(DIDT_TCP_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_TCP_CTRL0, data);
+ }
+}
+
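+/*
+ * The DIDT registers are reprogrammed with the RLC parked in safe mode;
+ * the config table is only uploaded on the enable path.
+ */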
+static int kv_enable_didt(struct radeon_device *rdev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ if (pi->caps_sq_ramping ||
+ pi->caps_db_ramping ||
+ pi->caps_td_ramping ||
+ pi->caps_tcp_ramping) {
+ cik_enter_rlc_safe_mode(rdev);
+
+ if (enable) {
+ ret = kv_program_pt_config_registers(rdev, didt_config_kv);
+ if (ret) {
+ cik_exit_rlc_safe_mode(rdev);
+ return ret;
+ }
+ }
+
+ kv_do_enable_didt(rdev, enable);
+
+ cik_exit_rlc_safe_mode(rdev);
+ }
+
+ return 0;
+}
+
+#if 0
+static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->caps_cac) {
+ WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
+ WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
+
+ WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
+ WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
+
+ WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
+ WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
+
+ WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
+ WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
+
+ WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
+ WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
+
+ WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
+ WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
+ }
+}
+#endif
+
+static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret = 0;
+
+ if (pi->caps_cac) {
+ if (enable) {
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
+ if (ret)
+ pi->cac_enabled = false;
+ else
+ pi->cac_enabled = true;
+ } else if (pi->cac_enabled) {
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
+ pi->cac_enabled = false;
+ }
+ }
+
+ return ret;
+}
+
+static int kv_process_firmware_header(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 tmp;
+ int ret;
+
+ ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
+ &tmp, pi->sram_end);
+
+ if (ret == 0)
+ pi->dpm_table_start = tmp;
+
+ ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
+ &tmp, pi->sram_end);
+
+ if (ret == 0)
+ pi->soft_regs_start = tmp;
+
+ return ret;
+}
+
+static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ pi->graphics_voltage_change_enable = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
+ &pi->graphics_voltage_change_enable,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_set_dpm_interval(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ pi->graphics_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
+ &pi->graphics_interval,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_set_dpm_boot_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
+ &pi->graphics_boot_level,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static void kv_program_vc(struct radeon_device *rdev)
+{
+ WREG32_SMC(CG_FTV_0, 0x3FFFC000);
+}
+
+static void kv_clear_vc(struct radeon_device *rdev)
+{
+ WREG32_SMC(CG_FTV_0, 0);
+}
+
+static int kv_set_divider_value(struct radeon_device *rdev,
+ u32 index, u32 sclk)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct atom_clock_dividers dividers;
+ int ret;
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ sclk, false, &dividers);
+ if (ret)
+ return ret;
+
+ pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
+ pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
+
+ return 0;
+}
+
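+/*
+ * Convert an 8-bit VID code to a voltage value for the SMU. The encoding
+ * appears to be 1.55V minus 6.25mV per step, expressed in 0.25mV units
+ * (6200 * 0.25mV = 1550mV).
+ */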
+static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
+ u16 voltage)
+{
+ return 6200 - (voltage * 25);
+}
+
+static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
+ u32 vid_2bit)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
+ &pi->sys_info.vid_mapping_table,
+ vid_2bit);
+
+ return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
+}
+
+
+static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
+ pi->graphics_level[index].MinVddNb =
+ cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));
+
+ return 0;
+}
+
+static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->graphics_level[index].AT = cpu_to_be16((u16)at);
+
+ return 0;
+}
+
+static void kv_dpm_power_level_enable(struct radeon_device *rdev,
+ u32 index, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
+}
+
+static void kv_start_dpm(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+ tmp |= GLOBAL_PWRMGT_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+
+ kv_smc_dpm_enable(rdev, true);
+}
+
+static void kv_stop_dpm(struct radeon_device *rdev)
+{
+ kv_smc_dpm_enable(rdev, false);
+}
+
+static void kv_start_am(struct radeon_device *rdev)
+{
+ u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
+
+ sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
+ sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;
+
+ WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
+}
+
+static void kv_reset_am(struct radeon_device *rdev)
+{
+ u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
+
+ sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
+
+ WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
+}
+
+static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
+{
+ return kv_notify_message_to_smu(rdev, freeze ?
+ PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+}
+
+static int kv_force_lowest_valid(struct radeon_device *rdev)
+{
+ return kv_force_dpm_lowest(rdev);
+}
+
+static int kv_unforce_levels(struct radeon_device *rdev)
+{
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+}
+
+static int kv_update_sclk_t(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 low_sclk_interrupt_t = 0;
+ int ret = 0;
+
+ if (pi->caps_sclk_throttle_low_notification) {
+ low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
+ (u8 *)&low_sclk_interrupt_t,
+ sizeof(u32), pi->sram_end);
+ }
+ return ret;
+}
+
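+/*
+ * Locate the DPM level whose clock matches the bootup sclk (falling back
+ * to level 0) and record and enable it as the graphics boot level.
+ */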
+static int kv_program_bootup_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ if ((table->entries[i].clk == pi->boot_pl.sclk) ||
+ (i == 0))
+ break;
+ }
+
+ pi->graphics_boot_level = (u8)i;
+ kv_dpm_power_level_enable(rdev, i, true);
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ if (table->num_max_dpm_entries == 0)
+ return -EINVAL;
+
+ for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
+ (i == 0))
+ break;
+ }
+
+ pi->graphics_boot_level = (u8)i;
+ kv_dpm_power_level_enable(rdev, i, true);
+ }
+ return 0;
+}
+
+static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ pi->graphics_therm_throttle_enable = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
+ &pi->graphics_therm_throttle_enable,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_upload_dpm_settings(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
+ (u8 *)&pi->graphics_level,
+ sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
+ pi->sram_end);
+
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
+ &pi->graphics_dpm_level_count,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static u32 kv_get_clock_difference(u32 a, u32 b)
+{
+ return (a >= b) ? a - b : b - a;
+}
+
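+/*
+ * Map clocks sitting close to a handful of known frequencies to DFS
+ * bypass divider codes; the clocks are presumably in 10 kHz units, so
+ * 40000 is 400 MHz and the 200 window is 2 MHz.
+ */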
+static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 value;
+
+ if (pi->caps_enable_dfs_bypass) {
+ if (kv_get_clock_difference(clk, 40000) < 200)
+ value = 3;
+ else if (kv_get_clock_difference(clk, 30000) < 200)
+ value = 2;
+ else if (kv_get_clock_difference(clk, 20000) < 200)
+ value = 7;
+ else if (kv_get_clock_difference(clk, 15000) < 200)
+ value = 6;
+ else if (kv_get_clock_difference(clk, 10000) < 200)
+ value = 8;
+ else
+ value = 0;
+ } else {
+ value = 0;
+ }
+
+ return value;
+}
+
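+/*
+ * Build the SMU's UVD level table from the vclk/dclk voltage dependency
+ * table, skipping entries above the high-voltage threshold, then upload
+ * the levels, the level count and the sampling interval.
+ */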
+static int kv_populate_uvd_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_uvd_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 i;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->uvd_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t < table->entries[i].v))
+ break;
+
+ pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
+ pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
+ pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
+
+ pi->uvd_level[i].VClkBypassCntl =
+ (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
+ pi->uvd_level[i].DClkBypassCntl =
+ (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].vclk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].dclk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
+
+ pi->uvd_level_count++;
+ }
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
+ (u8 *)&pi->uvd_level_count,
+ sizeof(u8), pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->uvd_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UVDInterval),
+ &pi->uvd_interval,
+ sizeof(u8), pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UvdLevel),
+ (u8 *)&pi->uvd_level,
+ sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
+ pi->sram_end);
+
+ return ret;
+}
+
+static int kv_populate_vce_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+ u32 i;
+ struct radeon_vce_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->vce_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ pi->high_voltage_t < table->entries[i].v)
+ break;
+
+ pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
+ pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+ pi->vce_level[i].ClkBypassCntl =
+ (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].evclk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->vce_level[i].Divider = (u8)dividers.post_div;
+
+ pi->vce_level_count++;
+ }
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
+ (u8 *)&pi->vce_level_count,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->vce_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VCEInterval),
+ (u8 *)&pi->vce_interval,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VceLevel),
+ (u8 *)&pi->vce_level,
+ sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
+ pi->sram_end);
+
+ return ret;
+}
+
+static int kv_populate_samu_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 i;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->samu_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ pi->high_voltage_t < table->entries[i].v)
+ break;
+
+ pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
+ pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+ pi->samu_level[i].ClkBypassCntl =
+ (u8)kv_get_clk_bypass(rdev, table->entries[i].clk);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].clk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->samu_level[i].Divider = (u8)dividers.post_div;
+
+ pi->samu_level_count++;
+ }
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
+ (u8 *)&pi->samu_level_count,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->samu_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
+ (u8 *)&pi->samu_interval,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SamuLevel),
+ (u8 *)&pi->samu_level,
+ sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
+ pi->sram_end);
+ return ret;
+}
+
+static int kv_populate_acp_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 i;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->acp_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
+ pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].clk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->acp_level[i].Divider = (u8)dividers.post_div;
+
+ pi->acp_level_count++;
+ }
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
+ (u8 *)&pi->acp_level_count,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->acp_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, ACPInterval),
+ (u8 *)&pi->acp_interval,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, AcpLevel),
+ (u8 *)&pi->acp_level,
+ sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
+ pi->sram_end);
+ return ret;
+}
+
+static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if (pi->caps_enable_dfs_bypass) {
+ if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 3;
+ else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 2;
+ else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 7;
+ else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 6;
+ else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 8;
+ else
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ } else {
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ }
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if (pi->caps_enable_dfs_bypass) {
+ if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 3;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 2;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 7;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 6;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 8;
+ else
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ } else {
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ }
+ }
+ }
+}
+
+static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
+}
+
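+/*
+ * Copy the state into pi so that ps_priv keeps pointing at driver-owned
+ * storage even after the caller's radeon_ps goes away.
+ */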
+static void kv_update_current_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->current_rps = *rps;
+ pi->current_ps = *new_ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+static void kv_update_requested_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->requested_rps = *rps;
+ pi->requested_ps = *new_ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
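+/*
+ * Bring DPM up: read the SMU firmware header, program the boot level and
+ * DPM tables, then enable voltage scaling, ULV, DIDT and CAC, and finally
+ * power-gate the idle IP blocks. Coarse clockgating is held off while the
+ * SMU is being reconfigured.
+ */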
+int kv_dpm_enable(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), false);
+
+ ret = kv_process_firmware_header(rdev);
+ if (ret) {
+ DRM_ERROR("kv_process_firmware_header failed\n");
+ return ret;
+ }
+ kv_init_fps_limits(rdev);
+ kv_init_graphics_levels(rdev);
+ ret = kv_program_bootup_state(rdev);
+ if (ret) {
+ DRM_ERROR("kv_program_bootup_state failed\n");
+ return ret;
+ }
+ kv_calculate_dfs_bypass_settings(rdev);
+ ret = kv_upload_dpm_settings(rdev);
+ if (ret) {
+ DRM_ERROR("kv_upload_dpm_settings failed\n");
+ return ret;
+ }
+ ret = kv_populate_uvd_table(rdev);
+ if (ret) {
+ DRM_ERROR("kv_populate_uvd_table failed\n");
+ return ret;
+ }
+ ret = kv_populate_vce_table(rdev);
+ if (ret) {
+ DRM_ERROR("kv_populate_vce_table failed\n");
+ return ret;
+ }
+ ret = kv_populate_samu_table(rdev);
+ if (ret) {
+ DRM_ERROR("kv_populate_samu_table failed\n");
+ return ret;
+ }
+ ret = kv_populate_acp_table(rdev);
+ if (ret) {
+ DRM_ERROR("kv_populate_acp_table failed\n");
+ return ret;
+ }
+ kv_program_vc(rdev);
+#if 0
+ kv_initialize_hardware_cac_manager(rdev);
+#endif
+ kv_start_am(rdev);
+ if (pi->enable_auto_thermal_throttling) {
+ ret = kv_enable_auto_thermal_throttling(rdev);
+ if (ret) {
+ DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
+ return ret;
+ }
+ }
+ ret = kv_enable_dpm_voltage_scaling(rdev);
+ if (ret) {
+ DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
+ return ret;
+ }
+ ret = kv_set_dpm_interval(rdev);
+ if (ret) {
+ DRM_ERROR("kv_set_dpm_interval failed\n");
+ return ret;
+ }
+ ret = kv_set_dpm_boot_state(rdev);
+ if (ret) {
+ DRM_ERROR("kv_set_dpm_boot_state failed\n");
+ return ret;
+ }
+ ret = kv_enable_ulv(rdev, true);
+ if (ret) {
+ DRM_ERROR("kv_enable_ulv failed\n");
+ return ret;
+ }
+ kv_start_dpm(rdev);
+ ret = kv_enable_didt(rdev, true);
+ if (ret) {
+ DRM_ERROR("kv_enable_didt failed\n");
+ return ret;
+ }
+ ret = kv_enable_smc_cac(rdev, true);
+ if (ret) {
+ DRM_ERROR("kv_enable_smc_cac failed\n");
+ return ret;
+ }
+
+ if (rdev->irq.installed &&
+ r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+ ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret) {
+ DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+ return ret;
+ }
+ rdev->irq.dpm_thermal = true;
+ radeon_irq_set(rdev);
+ }
+
+ /* powerdown unused blocks for now */
+ kv_dpm_powergate_acp(rdev, true);
+ kv_dpm_powergate_samu(rdev, true);
+ kv_dpm_powergate_vce(rdev, true);
+ kv_dpm_powergate_uvd(rdev, true);
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), true);
+
+ kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+ return ret;
+}
+
+void kv_dpm_disable(struct radeon_device *rdev)
+{
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), false);
+
+ /* powerup blocks */
+ kv_dpm_powergate_acp(rdev, false);
+ kv_dpm_powergate_samu(rdev, false);
+ kv_dpm_powergate_vce(rdev, false);
+ kv_dpm_powergate_uvd(rdev, false);
+
+ kv_enable_smc_cac(rdev, false);
+ kv_enable_didt(rdev, false);
+ kv_clear_vc(rdev);
+ kv_stop_dpm(rdev);
+ kv_enable_ulv(rdev, false);
+ kv_reset_am(rdev);
+
+ kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+}
+
+#if 0
+static int kv_write_smc_soft_register(struct radeon_device *rdev,
+ u16 reg_offset, u32 value)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
+ (u8 *)&value, sizeof(u16), pi->sram_end);
+}
+
+static int kv_read_smc_soft_register(struct radeon_device *rdev,
+ u16 reg_offset, u32 *value)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
+ value, pi->sram_end);
+}
+#endif
+
+static void kv_init_sclk_t(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->low_sclk_interrupt_t = 0;
+}
+
+static int kv_init_fps_limits(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret = 0;
+
+ if (pi->caps_fps) {
+ u16 tmp;
+
+ tmp = 45;
+ pi->fps_high_t = cpu_to_be16(tmp);
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, FpsHighT),
+ (u8 *)&pi->fps_high_t,
+ sizeof(u16), pi->sram_end);
+
+ tmp = 30;
+ pi->fps_low_t = cpu_to_be16(tmp);
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, FpsLowT),
+ (u8 *)&pi->fps_low_t,
+ sizeof(u16), pi->sram_end);
+
+ }
+}
+
+static void kv_init_powergate_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->uvd_power_gated = false;
+ pi->vce_power_gated = false;
+ pi->samu_power_gated = false;
+ pi->acp_power_gated = false;
+}
+
+static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
+}
+
+#if 0
+static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
+}
+#endif
+
+static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
+}
+
+static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
+}
+
+static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_uvd_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ int ret;
+
+ if (!gate) {
+ if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
+ pi->uvd_boot_level = table->count - 1;
+ else
+ pi->uvd_boot_level = 0;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
+ (uint8_t *)&pi->uvd_boot_level,
+ sizeof(u8), pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (!pi->caps_uvd_dpm ||
+ pi->caps_stable_p_state)
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (1 << pi->uvd_boot_level));
+ }
+
+ return kv_enable_uvd_dpm(rdev, !gate);
+}
+
+#if 0
+static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
+{
+ u8 i;
+ struct radeon_vce_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].evclk >= 0) /* XXX */
+ break;
+ }
+
+ return i;
+}
+
+static int kv_update_vce_dpm(struct radeon_device *rdev,
+ struct radeon_ps *radeon_new_state,
+ struct radeon_ps *radeon_current_state)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_vce_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ int ret;
+
+ if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
+ if (pi->caps_stable_p_state)
+ pi->vce_boot_level = table->count - 1;
+ else
+ pi->vce_boot_level = kv_get_vce_boot_level(rdev);
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
+ (u8 *)&pi->vce_boot_level,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (pi->caps_stable_p_state)
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (1 << pi->vce_boot_level));
+
+ kv_enable_vce_dpm(rdev, true);
+ } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
+ kv_enable_vce_dpm(rdev, false);
+ }
+
+ return 0;
+}
+#endif
+
+static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+ int ret;
+
+ if (!gate) {
+ if (pi->caps_stable_p_state)
+ pi->samu_boot_level = table->count - 1;
+ else
+ pi->samu_boot_level = 0;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
+ (u8 *)&pi->samu_boot_level,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (pi->caps_stable_p_state)
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (1 << pi->samu_boot_level));
+ }
+
+ return kv_enable_samu_dpm(rdev, !gate);
+}
+
+static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+ int ret;
+
+ if (!gate) {
+ if (pi->caps_stable_p_state)
+ pi->acp_boot_level = table->count - 1;
+ else
+ pi->acp_boot_level = 0;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
+ (u8 *)&pi->acp_boot_level,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (pi->caps_stable_p_state)
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_ACPDPM_SetEnabledMask,
+ (1 << pi->acp_boot_level));
+ }
+
+ return kv_enable_acp_dpm(rdev, !gate);
+}
+
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->uvd_power_gated == gate)
+ return;
+
+ pi->uvd_power_gated = gate;
+
+ if (gate) {
+ if (pi->caps_uvd_pg) {
+ uvd_v1_0_stop(rdev);
+ cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
+ }
+ kv_update_uvd_dpm(rdev, gate);
+ if (pi->caps_uvd_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
+ } else {
+ if (pi->caps_uvd_pg) {
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
+ uvd_v4_2_resume(rdev);
+ uvd_v1_0_start(rdev);
+ cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
+ }
+ kv_update_uvd_dpm(rdev, gate);
+ }
+}
+
+static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->vce_power_gated == gate)
+ return;
+
+ pi->vce_power_gated = gate;
+
+ if (gate) {
+ if (pi->caps_vce_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
+ } else {
+ if (pi->caps_vce_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
+ }
+}
+
+static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->samu_power_gated == gate)
+ return;
+
+ pi->samu_power_gated = gate;
+
+ if (gate) {
+ kv_update_samu_dpm(rdev, true);
+ if (pi->caps_samu_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
+ } else {
+ if (pi->caps_samu_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
+ kv_update_samu_dpm(rdev, false);
+ }
+}
+
+static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->acp_power_gated == gate)
+ return;
+
+ if (rdev->family == CHIP_KABINI)
+ return;
+
+ pi->acp_power_gated = gate;
+
+ if (gate) {
+ kv_update_acp_dpm(rdev, true);
+ if (pi->caps_acp_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
+ } else {
+ if (pi->caps_acp_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
+ kv_update_acp_dpm(rdev, false);
+ }
+}
+
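+/*
+ * Derive the window of usable graphics DPM levels (lowest_valid ..
+ * highest_valid) from the new state's min and max sclk; if the window
+ * inverts, collapse it onto whichever bound is the closer match.
+ */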
+static void kv_set_valid_clock_range(struct radeon_device *rdev,
+ struct radeon_ps *new_rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
+ (i == (pi->graphics_dpm_level_count - 1))) {
+ pi->lowest_valid = i;
+ break;
+ }
+ }
+
+ for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
+ (i == 0)) {
+ pi->highest_valid = i;
+ break;
+ }
+ }
+
+ if (pi->lowest_valid > pi->highest_valid) {
+ if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
+ (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
+ pi->highest_valid = pi->lowest_valid;
+ else
+ pi->lowest_valid = pi->highest_valid;
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
+ i == (pi->graphics_dpm_level_count - 1)) {
+ pi->lowest_valid = i;
+ break;
+ }
+ }
+
+ for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ if (table->entries[i].sclk_frequency <=
+ new_ps->levels[new_ps->num_levels - 1].sclk ||
+ i == 0) {
+ pi->highest_valid = i;
+ break;
+ }
+ }
+
+ if (pi->lowest_valid > pi->highest_valid) {
+ if ((new_ps->levels[0].sclk -
+ table->entries[pi->highest_valid].sclk_frequency) >
+ (table->entries[pi->lowest_valid].sclk_frequency -
+ new_ps->levels[new_ps->num_levels -1].sclk))
+ pi->highest_valid = pi->lowest_valid;
+ else
+ pi->lowest_valid = pi->highest_valid;
+ }
+ }
+}
+
+static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
+ struct radeon_ps *new_rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret = 0;
+ u8 clk_bypass_cntl;
+
+ if (pi->caps_enable_dfs_bypass) {
+ clk_bypass_cntl = new_ps->need_dfs_bypass ?
+ pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
+ ret = kv_copy_bytes_to_smc(rdev,
+ (pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
+ (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
+ offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
+ &clk_bypass_cntl,
+ sizeof(u8), pi->sram_end);
+ }
+
+ return ret;
+}
+
+static int kv_enable_nb_dpm(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret = 0;
+
+ if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
+ if (ret == 0)
+ pi->nb_dpm_enabled = true;
+ }
+
+ return ret;
+}
+
+int kv_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level)
+{
+ int ret;
+
+ if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ ret = kv_force_dpm_highest(rdev);
+ if (ret)
+ return ret;
+ } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ ret = kv_force_dpm_lowest(rdev);
+ if (ret)
+ return ret;
+ } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
+ ret = kv_unforce_levels(rdev);
+ if (ret)
+ return ret;
+ }
+
+ rdev->pm.dpm.forced_level = level;
+
+ return 0;
+}
+
+int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+ struct radeon_ps *new_ps = &requested_ps;
+
+ kv_update_requested_ps(rdev, new_ps);
+
+ kv_apply_state_adjust_rules(rdev,
+ &pi->requested_rps,
+ &pi->current_rps);
+
+ return 0;
+}
+
+int kv_dpm_set_power_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+ /*struct radeon_ps *old_ps = &pi->current_rps;*/
+ int ret;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), false);
+
+ if (rdev->family == CHIP_KABINI) {
+ if (pi->enable_dpm) {
+ kv_set_valid_clock_range(rdev, new_ps);
+ kv_update_dfs_bypass_settings(rdev, new_ps);
+ ret = kv_calculate_ds_divider(rdev);
+ if (ret) {
+ DRM_ERROR("kv_calculate_ds_divider failed\n");
+ return ret;
+ }
+ kv_calculate_nbps_level_settings(rdev);
+ kv_calculate_dpm_settings(rdev);
+ kv_force_lowest_valid(rdev);
+ kv_enable_new_levels(rdev);
+ kv_upload_dpm_settings(rdev);
+ kv_program_nbps_index_settings(rdev, new_ps);
+ kv_unforce_levels(rdev);
+ kv_set_enabled_levels(rdev);
+ kv_force_lowest_valid(rdev);
+ kv_unforce_levels(rdev);
+#if 0
+ ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
+ if (ret) {
+ DRM_ERROR("kv_update_vce_dpm failed\n");
+ return ret;
+ }
+#endif
+ kv_update_sclk_t(rdev);
+ }
+ } else {
+ if (pi->enable_dpm) {
+ kv_set_valid_clock_range(rdev, new_ps);
+ kv_update_dfs_bypass_settings(rdev, new_ps);
+ ret = kv_calculate_ds_divider(rdev);
+ if (ret) {
+ DRM_ERROR("kv_calculate_ds_divider failed\n");
+ return ret;
+ }
+ kv_calculate_nbps_level_settings(rdev);
+ kv_calculate_dpm_settings(rdev);
+ kv_freeze_sclk_dpm(rdev, true);
+ kv_upload_dpm_settings(rdev);
+ kv_program_nbps_index_settings(rdev, new_ps);
+ kv_freeze_sclk_dpm(rdev, false);
+ kv_set_enabled_levels(rdev);
+#if 0
+ ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
+ if (ret) {
+ DRM_ERROR("kv_update_vce_dpm failed\n");
+ return ret;
+ }
+#endif
+ kv_update_sclk_t(rdev);
+ kv_enable_nb_dpm(rdev);
+ }
+ }
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), true);
+
+ rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
+ return 0;
+}
+
+void kv_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+
+ kv_update_current_ps(rdev, new_ps);
+}
+
+void kv_dpm_setup_asic(struct radeon_device *rdev)
+{
+ sumo_take_smu_control(rdev, true);
+ kv_init_powergate_state(rdev);
+ kv_init_sclk_t(rdev);
+}
+
+void kv_dpm_reset_asic(struct radeon_device *rdev)
+{
+ kv_force_lowest_valid(rdev);
+ kv_init_graphics_levels(rdev);
+ kv_program_bootup_state(rdev);
+ kv_upload_dpm_settings(rdev);
+ kv_force_lowest_valid(rdev);
+ kv_unforce_levels(rdev);
+}
+
+/* XXX use sumo_dpm_display_configuration_changed */
+
+static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
+ struct radeon_clock_and_voltage_limits *table)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
+ int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
+ table->sclk =
+ pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
+ table->vddc =
+ kv_convert_2bit_index_to_voltage(rdev,
+ pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
+ }
+
+ table->mclk = pi->sys_info.nbp_memory_clock[0];
+}
+
+static void kv_patch_voltage_values(struct radeon_device *rdev)
+{
+ int i;
+ struct radeon_uvd_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+
+ if (table->count) {
+ for (i = 0; i < table->count; i++)
+ table->entries[i].v =
+ kv_convert_8bit_index_to_voltage(rdev,
+ table->entries[i].v);
+ }
+}
+
+static void kv_construct_boot_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
+ pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
+ pi->boot_pl.ds_divider_index = 0;
+ pi->boot_pl.ss_divider_index = 0;
+ pi->boot_pl.allow_gnb_slow = 1;
+ pi->boot_pl.force_nbp_state = 0;
+ pi->boot_pl.display_wm = 0;
+ pi->boot_pl.vce_wm = 0;
+}
+
+static int kv_force_dpm_highest(struct radeon_device *rdev)
+{
+ int ret;
+ u32 enable_mask, i;
+
+ ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
+ if (ret)
+ return ret;
+
+ for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
+ if (enable_mask & (1 << i))
+ break;
+ }
+
+ return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+}
+
+static int kv_force_dpm_lowest(struct radeon_device *rdev)
+{
+ int ret;
+ u32 enable_mask, i;
+
+ ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
+ if (enable_mask & (1 << i))
+ break;
+ }
+
+ return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+}
+
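+/*
+ * Pick the largest deep-sleep divider that still keeps the divided sclk
+ * at or above the minimum engine clock.
+ */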
+static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
+ u32 sclk, u32 min_sclk_in_sr)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ u32 temp;
+ u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
+ min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
+
+ if (sclk < min)
+ return 0;
+
+ if (!pi->caps_sclk_ds)
+ return 0;
+
+ for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
+ temp = sclk / sumo_get_sleep_divider_from_id(i);
+ if ((temp >= min) || (i == 0))
+ break;
+ }
+
+ return (u8)i;
+}
+
+static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ int i;
+
+ if (table && table->count) {
+ for (i = table->count - 1; i >= 0; i--) {
+ if (pi->high_voltage_t &&
+ (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
+ pi->high_voltage_t)) {
+ *limit = i;
+ return 0;
+ }
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
+ if (pi->high_voltage_t &&
+ (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
+ pi->high_voltage_t)) {
+ *limit = i;
+ return 0;
+ }
+ }
+ }
+
+ *limit = 0;
+ return 0;
+}
+
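+/*
+ * Massage the requested state before programming it: enforce a minimum
+ * sclk (pinned to ~75% of max for stable-p-state), clamp levels that
+ * exceed the high-voltage threshold, and pick NB p-state hints (biased
+ * when on battery with NB DPM enabled).
+ */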
+static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
+ struct radeon_ps *new_rps,
+ struct radeon_ps *old_rps)
+{
+ struct kv_ps *ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 min_sclk = 10000; /* ??? */
+ u32 sclk, mclk = 0;
+ int i, limit;
+ bool force_high;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 stable_p_state_sclk = 0;
+ struct radeon_clock_and_voltage_limits *max_limits =
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+ mclk = max_limits->mclk;
+ sclk = min_sclk;
+
+ if (pi->caps_stable_p_state) {
+ stable_p_state_sclk = (max_limits->sclk * 75) / 100;
+
+ for (i = table->count - 1; i >= 0; i--) {
+ if (stable_p_state_sclk >= table->entries[i].clk) {
+ stable_p_state_sclk = table->entries[i].clk;
+ break;
+ }
+ }
+
+ if (i > 0)
+ stable_p_state_sclk = table->entries[0].clk;
+
+ sclk = stable_p_state_sclk;
+ }
+
+ ps->need_dfs_bypass = true;
+
+ for (i = 0; i < ps->num_levels; i++) {
+ if (ps->levels[i].sclk < sclk)
+ ps->levels[i].sclk = sclk;
+ }
+
+ if (table && table->count) {
+ for (i = 0; i < ps->num_levels; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t <
+ kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
+ kv_get_high_voltage_limit(rdev, &limit);
+ ps->levels[i].sclk = table->entries[limit].clk;
+ }
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ for (i = 0; i < ps->num_levels; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t <
+ kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
+ kv_get_high_voltage_limit(rdev, &limit);
+ ps->levels[i].sclk = table->entries[limit].sclk_frequency;
+ }
+ }
+ }
+
+ if (pi->caps_stable_p_state) {
+ for (i = 0; i < ps->num_levels; i++) {
+ ps->levels[i].sclk = stable_p_state_sclk;
+ }
+ }
+
+ pi->video_start = new_rps->dclk || new_rps->vclk;
+
+ if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+ ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+ pi->battery_state = true;
+ else
+ pi->battery_state = false;
+
+ if (rdev->family == CHIP_KABINI) {
+ ps->dpm0_pg_nb_ps_lo = 0x1;
+ ps->dpm0_pg_nb_ps_hi = 0x0;
+ ps->dpmx_nb_ps_lo = 0x1;
+ ps->dpmx_nb_ps_hi = 0x0;
+ } else {
+ ps->dpm0_pg_nb_ps_lo = 0x1;
+ ps->dpm0_pg_nb_ps_hi = 0x0;
+ ps->dpmx_nb_ps_lo = 0x2;
+ ps->dpmx_nb_ps_hi = 0x1;
+
+ if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+ force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
+ pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
+ pi->disable_nb_ps3_in_battery;
+ ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
+ ps->dpm0_pg_nb_ps_hi = 0x2;
+ ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
+ ps->dpmx_nb_ps_hi = 0x2;
+ }
+ }
+}
+
+static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
+ u32 index, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
+}
+
+static int kv_calculate_ds_divider(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 sclk_in_sr = 10000; /* ??? */
+ u32 i;
+
+ if (pi->lowest_valid > pi->highest_valid)
+ return -EINVAL;
+
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+ pi->graphics_level[i].DeepSleepDivId =
+ kv_get_sleep_divider_id_from_clock(rdev,
+ be32_to_cpu(pi->graphics_level[i].SclkFrequency),
+ sclk_in_sr);
+ }
+ return 0;
+}
+
+static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ bool force_high;
+ struct radeon_clock_and_voltage_limits *max_limits =
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ u32 mclk = max_limits->mclk;
+
+ if (pi->lowest_valid > pi->highest_valid)
+ return -EINVAL;
+
+ if (rdev->family == CHIP_KABINI) {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+ pi->graphics_level[i].GnbSlow = 1;
+ pi->graphics_level[i].ForceNbPs1 = 0;
+ pi->graphics_level[i].UpH = 0;
+ }
+
+ if (!pi->sys_info.nb_dpm_enable)
+ return 0;
+
+ force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
+ (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
+
+ if (force_high) {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+ pi->graphics_level[i].GnbSlow = 0;
+ } else {
+ if (pi->battery_state)
+ pi->graphics_level[0].ForceNbPs1 = 1;
+
+ pi->graphics_level[1].GnbSlow = 0;
+ pi->graphics_level[2].GnbSlow = 0;
+ pi->graphics_level[3].GnbSlow = 0;
+ pi->graphics_level[4].GnbSlow = 0;
+ }
+ } else {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+ pi->graphics_level[i].GnbSlow = 1;
+ pi->graphics_level[i].ForceNbPs1 = 0;
+ pi->graphics_level[i].UpH = 0;
+ }
+
+ if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+ pi->graphics_level[pi->lowest_valid].UpH = 0x28;
+ pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
+ if (pi->lowest_valid != pi->highest_valid)
+ pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
+ }
+ }
+ return 0;
+}
+
+static int kv_calculate_dpm_settings(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+
+ if (pi->lowest_valid > pi->highest_valid)
+ return -EINVAL;
+
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+ pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
+
+ return 0;
+}
+
+static void kv_init_graphics_levels(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ u32 vid_2bit;
+
+ pi->graphics_dpm_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t <
+ kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
+ break;
+
+ kv_set_divider_value(rdev, i, table->entries[i].clk);
+ vid_2bit = sumo_convert_vid7_to_vid2(rdev,
+ &pi->sys_info.vid_mapping_table,
+ table->entries[i].v);
+ kv_set_vid(rdev, i, vid_2bit);
+ kv_set_at(rdev, i, pi->at[i]);
+ kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
+ pi->graphics_dpm_level_count++;
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ pi->graphics_dpm_level_count = 0;
+ for (i = 0; i < table->num_max_dpm_entries; i++) {
+ if (pi->high_voltage_t &&
+ pi->high_voltage_t <
+ kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
+ break;
+
+ kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
+ kv_set_vid(rdev, i, table->entries[i].vid_2bit);
+ kv_set_at(rdev, i, pi->at[i]);
+ kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
+ pi->graphics_dpm_level_count++;
+ }
+ }
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
+ kv_dpm_power_level_enable(rdev, i, false);
+}
+
+static void kv_enable_new_levels(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
+ if (i >= pi->lowest_valid && i <= pi->highest_valid)
+ kv_dpm_power_level_enable(rdev, i, true);
+ }
+}
+
+static int kv_set_enabled_levels(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i, new_mask = 0;
+
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+ new_mask |= (1 << i);
+
+ return kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ new_mask);
+}
+
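+/*
+ * Push the selected NB p-state indices into NB_DPM_CONFIG_1; skipped on
+ * Kabini, where this path does not apply.
+ */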
+static void kv_program_nbps_index_settings(struct radeon_device *rdev,
+ struct radeon_ps *new_rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 nbdpmconfig1;
+
+ if (rdev->family == CHIP_KABINI)
+ return;
+
+ if (pi->sys_info.nb_dpm_enable) {
+ nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
+ nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
+ DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
+ nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
+ Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
+ DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
+ DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
+ WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
+ }
+}
+
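+/*
+ * Program the thermal interrupt thresholds; the DIG_THERM fields
+ * apparently encode degrees C with a +49 offset.
+ */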
+static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp)
+{
+ int low_temp = 0 * 1000;
+ int high_temp = 255 * 1000;
+ u32 tmp;
+
+ if (low_temp < min_temp)
+ low_temp = min_temp;
+ if (high_temp > max_temp)
+ high_temp = max_temp;
+ if (high_temp < low_temp) {
+ DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ return -EINVAL;
+ }
+
+ tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
+ tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
+ tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
+ DIG_THERM_INTL(49 + (low_temp / 1000)));
+ WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);
+
+ rdev->pm.dpm.thermal.min_temp = low_temp;
+ rdev->pm.dpm.thermal.max_temp = high_temp;
+
+ return 0;
+}
+
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+};
+
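+/*
+ * Pull the IGP system info (only table revision 8 is handled) out of the
+ * ATOM BIOS: bootup clocks, thermal limits, NB p-state clocks and the
+ * sclk/voltage mapping tables.
+ */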
+static int kv_parse_sys_info_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ union igp_info *igp_info;
+ u8 frev, crev;
+ u16 data_offset;
+ int i;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ if (crev != 8) {
+ DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ return -EINVAL;
+ }
+ pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
+ pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
+ pi->sys_info.bootup_nb_voltage_index =
+ le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
+ if (igp_info->info_8.ucHtcTmpLmt == 0)
+ pi->sys_info.htc_tmp_lmt = 203;
+ else
+ pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
+ if (igp_info->info_8.ucHtcHystLmt == 0)
+ pi->sys_info.htc_hyst_lmt = 5;
+ else
+ pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
+ if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
+ DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+ }
+
+ if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
+ pi->sys_info.nb_dpm_enable = true;
+ else
+ pi->sys_info.nb_dpm_enable = false;
+
+ for (i = 0; i < KV_NUM_NBPSTATES; i++) {
+ pi->sys_info.nbp_memory_clock[i] =
+ le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
+ pi->sys_info.nbp_n_clock[i] =
+ le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
+ }
+ if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
+ SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+ pi->caps_enable_dfs_bypass = true;
+
+ sumo_construct_sclk_voltage_mapping_table(rdev,
+ &pi->sys_info.sclk_voltage_mapping_table,
+ igp_info->info_8.sAvail_SCLK);
+
+ sumo_construct_vid_mapping_table(rdev,
+ &pi->sys_info.vid_mapping_table,
+ igp_info->info_8.sAvail_SCLK);
+
+ kv_construct_max_power_limits_table(rdev,
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+ }
+ return 0;
+}
+
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void kv_patch_boot_state(struct radeon_device *rdev,
+ struct kv_ps *ps)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ ps->num_levels = 1;
+ ps->levels[0] = pi->boot_pl;
+}
+
+static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
+ struct radeon_ps *rps,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+ u8 table_rev)
+{
+ struct kv_ps *ps = kv_get_ps(rps);
+
+ rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ rps->class = le16_to_cpu(non_clock_info->usClassification);
+ rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+ if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+ rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+ rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+ } else {
+ rps->vclk = 0;
+ rps->dclk = 0;
+ }
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ rdev->pm.dpm.boot_ps = rps;
+ kv_patch_boot_state(rdev, ps);
+ }
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
+ struct radeon_ps *rps, int index,
+ union pplib_clock_info *clock_info)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct kv_ps *ps = kv_get_ps(rps);
+ struct kv_pl *pl = &ps->levels[index];
+ u32 sclk;
+
+ sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+ sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+ pl->sclk = sclk;
+ pl->vddc_index = clock_info->sumo.vddcIndex;
+
+ ps->num_levels = index + 1;
+
+ if (pi->caps_sclk_ds) {
+ pl->ds_divider_index = 5;
+ pl->ss_divider_index = 5;
+ }
+}
+
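+/*
+ * Walk the ATOM PPLib state array and build the driver's radeon_ps list,
+ * attaching a kv_ps with up to SUMO_MAX_HARDWARE_POWERLEVELS levels to
+ * each state.
+ */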
+static int kv_parse_power_table(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, k, non_clock_array_index, clock_array_index;
+ union pplib_clock_info *clock_info;
+ struct _StateArray *state_array;
+ struct _ClockInfoArray *clock_info_array;
+ struct _NonClockInfoArray *non_clock_info_array;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u8 *power_state_offset;
+ struct kv_ps *ps;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ state_array = (struct _StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ clock_info_array = (struct _ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ non_clock_info_array = (struct _NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+ rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!rdev->pm.dpm.ps)
+ return -ENOMEM;
+ power_state_offset = (u8 *)state_array->states;
+ rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+ rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+ rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ if (!rdev->pm.power_state[i].clock_info)
+ return -EINVAL;
+ ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(rdev->pm.dpm.ps);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.ps[i].ps_priv = ps;
+ k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = idx[j];
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
+ break;
+ clock_info = (union pplib_clock_info *)
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
+ kv_parse_pplib_clock_info(rdev,
+ &rdev->pm.dpm.ps[i], k,
+ clock_info);
+ k++;
+ }
+ kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+ non_clock_info,
+ non_clock_info_array->ucEntrySize);
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+ return 0;
+}
+
+int kv_dpm_init(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi;
+ int ret, i;
+
+ pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
+ if (pi == NULL)
+ return -ENOMEM;
+ rdev->pm.dpm.priv = pi;
+
+ ret = r600_parse_extended_power_table(rdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
+ pi->at[i] = TRINITY_AT_DFLT;
+
+ pi->sram_end = SMC_RAM_END;
+
+ if (rdev->family == CHIP_KABINI)
+ pi->high_voltage_t = 4001;
+
+ pi->enable_nb_dpm = true;
+
+ pi->caps_power_containment = true;
+ pi->caps_cac = true;
+ pi->enable_didt = false;
+ if (pi->enable_didt) {
+ pi->caps_sq_ramping = true;
+ pi->caps_db_ramping = true;
+ pi->caps_td_ramping = true;
+ pi->caps_tcp_ramping = true;
+ }
+
+ pi->caps_sclk_ds = true;
+ pi->enable_auto_thermal_throttling = true;
+ pi->disable_nb_ps3_in_battery = false;
+ pi->bapm_enable = true;
+ pi->voltage_drop_t = 0;
+ pi->caps_sclk_throttle_low_notification = false;
+ pi->caps_fps = false; /* true? */
+ pi->caps_uvd_pg = true;
+ pi->caps_uvd_dpm = true;
+ pi->caps_vce_pg = false;
+ pi->caps_samu_pg = false;
+ pi->caps_acp_pg = false;
+ pi->caps_stable_p_state = false;
+
+ ret = kv_parse_sys_info_table(rdev);
+ if (ret)
+ return ret;
+
+ kv_patch_voltage_values(rdev);
+ kv_construct_boot_state(rdev);
+
+ ret = kv_parse_power_table(rdev);
+ if (ret)
+ return ret;
+
+ pi->enable_dpm = true;
+
+ return 0;
+}
+
+void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 current_index =
+ (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
+ CURR_SCLK_INDEX_SHIFT;
+ u32 sclk, tmp;
+ u16 vddc;
+
+ if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
+ seq_printf(m, "invalid dpm profile %d\n", current_index);
+ } else {
+ sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
+ tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
+ SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
+ vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
+ seq_printf(m, "power level %d sclk: %u vddc: %u\n",
+ current_index, sclk, vddc);
+ }
+}
+
+void kv_dpm_print_power_state(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ int i;
+ struct kv_ps *ps = kv_get_ps(rps);
+
+ r600_dpm_print_class_info(rps->class, rps->class2);
+ r600_dpm_print_cap_info(rps->caps);
+ printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ for (i = 0; i < ps->num_levels; i++) {
+ struct kv_pl *pl = &ps->levels[i];
+ printk("\t\tpower level %d sclk: %u vddc: %u\n",
+ i, pl->sclk,
+ kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
+ }
+ r600_dpm_print_ps_status(rdev, rps);
+}
+
+void kv_dpm_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+ kfree(rdev->pm.dpm.ps[i].ps_priv);
+ }
+ kfree(rdev->pm.dpm.ps);
+ kfree(rdev->pm.dpm.priv);
+ r600_free_extended_power_table(rdev);
+}
+
+void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+
+}
+
+u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
+
+ if (low)
+ return requested_state->levels[0].sclk;
+ else
+ return requested_state->levels[requested_state->num_levels - 1].sclk;
+}
+
+u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ return pi->sys_info.bootup_uma_clk;
+}
+
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
new file mode 100644
index 000000000000..32bb079572d7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __KV_DPM_H__
+#define __KV_DPM_H__
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */
+#include "smu7_fusion.h"
+#include "trinity_dpm.h"
+#include "ppsmc.h"
+
+#define KV_NUM_NBPSTATES 4
+
+enum kv_pt_config_reg_type {
+ KV_CONFIGREG_MMR = 0,
+ KV_CONFIGREG_SMC_IND,
+ KV_CONFIGREG_DIDT_IND,
+ KV_CONFIGREG_CACHE,
+ KV_CONFIGREG_MAX
+};
+
+struct kv_pt_config_reg {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 value;
+ enum kv_pt_config_reg_type type;
+};
+
+struct kv_lcac_config_values {
+ u32 block_id;
+ u32 signal_id;
+ u32 t;
+};
+
+struct kv_lcac_config_reg {
+ u32 cntl;
+ u32 block_mask;
+ u32 block_shift;
+ u32 signal_mask;
+ u32 signal_shift;
+ u32 t_mask;
+ u32 t_shift;
+ u32 enable_mask;
+ u32 enable_shift;
+};
+
+struct kv_pl {
+ u32 sclk;
+ u8 vddc_index;
+ u8 ds_divider_index;
+ u8 ss_divider_index;
+ u8 allow_gnb_slow;
+ u8 force_nbp_state;
+ u8 display_wm;
+ u8 vce_wm;
+};
+
+struct kv_ps {
+ struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
+ u32 num_levels;
+ bool need_dfs_bypass;
+ u8 dpm0_pg_nb_ps_lo;
+ u8 dpm0_pg_nb_ps_hi;
+ u8 dpmx_nb_ps_lo;
+ u8 dpmx_nb_ps_hi;
+};
+
+struct kv_sys_info {
+ u32 bootup_uma_clk;
+ u32 bootup_sclk;
+ u32 dentist_vco_freq;
+ u32 nb_dpm_enable;
+ u32 nbp_memory_clock[KV_NUM_NBPSTATES];
+ u32 nbp_n_clock[KV_NUM_NBPSTATES];
+ u16 bootup_nb_voltage_index;
+ u8 htc_tmp_lmt;
+ u8 htc_hyst_lmt;
+ struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
+ struct sumo_vid_mapping_table vid_mapping_table;
+ u32 uma_channel_number;
+};
+
+struct kv_power_info {
+ u32 at[SUMO_MAX_HARDWARE_POWERLEVELS];
+ u32 voltage_drop_t;
+ struct kv_sys_info sys_info;
+ struct kv_pl boot_pl;
+ bool enable_nb_ps_policy;
+ bool disable_nb_ps3_in_battery;
+ bool video_start;
+ bool battery_state;
+ u32 lowest_valid;
+ u32 highest_valid;
+ u16 high_voltage_t;
+ bool cac_enabled;
+ bool bapm_enable;
+ /* smc offsets */
+ u32 sram_end;
+ u32 dpm_table_start;
+ u32 soft_regs_start;
+ /* dpm SMU tables */
+ u8 graphics_dpm_level_count;
+ u8 uvd_level_count;
+ u8 vce_level_count;
+ u8 acp_level_count;
+ u8 samu_level_count;
+ u16 fps_high_t;
+ SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE];
+ SMU7_Fusion_ACPILevel acpi_level;
+ SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD];
+ SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE];
+ SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP];
+ SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU];
+ u8 uvd_boot_level;
+ u8 vce_boot_level;
+ u8 acp_boot_level;
+ u8 samu_boot_level;
+ u8 uvd_interval;
+ u8 vce_interval;
+ u8 acp_interval;
+ u8 samu_interval;
+ u8 graphics_boot_level;
+ u8 graphics_interval;
+ u8 graphics_therm_throttle_enable;
+ u8 graphics_voltage_change_enable;
+ u8 graphics_clk_slow_enable;
+ u8 graphics_clk_slow_divider;
+ u8 fps_low_t;
+ u32 low_sclk_interrupt_t;
+ bool uvd_power_gated;
+ bool vce_power_gated;
+ bool acp_power_gated;
+ bool samu_power_gated;
+ bool nb_dpm_enabled;
+ /* flags */
+ bool enable_didt;
+ bool enable_dpm;
+ bool enable_auto_thermal_throttling;
+ bool enable_nb_dpm;
+ /* caps */
+ bool caps_cac;
+ bool caps_power_containment;
+ bool caps_sq_ramping;
+ bool caps_db_ramping;
+ bool caps_td_ramping;
+ bool caps_tcp_ramping;
+ bool caps_sclk_throttle_low_notification;
+ bool caps_fps;
+ bool caps_uvd_dpm;
+ bool caps_uvd_pg;
+ bool caps_vce_pg;
+ bool caps_samu_pg;
+ bool caps_acp_pg;
+ bool caps_stable_p_state;
+ bool caps_enable_dfs_bypass;
+ bool caps_sclk_ds;
+ struct radeon_ps current_rps;
+ struct kv_ps current_ps;
+ struct radeon_ps requested_rps;
+ struct kv_ps requested_ps;
+};
+
+
+/* kv_smc.c */
+int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id);
+int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask);
+int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 parameter);
+int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
+ u32 *value, u32 limit);
+int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
+int kv_copy_bytes_to_smc(struct radeon_device *rdev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
new file mode 100644
index 000000000000..34a226d7e34a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include "drmP.h"
+#include "radeon.h"
+#include "cikd.h"
+#include "kv_dpm.h"
+
+int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id)
+{
+ u32 i;
+ u32 tmp = 0;
+
+ WREG32(SMC_MESSAGE_0, id & SMC_MSG_MASK);
+
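+ /* poll the response register until the SMC acknowledges the
+ * message or rdev->usec_timeout microseconds elapse
+ */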
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if ((RREG32(SMC_RESP_0) & SMC_RESP_MASK) != 0)
+ break;
+ udelay(1);
+ }
+ tmp = RREG32(SMC_RESP_0) & SMC_RESP_MASK;
+
+ if (tmp != 1) {
+ if (tmp == 0xFF)
+ return -EINVAL;
+ else if (tmp == 0xFE)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask)
+{
+ int ret;
+
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_SCLKDPM_GetEnabledMask);
+
+ if (ret == 0)
+ *enable_mask = RREG32_SMC(SMC_SYSCON_MSG_ARG_0);
+
+ return ret;
+}
+
+int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 parameter)
+{
+ WREG32(SMC_MSG_ARG_0, parameter);
+
+ return kv_notify_message_to_smu(rdev, msg);
+}
+
+static int kv_set_smc_sram_address(struct radeon_device *rdev,
+ u32 smc_address, u32 limit)
+{
+ if (smc_address & 3)
+ return -EINVAL;
+ if ((smc_address + 3) > limit)
+ return -EINVAL;
+
+ WREG32(SMC_IND_INDEX_0, smc_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+ return 0;
+}
+
+int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
+ u32 *value, u32 limit)
+{
+ int ret;
+
+ ret = kv_set_smc_sram_address(rdev, smc_address, limit);
+ if (ret)
+ return ret;
+
+ *value = RREG32(SMC_IND_DATA_0);
+ return 0;
+}
+
+int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Enable);
+ else
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
+}
+
+int kv_copy_bytes_to_smc(struct radeon_device *rdev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit)
+{
+ int ret;
+ u32 data, original_data, addr, extra_shift, t_byte, count, mask;
+
+ if ((smc_start_address + byte_count) > limit)
+ return -EINVAL;
+
+ addr = smc_start_address;
+ t_byte = addr & 3;
+
+ /* RMW for the initial bytes */
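+ /* back up to the dword boundary and build a byte mask: bytes set
+ * in the mask keep their original value, bytes cleared take the
+ * new data; SMC address space is BE, so the first byte processed
+ * lands in the most significant position
+ */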
+ if (t_byte != 0) {
+ addr -= t_byte;
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ original_data = RREG32(SMC_IND_DATA_0);
+
+ data = 0;
+ mask = 0;
+ count = 4;
+ while (count > 0) {
+ if (t_byte > 0) {
+ mask = (mask << 8) | 0xff;
+ t_byte--;
+ } else if (byte_count > 0) {
+ data = (data << 8) + *src++;
+ byte_count--;
+ mask <<= 8;
+ } else {
+ data <<= 8;
+ mask = (mask << 8) | 0xff;
+ }
+ count--;
+ }
+
+ data |= original_data & mask;
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ addr += 4;
+ }
+
+ while (byte_count >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ /* RMW for the final bytes */
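+ /* shift the remaining source bytes into the high end of the dword
+ * and preserve the low 8 * (4 - byte_count) bits of the original
+ */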
+ if (byte_count > 0) {
+ data = 0;
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ original_data = RREG32(SMC_IND_DATA_0);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ /* SMC address space is BE */
+ data = (data << 8) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+ }
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 5b6e47765656..93c1f9ef5da9 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -35,7 +35,7 @@
#include "radeon_ucode.h"
#include "clearstate_cayman.h"
-static u32 tn_rlc_save_restore_register_list[] =
+static const u32 tn_rlc_save_restore_register_list[] =
{
0x98fc,
0x98f0,
@@ -160,7 +160,6 @@ static u32 tn_rlc_save_restore_register_list[] =
0x9830,
0x802c,
};
-static u32 tn_rlc_save_restore_register_list_size = ARRAY_SIZE(tn_rlc_save_restore_register_list);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
@@ -175,6 +174,11 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
+extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -794,9 +798,13 @@ int ni_init_microcode(struct radeon_device *rdev)
if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->smc_fw->size != smc_req_size) {
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"ni_mc: Bogus length %zu in firmware \"%s\"\n",
rdev->mc_fw->size, fw_name);
@@ -1370,23 +1378,6 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, 10); /* poll interval */
}
-void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
- radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
-}
-
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
@@ -1609,186 +1600,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
return 0;
}
-/*
- * DMA
- * Starting with R600, the GPU has an asynchronous
- * DMA engine. The programming model is very similar
- * to the 3D engine (ring buffer, IBs, etc.), but the
- * DMA controller has it's own packet format that is
- * different form the PM4 format used by the 3D engine.
- * It supports copying data, writing embedded data,
- * solid fills, and a number of other things. It also
- * has support for tiling/detiling of buffers.
- * Cayman and newer support two asynchronous DMA engines.
- */
-/**
- * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (cayman-SI).
- */
-void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
- struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 4;
- while ((next_rptr & 7) != 5)
- next_rptr++;
- next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
- radeon_ring_write(ring, next_rptr);
- }
-
- /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
- * Pad as necessary with NOPs.
- */
- while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
- radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
- radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-
-}
-
-/**
- * cayman_dma_stop - stop the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engines (cayman-SI).
- */
-void cayman_dma_stop(struct radeon_device *rdev)
-{
- u32 rb_cntl;
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-
- /* dma0 */
- rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
-
- /* dma1 */
- rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
-
- rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
-}
-
-/**
- * cayman_dma_resume - setup and start the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Set up the DMA ring buffers and enable them. (cayman-SI).
- * Returns 0 for success, error for failure.
- */
-int cayman_dma_resume(struct radeon_device *rdev)
-{
- struct radeon_ring *ring;
- u32 rb_cntl, dma_cntl, ib_cntl;
- u32 rb_bufsz;
- u32 reg_offset, wb_offset;
- int i, r;
-
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
- for (i = 0; i < 2; i++) {
- if (i == 0) {
- ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- reg_offset = DMA0_REGISTER_OFFSET;
- wb_offset = R600_WB_DMA_RPTR_OFFSET;
- } else {
- ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
- reg_offset = DMA1_REGISTER_OFFSET;
- wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
- }
-
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
-
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = rb_bufsz << 1;
-#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
-#endif
- WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR + reg_offset, 0);
- WREG32(DMA_RB_WPTR + reg_offset, 0);
-
- /* set the wb address whether it's enabled or not */
- WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
- upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
- WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
- ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
-
- if (rdev->wb.enabled)
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
-
- WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
-
- /* enable DMA IBs */
- ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
-#ifdef __BIG_ENDIAN
- ib_cntl |= DMA_IB_SWAP_ENABLE;
-#endif
- WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
-
- dma_cntl = RREG32(DMA_CNTL + reg_offset);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL + reg_offset, dma_cntl);
-
- ring->wptr = 0;
- WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
-
- ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
-
- WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
-
- ring->ready = true;
-
- r = radeon_ring_test(rdev, ring->idx, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
- }
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
- return 0;
-}
-
-/**
- * cayman_dma_fini - tear down the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engines and free the rings (cayman-SI).
- */
-void cayman_dma_fini(struct radeon_device *rdev)
-{
- cayman_dma_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
- radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
-}
-
-static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -2041,34 +1853,6 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * cayman_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
- u32 mask;
-
- if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- mask = RADEON_RESET_DMA;
- else
- mask = RADEON_RESET_DMA1;
-
- if (!(reset_mask & mask)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
static int cayman_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -2079,6 +1863,13 @@ static int cayman_startup(struct radeon_device *rdev)
/* enable aspm */
evergreen_program_aspm(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ evergreen_mc_program(rdev);
+
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = ni_init_microcode(rdev);
@@ -2103,27 +1894,16 @@ static int cayman_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
- evergreen_mc_program(rdev);
r = cayman_pcie_gart_enable(rdev);
if (r)
return r;
cayman_gpu_init(rdev);
- r = evergreen_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
-
/* allocate rlc buffers */
if (rdev->flags & RADEON_IS_IGP) {
rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
- rdev->rlc.reg_list_size = tn_rlc_save_restore_register_list_size;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
rdev->rlc.cs_data = cayman_cs_data;
r = sumo_rlc_init(rdev);
if (r) {
@@ -2143,7 +1923,7 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
- r = rv770_uvd_resume(rdev);
+ r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
@@ -2194,7 +1974,7 @@ static int cayman_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
@@ -2202,7 +1982,7 @@ static int cayman_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -2210,7 +1990,7 @@ static int cayman_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -2227,12 +2007,11 @@ static int cayman_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
@@ -2249,9 +2028,15 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r)
- return r;
+ if (ASIC_IS_DCE6(rdev)) {
+ r = dce6_audio_init(rdev);
+ if (r)
+ return r;
+ } else {
+ r = r600_audio_init(rdev);
+ if (r)
+ return r;
+ }
return 0;
}
@@ -2282,11 +2067,14 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
- r600_audio_fini(rdev);
+ if (ASIC_IS_DCE6(rdev))
+ dce6_audio_fini(rdev);
+ else
+ r600_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
- r600_uvd_rbc_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
@@ -2408,7 +2196,6 @@ int cayman_init(struct radeon_device *rdev)
void cayman_fini(struct radeon_device *rdev)
{
- r600_blit_fini(rdev);
cayman_cp_fini(rdev);
cayman_dma_fini(rdev);
r600_irq_fini(rdev);
@@ -2418,6 +2205,7 @@ void cayman_fini(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -2678,61 +2466,7 @@ void cayman_vm_set_page(struct radeon_device *rdev,
}
}
} else {
- if ((flags & RADEON_VM_PAGE_SYSTEM) ||
- (count == 1)) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
- }
- }
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+ cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
}
}
@@ -2766,26 +2500,3 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
-
-void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
-
- /* flush hdp cache */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
- radeon_ring_write(ring, 1);
-
- /* bits 0-7 are the VM contexts0-7 */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
- radeon_ring_write(ring, 1 << vm->id);
-}
-
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
new file mode 100644
index 000000000000..dd6e9688fbef
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "nid.h"
+
+u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
+
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
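+ /* predict where the read pointer will be once the 4 dword
+ * write-back packet, the NOP padding and the 3 dword IB packet
+ * have all been fetched
+ */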
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ /* dma0 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+ /* dma1 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them. (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ u32 reg_offset, wb_offset;
+ int i, r;
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ reg_offset = DMA0_REGISTER_OFFSET;
+ wb_offset = R600_WB_DMA_RPTR_OFFSET;
+ } else {
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ reg_offset = DMA1_REGISTER_OFFSET;
+ wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+ }
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR + reg_offset, 0);
+ WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+ upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+ ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL + reg_offset);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, ring->idx, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+ cayman_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+ radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * cayman_dma_vm_set_page - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
+ if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+}
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 559cf24d51af..f7b625c9e0e9 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -769,7 +769,8 @@ bool ni_dpm_vblank_too_short(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
- u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+ /* we never hit the non-gddr5 limit so disable it */
+ u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
if (vblank_time < switch_limit)
return true;
@@ -1054,10 +1055,6 @@ static int ni_restrict_performance_levels_before_switch(struct radeon_device *rd
int ni_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
- struct ni_ps *ps = ni_get_ps(rps);
- u32 levels;
-
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
@@ -1068,8 +1065,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev,
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
- levels = ps->performance_level_count - 1;
- if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+ if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
return -EINVAL;
} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
@@ -4042,6 +4038,7 @@ static int ni_parse_power_table(struct radeon_device *rdev)
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
if (power_info->pplib.ucStateEntrySize - 1) {
+ u8 *idx;
ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
@@ -4051,12 +4048,12 @@ static int ni_parse_power_table(struct radeon_device *rdev)
ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info,
power_info->pplib.ucNonClockSize);
+ idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
clock_info = (union pplib_clock_info *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
- (power_state->v1.ucClockStateIndices[j] *
- power_info->pplib.ucClockInfoSize));
+ (idx[j] * power_info->pplib.ucClockInfoSize));
ni_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], j,
clock_info);
@@ -4072,9 +4069,6 @@ int ni_dpm_init(struct radeon_device *rdev)
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
struct ni_power_info *ni_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- u16 data_offset, size;
- u8 frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -4167,16 +4161,7 @@ int ni_dpm_init(struct radeon_device *rdev)
eg_pi->vddci_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -4193,8 +4178,7 @@ int ni_dpm_init(struct radeon_device *rdev)
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
@@ -4288,6 +4272,12 @@ int ni_dpm_init(struct radeon_device *rdev)
ni_pi->use_power_boost_limit = true;
+ /* make sure dc limits are valid */
+ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index b5564a3645d2..682842804bce 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -99,11 +99,68 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
+/* CI/KV/KB */
+#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
+#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
+#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
+#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
+#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
+#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
+#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
+#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
+#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
+#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
+#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
+#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
+#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
+#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
+#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
+#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
+#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
+#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
+#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
+#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
+#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
+#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
+#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
+#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
+#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
+#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
+#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
+#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
+#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
+#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
+#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
+#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
+#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
+#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
+#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
+#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
+#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
+#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
+#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
+#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
+#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
+#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
+#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
+#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
+#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
+#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
+#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
+#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
+#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
+
+#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
+#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
+
/* TN */
#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
+#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109)
+#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e)
+#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
new file mode 100644
index 000000000000..da43ab328833
--- /dev/null
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -0,0 +1,682 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _PPTABLE_H
+#define _PPTABLE_H
+
+#pragma pack(push, 1)
+
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+
+{
+ UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
+ UCHAR ucI2cLine; // as interpreted by DAL I2C
+ UCHAR ucI2cAddress;
+ UCHAR ucFanParameters; // Fan Control Parameters.
+ UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
+ UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
+ UCHAR ucReserved; // ----
+ UCHAR ucFlags; // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
+
+#define ATOM_PP_THERMALCONTROLLER_NONE 0
+#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64 5
+#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
+#define ATOM_PP_THERMALCONTROLLER_RV770 8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+#define ATOM_PP_THERMALCONTROLLER_KONG 10
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
+#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
+#define ATOM_PP_THERMALCONTROLLER_LM96163 17
+#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18
+#define ATOM_PP_THERMALCONTROLLER_KAVERI 19
+
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
+
+typedef struct _ATOM_PPLIB_STATE
+{
+ UCHAR ucNonClockStateIndex;
+ UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+ UCHAR ucFanTableFormat; // Change this whenever the table format or version changes, so the remaining fields are not misinterpreted.
+ UCHAR ucTHyst; // Temperature hysteresis. Integer.
+ USHORT usTMin; // The temperature, in units of 0.01 degrees centigrade, below which we just run at a minimal PWM.
+ USHORT usTMed; // The middle temperature where we change slopes.
+ USHORT usTHigh; // The high point above TMed for adjusting the second slope.
+ USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
+ USHORT usPWMMed; // The PWM value (in percent) at TMed.
+ USHORT usPWMHigh; // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
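+
+// Illustrative example (values made up, not from any BIOS): with
+// usTMin = 4500, usTMed = 6500, usPWMMin = 1000 (10.00%) and
+// usPWMMed = 4000 (40.00%), a reading of 55.00C interpolates on the
+// first slope: 10.00% + ((5500 - 4500) / (6500 - 4500)) * (40.00% - 10.00%) = 25.00%.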
+
+typedef struct _ATOM_PPLIB_FANTABLE2
+{
+ ATOM_PPLIB_FANTABLE basicTable;
+ USHORT usTMax; // The max temperature
+} ATOM_PPLIB_FANTABLE2;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+ USHORT usSize;
+ ULONG ulMaxEngineClock; // For Overdrive.
+ ULONG ulMaxMemoryClock; // For Overdrive.
+ // Add extra system parameters here, always adjust size to include all fields.
+ USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
+ USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
+ USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
+ USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table
+ USHORT usACPTableOffset; //points to ATOM_PPLIB_ACP_Table
+ USHORT usPowerTuneTableOffset; //points to ATOM_PPLIB_POWERTUNE_Table
+} ATOM_PPLIB_EXTENDEDHEADER;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver support the BACO state.
+#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver support the new CAC voltage table.
+#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver support reverting GPIO5 polarity.
+#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver support thermal2GPIO17.
+#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver support a configurable VR HOT GPIO.
+#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION 0x00400000 // Does the driver support the Temp Inversion feature.
+#define ATOM_PP_PLATFORM_CAP_EVV 0x00800000
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ UCHAR ucDataRevision;
+
+ UCHAR ucNumStates;
+ UCHAR ucStateEntrySize;
+ UCHAR ucClockInfoSize;
+ UCHAR ucNonClockSize;
+
+ // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+ USHORT usStateArrayOffset;
+
+ // offset from start of this table to array of ASIC-specific structures,
+ // currently ATOM_PPLIB_CLOCK_INFO.
+ USHORT usClockInfoArrayOffset;
+
+ // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+ USHORT usNonClockInfoArrayOffset;
+
+ USHORT usBackbiasTime; // in microseconds
+ USHORT usVoltageTime; // in microseconds
+ USHORT usTableSize; //the size of this structure, or the extended structure
+
+ ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
+
+ ATOM_PPLIB_THERMALCONTROLLER sThermalController;
+
+ USHORT usBootClockInfoOffset;
+ USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+ ATOM_PPLIB_POWERPLAYTABLE basicTable;
+ UCHAR ucNumCustomThermalPolicy;
+ USHORT usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+ ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+ USHORT usFormatID; // To be used ONLY by PPGen.
+ USHORT usFanTableOffset;
+ USHORT usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+ ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+ ULONG ulGoldenPPID; // PPGen use only
+ ULONG ulGoldenRevision; // PPGen use only
+ USHORT usVddcDependencyOnSCLKOffset;
+ USHORT usVddciDependencyOnMCLKOffset;
+ USHORT usVddcDependencyOnMCLKOffset;
+ USHORT usMaxClockVoltageOnDCOffset;
+ USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
+ USHORT usMvddDependencyOnMCLKOffset;
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+ ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+ ULONG ulTDPLimit;
+ ULONG ulNearTDPLimit;
+ ULONG ulSQRampingThreshold;
+ USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
+ ULONG ulCACLeakage; // The iLeakage value for the driver-calculated CAC leakage table
+ USHORT usTDPODLimit;
+ USHORT usLoadLineSlope; // in milliOhms * 100
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
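Each later POWERPLAYTABLE revision embeds its predecessor as the first member, so one base pointer can be re-cast once usTableSize proves the larger layout is actually present. A hedged sketch of that idiom (the helper name is illustrative; the radeon DPM code performs the same usTableSize comparison):

static const ATOM_PPLIB_POWERPLAYTABLE5 *
pplib_as_v5(const ATOM_PPLIB_POWERPLAYTABLE *base)
{
	/* usTableSize covers the extended layout, not just the base struct */
	if (le16_to_cpu(base->usTableSize) >= sizeof(ATOM_PPLIB_POWERPLAYTABLE5))
		return (const ATOM_PPLIB_POWERPLAYTABLE5 *)base;
	return NULL;	/* older revision: fall back to V4/V3 checks */
}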
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
+#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
+
+// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
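Because the width field stores lanes minus one and the speed field stores the generation minus one, decoding each is a mask, a shift, and an increment. An illustrative sketch (helper names are not from this patch):

static unsigned int pplib_pcie_lanes(ULONG caps)
{
	/* the field encodes (lanes - 1), per the comment above */
	return ((caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
		ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
}

static unsigned int pplib_pcie_gen(ULONG caps)
{
	/* 0 -> 2.5 Gb/s (gen1), 1 -> 5 Gb/s (gen2) */
	return ((caps & ATOM_PPLIB_PCIE_LINK_SPEED_MASK) >>
		ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT) + 1;
}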
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
+
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x00010000
+
+// M3 arbitration: 2 bits, currently 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK 0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT 17
+
+#define ATOM_PPLIB_ENABLE_DRR 0x00080000
+
+// remaining 16 bits are reserved
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+ USHORT usClassification;
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ ULONG ulCapsAndSettings;
+ UCHAR ucRequiredPower;
+ USHORT usClassification2;
+ ULONG ulVCLK;
+ ULONG ulDCLK;
+ UCHAR ucUnused[5];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usUnused1;
+ USHORT usUnused2;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+
+{
+ USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
+ UCHAR ucLowEngineClockHigh;
+ USHORT usHighEngineClockLow; // High Engine clock in MHz.
+ UCHAR ucHighEngineClockHigh;
+ USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+ UCHAR ucMemoryClockHigh; // Currently unused.
+ UCHAR ucPadding; // For proper alignment and size.
+ USHORT usVDDC; // For the 780, use: None, Low, High, Variable
+ UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could
+ USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
+ ULONG ulFlags;
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ USHORT usUnused;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ UCHAR ucPCIEGen;
+ UCHAR ucUnused1;
+
+ ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
+
+} ATOM_PPLIB_SI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ UCHAR ucPCIEGen;
+ USHORT usPCIELane;
+} ATOM_PPLIB_CI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+ USHORT usEngineClockLow; //clock frequency & 0xFFFF. The unit is 10 kHz.
+ UCHAR ucEngineClockHigh; //clock frequency >> 16.
+ UCHAR vddcIndex; //2-bit VDDC index.
+ USHORT tdpLimit;
+ //please initialize to 0
+ USHORT rsv1;
+ //please initialize to 0s
+ ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
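Every *_CLOCK_INFO variant above splits each clock into a 16-bit low word plus an 8-bit high byte in 10 kHz units, exactly as the SUMO comments spell out. Reassembly is a shift-and-or; a small sketch (le16_to_cpu assumed, since the table comes from the VBIOS in little-endian):

static u32 sumo_sclk_10khz(const ATOM_PPLIB_SUMO_CLOCK_INFO *ci)
{
	/* 24-bit clock in 10 kHz units, e.g. 50000 -> 500 MHz */
	return le16_to_cpu(ci->usEngineClockLow) |
	       ((u32)ci->ucEngineClockHigh << 16);
}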
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+ //number of valid DPM levels in this state; the driver uses it to calculate the whole
+ //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ UCHAR ucNumDPMLevels;
+
+ //an index into the array of nonClockInfos
+ UCHAR nonClockInfoIndex;
+ /**
+ * Driver will read the first ucNumDPMLevels in this array
+ */
+ UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct _StateArray{
+ //how many states we have
+ UCHAR ucNumEntries;
+
+ ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
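Since ATOM_PPLIB_STATE_V2 ends in a flexible clockInfoIndex array, StateArray entries are variable-sized and cannot be stepped through with plain array arithmetic; each step must apply the size formula from the struct comment. A hedged sketch:

static const ATOM_PPLIB_STATE_V2 *
pplib_next_state(const ATOM_PPLIB_STATE_V2 *s)
{
	/* size formula from the ATOM_PPLIB_STATE_V2 comment above */
	size_t len = sizeof(*s) + (s->ucNumDPMLevels - 1) * sizeof(UCHAR);

	return (const ATOM_PPLIB_STATE_V2 *)((const u8 *)s + len);
}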
+
+
+typedef struct _ClockInfoArray{
+ //how many clock levels we have
+ UCHAR ucNumEntries;
+
+ //sizeof(ATOM_PPLIB_CLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ UCHAR clockInfo[1];
+}ClockInfoArray;
+
+typedef struct _NonClockInfoArray{
+
+ //how many non-clock levels we have; normally the same as the number of states
+ UCHAR ucNumEntries;
+ //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
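ClockInfoArray and NonClockInfoArray carry their per-entry size precisely so readers index by byte stride instead of by C type; ucEntrySize is also what distinguishes old 12-byte (VER1) non-clock records, which lack ulVCLK/ulDCLK, from the 24-byte VER2 layout above. Sketch:

static const ATOM_PPLIB_NONCLOCK_INFO *
pplib_nonclock_at(const NonClockInfoArray *a, unsigned int idx)
{
	const u8 *base = (const u8 *)a->nonClockInfo;

	if (idx >= a->ucNumEntries)
		return NULL;
	/* stride by ucEntrySize; only VER2 records carry ulVCLK/ulDCLK */
	return (const ATOM_PPLIB_NONCLOCK_INFO *)(base + idx * a->ucEntrySize);
}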
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+ USHORT usClockLow;
+ UCHAR ucClockHigh;
+ USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+ USHORT usVddc;
+ USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
+union _ATOM_PPLIB_CAC_Leakage_Record
+{
+ struct
+ {
+ USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations. For CI and newer we use this as the real VDDC value; in CI it is read as StdVoltageHiSidd.
+ ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standard VDDC value; in CI it is read as StdVoltageLoSidd.
+
+
+ };
+ struct
+ {
+ USHORT usVddc1;
+ USHORT usVddc2;
+ USHORT usVddc3;
+ };
+};
+
+typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_CAC_Leakage_Table;
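The leakage record is a union because pre-CI and CI-class parts reuse the same six packed bytes with different meanings, so a reader has to branch on the ASIC mode before picking a view. Illustrative sketch (the evv flag stands in for however the caller tracks the new-CAC/EVV mode):

static void cac_record_dump(const ATOM_PPLIB_CAC_Leakage_Record *r, bool evv)
{
	if (evv)	/* CI and newer: three voltage readings */
		pr_info("vddc1=%u vddc2=%u vddc3=%u\n",
			le16_to_cpu(r->usVddc1), le16_to_cpu(r->usVddc2),
			le16_to_cpu(r->usVddc3));
	else		/* older parts: voltage plus leakage value */
		pr_info("vddc=%u leakage=%u\n",
			le16_to_cpu(r->usVddc),
			le32_to_cpu(r->ulLeakageValue));
}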
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+{
+ USHORT usVoltage;
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+}ATOM_PPLIB_PhaseSheddingLimits_Record;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_PhaseSheddingLimits_Table;
+
+typedef struct _VCEClockInfo{
+ USHORT usEVClkLow;
+ UCHAR ucEVClkHigh;
+ USHORT usECClkLow;
+ UCHAR ucECClkHigh;
+}VCEClockInfo;
+
+typedef struct _VCEClockInfoArray{
+ UCHAR ucNumEntries;
+ VCEClockInfo entries[1];
+}VCEClockInfoArray;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ UCHAR ucVCEClockInfoIndex;
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_VCE_State_Record
+{
+ UCHAR ucVCEClockInfoIndex;
+ UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into the ClockInfoArray
+}ATOM_PPLIB_VCE_State_Record;
+
+typedef struct _ATOM_PPLIB_VCE_State_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_State_Record entries[1];
+}ATOM_PPLIB_VCE_State_Table;
+
+
+typedef struct _ATOM_PPLIB_VCE_Table
+{
+ UCHAR revid;
+// VCEClockInfoArray array;
+// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
+// ATOM_PPLIB_VCE_State_Table states;
+}ATOM_PPLIB_VCE_Table;
+
+
+typedef struct _UVDClockInfo{
+ USHORT usVClkLow;
+ UCHAR ucVClkHigh;
+ USHORT usDClkLow;
+ UCHAR ucDClkHigh;
+}UVDClockInfo;
+
+typedef struct _UVDClockInfoArray{
+ UCHAR ucNumEntries;
+ UVDClockInfo entries[1];
+}UVDClockInfoArray;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ UCHAR ucUVDClockInfoIndex;
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_UVD_Table
+{
+ UCHAR revid;
+// UVDClockInfoArray array;
+// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
+}ATOM_PPLIB_UVD_Table;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ USHORT usSAMClockLow;
+ UCHAR ucSAMClockHigh;
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
+ UCHAR numEntries;
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_SAMU_Table
+{
+ UCHAR revid;
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_SAMU_Table;
+
+typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ USHORT usACPClockLow;
+ UCHAR ucACPClockHigh;
+}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
+ UCHAR numEntries;
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_ACP_Table
+{
+ UCHAR revid;
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_ACP_Table;
+
+typedef struct _ATOM_PowerTune_Table{
+ USHORT usTDP;
+ USHORT usConfigurableTDP;
+ USHORT usTDC;
+ USHORT usBatteryPowerLimit;
+ USHORT usSmallPowerLimit;
+ USHORT usLowCACLeakage;
+ USHORT usHighCACLeakage;
+}ATOM_PowerTune_Table;
+
+typedef struct _ATOM_PPLIB_POWERTUNE_Table
+{
+ UCHAR revid;
+ ATOM_PowerTune_Table power_tune_table;
+}ATOM_PPLIB_POWERTUNE_Table;
+
+typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
+{
+ UCHAR revid;
+ ATOM_PowerTune_Table power_tune_table;
+ USHORT usMaximumPowerDeliveryLimit;
+ USHORT usReserve[7];
+} ATOM_PPLIB_POWERTUNE_Table_V1;
+
+#define ATOM_PPM_A_A 1
+#define ATOM_PPM_A_I 2
+typedef struct _ATOM_PPLIB_PPM_Table
+{
+ UCHAR ucRevId;
+ UCHAR ucPpmDesign; //A+I or A+A
+ USHORT usCpuCoreNumber;
+ ULONG ulPlatformTDP;
+ ULONG ulSmallACPlatformTDP;
+ ULONG ulPlatformTDC;
+ ULONG ulSmallACPlatformTDC;
+ ULONG ulApuTDP;
+ ULONG ulDGpuTDP;
+ ULONG ulDGpuUlvPower;
+ ULONG ulTjmax;
+} ATOM_PPLIB_PPM_Table;
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5625cf706f0c..9fc61dd68bc0 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1102,7 +1102,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
r100_cp_load_microcode(rdev);
r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
- 0, 0x7fffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cfc1d28ade39..ea4d3734e6d9 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1374,7 +1374,7 @@ static bool r600_is_display_hung(struct radeon_device *rdev)
return true;
}
-static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -1622,28 +1622,6 @@ bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * r600_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = r600_gpu_check_soft_reset(rdev);
-
- if (!(reset_mask & RADEON_RESET_DMA)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
@@ -2299,9 +2277,13 @@ int r600_init_microcode(struct radeon_device *rdev)
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->smc_fw->size != smc_req_size) {
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"smc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
@@ -2490,327 +2472,6 @@ void r600_cp_fini(struct radeon_device *rdev)
}
/*
- * DMA
- * Starting with R600, the GPU has an asynchronous
- * DMA engine. The programming model is very similar
- * to the 3D engine (ring buffer, IBs, etc.), but the
- * DMA controller has it's own packet format that is
- * different form the PM4 format used by the 3D engine.
- * It supports copying data, writing embedded data,
- * solid fills, and a number of other things. It also
- * has support for tiling/detiling of buffers.
- */
-/**
- * r600_dma_stop - stop the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engine (r6xx-evergreen).
- */
-void r600_dma_stop(struct radeon_device *rdev)
-{
- u32 rb_cntl = RREG32(DMA_RB_CNTL);
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, rb_cntl);
-
- rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
-}
-
-/**
- * r600_dma_resume - setup and start the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
- * Returns 0 for success, error for failure.
- */
-int r600_dma_resume(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- u32 rb_cntl, dma_cntl, ib_cntl;
- u32 rb_bufsz;
- int r;
-
- /* Reset dma */
- if (rdev->family >= CHIP_RV770)
- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
- else
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
-
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = rb_bufsz << 1;
-#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
-#endif
- WREG32(DMA_RB_CNTL, rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR, 0);
- WREG32(DMA_RB_WPTR, 0);
-
- /* set the wb address whether it's enabled or not */
- WREG32(DMA_RB_RPTR_ADDR_HI,
- upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
- WREG32(DMA_RB_RPTR_ADDR_LO,
- ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
-
- if (rdev->wb.enabled)
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
-
- WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
-
- /* enable DMA IBs */
- ib_cntl = DMA_IB_ENABLE;
-#ifdef __BIG_ENDIAN
- ib_cntl |= DMA_IB_SWAP_ENABLE;
-#endif
- WREG32(DMA_IB_CNTL, ib_cntl);
-
- dma_cntl = RREG32(DMA_CNTL);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL, dma_cntl);
-
- if (rdev->family >= CHIP_RV770)
- WREG32(DMA_MODE, 1);
-
- ring->wptr = 0;
- WREG32(DMA_RB_WPTR, ring->wptr << 2);
-
- ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
-
- WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
-
- ring->ready = true;
-
- r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
- return 0;
-}
-
-/**
- * r600_dma_fini - tear down the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engine and free the ring (r6xx-evergreen).
- */
-void r600_dma_fini(struct radeon_device *rdev)
-{
- r600_dma_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
-}
-
-/*
- * UVD
- */
-int r600_uvd_rbc_start(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- uint64_t rptr_addr;
- uint32_t rb_bufsz, tmp;
- int r;
-
- rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
-
- if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
- DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
- return -EINVAL;
- }
-
- /* force RBC into idle state */
- WREG32(UVD_RBC_RB_CNTL, 0x11010101);
-
- /* Set the write pointer delay */
- WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
-
- /* set the wb address */
- WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
-
- /* programm the 4GB memory segment for rptr and ring buffer */
- WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
- (0x7 << 16) | (0x1 << 31));
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(UVD_RBC_RB_RPTR, 0x0);
-
- ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
- WREG32(UVD_RBC_RB_WPTR, ring->wptr);
-
- /* set the ring address */
- WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
-
- /* Set ring buffer size */
- rb_bufsz = order_base_2(ring->ring_size);
- rb_bufsz = (0x1 << 8) | rb_bufsz;
- WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
-
- ring->ready = true;
- r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
-
- r = radeon_ring_lock(rdev, ring, 10);
- if (r) {
- DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
- return r;
- }
-
- tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
- radeon_ring_write(ring, tmp);
- radeon_ring_write(ring, 0xFFFFF);
-
- tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
- radeon_ring_write(ring, tmp);
- radeon_ring_write(ring, 0xFFFFF);
-
- tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
- radeon_ring_write(ring, tmp);
- radeon_ring_write(ring, 0xFFFFF);
-
- /* Clear timeout status bits */
- radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
- radeon_ring_write(ring, 0x8);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
- radeon_ring_write(ring, 3);
-
- radeon_ring_unlock_commit(rdev, ring);
-
- return 0;
-}
-
-void r600_uvd_rbc_stop(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-
- /* force RBC into idle state */
- WREG32(UVD_RBC_RB_CNTL, 0x11010101);
- ring->ready = false;
-}
-
-int r600_uvd_init(struct radeon_device *rdev)
-{
- int i, j, r;
- /* disable byte swapping */
- u32 lmi_swap_cntl = 0;
- u32 mp_swap_cntl = 0;
-
- /* raise clocks while booting up the VCPU */
- radeon_set_uvd_clocks(rdev, 53300, 40000);
-
- /* disable clock gating */
- WREG32(UVD_CGC_GATE, 0);
-
- /* disable interupt */
- WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
-
- /* put LMI, VCPU, RBC etc... into reset */
- WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
- LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
- CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
- mdelay(5);
-
- /* take UVD block out of reset */
- WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
- mdelay(5);
-
- /* initialize UVD memory controller */
- WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
- (1 << 21) | (1 << 9) | (1 << 20));
-
-#ifdef __BIG_ENDIAN
- /* swap (8 in 32) RB and IB */
- lmi_swap_cntl = 0xa;
- mp_swap_cntl = 0;
-#endif
- WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
- WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
-
- WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
- WREG32(UVD_MPC_SET_MUXA1, 0x0);
- WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
- WREG32(UVD_MPC_SET_MUXB1, 0x0);
- WREG32(UVD_MPC_SET_ALU, 0);
- WREG32(UVD_MPC_SET_MUX, 0x88);
-
- /* Stall UMC */
- WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
- WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
-
- /* take all subblocks out of reset, except VCPU */
- WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
- mdelay(5);
-
- /* enable VCPU clock */
- WREG32(UVD_VCPU_CNTL, 1 << 9);
-
- /* enable UMC */
- WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
-
- /* boot up the VCPU */
- WREG32(UVD_SOFT_RESET, 0);
- mdelay(10);
-
- WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
-
- for (i = 0; i < 10; ++i) {
- uint32_t status;
- for (j = 0; j < 100; ++j) {
- status = RREG32(UVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
-
- DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
- WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
- mdelay(10);
- WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
- mdelay(10);
- r = -1;
- }
-
- if (r) {
- DRM_ERROR("UVD not responding, giving up!!!\n");
- radeon_set_uvd_clocks(rdev, 0, 0);
- return r;
- }
-
- /* enable interupt */
- WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
-
- r = r600_uvd_rbc_start(rdev);
- if (!r)
- DRM_INFO("UVD initialized successfully.\n");
-
- /* lower clocks again */
- radeon_set_uvd_clocks(rdev, 0, 0);
-
- return r;
-}
-
-/*
* GPU scratch registers helpers function.
*/
void r600_scratch_init(struct radeon_device *rdev)
@@ -2865,94 +2526,6 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
-/**
- * r600_dma_ring_test - simple async dma engine test
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test the DMA engine by writing using it to write an
- * value to memory. (r6xx-SI).
- * Returns 0 for success, error for failure.
- */
-int r600_dma_ring_test(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ring_lock(rdev, ring, 4);
- if (r) {
- DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
- return r;
- }
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
- radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
-
- if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- return r;
-}
-
-int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- uint32_t tmp = 0;
- unsigned i;
- int r;
-
- WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
- r = radeon_ring_lock(rdev, ring, 3);
- if (r) {
- DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
- return r;
- }
- radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(UVD_CONTEXT_ID);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
-
- if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- return r;
-}
-
/*
* CP fences/semaphores
*/
@@ -3004,30 +2577,6 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
-void r600_uvd_fence_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
-
- radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
- radeon_ring_write(ring, fence->seq);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
- radeon_ring_write(ring, addr & 0xffffffff);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
- radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
- radeon_ring_write(ring, 2);
- return;
-}
-
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
@@ -3044,95 +2593,6 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
-/*
- * DMA fences/semaphores
- */
-
-/**
- * r600_dma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write
- * the fence seq number and DMA trap packet to generate
- * an interrupt if needed (r6xx-r7xx).
- */
-void r600_dma_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
-
- /* write the fence */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
- radeon_ring_write(ring, addr & 0xfffffffc);
- radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
- radeon_ring_write(ring, lower_32_bits(fence->seq));
- /* generate an interrupt */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
-}
-
-/**
- * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- * @semaphore: radeon semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (r6xx-SI).
- */
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- u64 addr = semaphore->gpu_addr;
- u32 s = emit_wait ? 0 : 1;
-
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
- radeon_ring_write(ring, addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
-}
-
-void r600_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
- radeon_ring_write(ring, emit_wait ? 1 : 0);
-}
-
-int r600_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- struct radeon_sa_bo *vb = NULL;
- int r;
-
- r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
- if (r) {
- return r;
- }
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
- r600_blit_done_copy(rdev, fence, vb, sem);
- return 0;
-}
-
/**
* r600_copy_cpdma - copy pages using the CP DMA engine
*
@@ -3217,80 +2677,6 @@ int r600_copy_cpdma(struct radeon_device *rdev,
return r;
}
-/**
- * r600_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (r6xx).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int r600_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_dw, cur_size_in_dw;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
- num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
- r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_dw = size_in_dw;
- if (cur_size_in_dw > 0xFFFE)
- cur_size_in_dw = 0xFFFE;
- size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, src_offset & 0xfffffffc);
- radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
- (upper_32_bits(src_offset) & 0xff)));
- src_offset += cur_size_in_dw * 4;
- dst_offset += cur_size_in_dw * 4;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -3312,6 +2698,13 @@ static int r600_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
r600_pcie_gen2_enable(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ r600_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -3320,11 +2713,6 @@ static int r600_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
- r600_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
} else {
@@ -3333,12 +2721,6 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
r600_gpu_init(rdev);
- r = r600_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -3375,14 +2757,14 @@ static int r600_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -3551,7 +2933,6 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- r600_blit_fini(rdev);
r600_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
@@ -3603,16 +2984,6 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, ib->length_dw);
}
-void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
- radeon_ring_write(ring, ib->gpu_addr);
- radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
- radeon_ring_write(ring, ib->length_dw);
-}
-
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib ib;
@@ -3666,139 +3037,6 @@ free_scratch:
return r;
}
-/**
- * r600_dma_ib_test - test an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test a simple IB in the DMA ring (r6xx-SI).
- * Returns 0 on success, error on failure.
- */
-int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- struct radeon_ib ib;
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp = 0;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
- if (r) {
- DRM_ERROR("radeon: failed to get ib (%d).\n", r);
- return r;
- }
-
- ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
- ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
- ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
- ib.ptr[3] = 0xDEADBEEF;
- ib.length_dw = 4;
-
- r = radeon_ib_schedule(rdev, &ib, NULL);
- if (r) {
- radeon_ib_free(rdev, &ib);
- DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
- return r;
- }
- r = radeon_fence_wait(ib.fence, false);
- if (r) {
- DRM_ERROR("radeon: fence wait failed (%d).\n", r);
- return r;
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
- } else {
- DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
- r = -EINVAL;
- }
- radeon_ib_free(rdev, &ib);
- return r;
-}
-
-int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- struct radeon_fence *fence = NULL;
- int r;
-
- r = radeon_set_uvd_clocks(rdev, 53300, 40000);
- if (r) {
- DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
- return r;
- }
-
- r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
- if (r) {
- DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
- goto error;
- }
-
- r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
- if (r) {
- DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
- goto error;
- }
-
- r = radeon_fence_wait(fence, false);
- if (r) {
- DRM_ERROR("radeon: fence wait failed (%d).\n", r);
- goto error;
- }
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
-error:
- radeon_fence_unref(&fence);
- radeon_set_uvd_clocks(rdev, 0, 0);
- return r;
-}
-
-/**
- * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (r6xx-r7xx).
- */
-void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 4;
- while ((next_rptr & 7) != 5)
- next_rptr++;
- next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
- radeon_ring_write(ring, next_rptr);
- }
-
- /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
- * Pad as necessary with NOPs.
- */
- while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
- radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
- radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-
-}
-
/*
* Interrupts
*
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index c92eb86a8e55..47fc2b886979 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,12 +57,12 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev);
+ return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
}
-struct r600_audio r600_audio_status(struct radeon_device *rdev)
+struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
{
- struct r600_audio status;
+ struct r600_audio_pin status;
uint32_t value;
value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
@@ -120,16 +120,16 @@ void r600_audio_update_hdmi(struct work_struct *work)
struct radeon_device *rdev = container_of(work, struct radeon_device,
audio_work);
struct drm_device *dev = rdev->ddev;
- struct r600_audio audio_status = r600_audio_status(rdev);
+ struct r600_audio_pin audio_status = r600_audio_status(rdev);
struct drm_encoder *encoder;
bool changed = false;
- if (rdev->audio_status.channels != audio_status.channels ||
- rdev->audio_status.rate != audio_status.rate ||
- rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
- rdev->audio_status.status_bits != audio_status.status_bits ||
- rdev->audio_status.category_code != audio_status.category_code) {
- rdev->audio_status = audio_status;
+ if (rdev->audio.pin[0].channels != audio_status.channels ||
+ rdev->audio.pin[0].rate != audio_status.rate ||
+ rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
+ rdev->audio.pin[0].status_bits != audio_status.status_bits ||
+ rdev->audio.pin[0].category_code != audio_status.category_code) {
+ rdev->audio.pin[0] = audio_status;
changed = true;
}
@@ -141,13 +141,13 @@ void r600_audio_update_hdmi(struct work_struct *work)
}
}
-/*
- * turn on/off audio engine
- */
-static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
+/* enable the audio stream */
+static void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
u32 value = 0;
- DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
+
if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */
@@ -158,7 +158,7 @@ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
- rdev->audio_enabled = enable;
+ DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
}
/*
@@ -169,13 +169,17 @@ int r600_audio_init(struct radeon_device *rdev)
if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return 0;
- r600_audio_engine_enable(rdev, true);
+ rdev->audio.enabled = true;
+
+ rdev->audio.num_pins = 1;
+ rdev->audio.pin[0].channels = -1;
+ rdev->audio.pin[0].rate = -1;
+ rdev->audio.pin[0].bits_per_sample = -1;
+ rdev->audio.pin[0].status_bits = 0;
+ rdev->audio.pin[0].category_code = 0;
+ rdev->audio.pin[0].id = 0;
- rdev->audio_status.channels = -1;
- rdev->audio_status.rate = -1;
- rdev->audio_status.bits_per_sample = -1;
- rdev->audio_status.status_bits = 0;
- rdev->audio_status.category_code = 0;
+ r600_audio_enable(rdev, &rdev->audio.pin[0], true);
return 0;
}
@@ -186,8 +190,16 @@ int r600_audio_init(struct radeon_device *rdev)
*/
void r600_audio_fini(struct radeon_device *rdev)
{
- if (!rdev->audio_enabled)
+ if (!rdev->audio.enabled)
return;
- r600_audio_engine_enable(rdev, false);
+ r600_audio_enable(rdev, &rdev->audio.pin[0], false);
+
+ rdev->audio.enabled = false;
+}
+
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
+{
+ /* only one pin on 6xx-NI */
+ return &rdev->audio.pin[0];
}
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index f651881eb0ae..daf7572be976 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -31,6 +31,37 @@
#include "r600_blit_shaders.h"
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS 23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts unsigned integer into 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24. Above that, we round towards zero
+ * as the fractional bits will not fit in a float. (It would be better to
+ * round towards even as the fpu does, but that is slower.)
+ */
+static __pure uint32_t int2float(uint32_t x)
+{
+ uint32_t msb, exponent, fraction;
+
+ /* Zero is special */
+ if (!x) return 0;
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ /*
+ * Use a rotate instead of a shift because that works both leftwards
+ * and rightwards due to the mod(32) behaviour. This means we don't
+ * need to check to see if we are above 2^24 or not.
+ */
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return fraction + exponent;
+}
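The rotate works in both directions: for msb > 23 the rotate amount is positive and moves the fraction right, while for msb < 23 the mod-32 wraparound turns it into the needed left shift, so no branch on magnitude is required. A quick userspace self-check, with stand-in helpers replacing the kernel's __fls/ror32 (not part of this patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t my_ror32(uint32_t w, unsigned int s)
{
	s &= 31;
	return s ? (w >> s) | (w << (32 - s)) : w;
}

static uint32_t my_int2float(uint32_t x)
{
	uint32_t msb, fraction, exponent;

	if (!x)
		return 0;
	msb = 31 - __builtin_clz(x);		/* stand-in for __fls() */
	fraction = my_ror32(x, (msb - 23) & 0x1f) & ((1u << 23) - 1);
	exponent = (127 + msb) << 23;
	return fraction + exponent;
}

int main(void)
{
	uint32_t bits = my_int2float(48);
	float f;

	memcpy(&f, &bits, sizeof(f));	/* reinterpret the IEEE bits */
	printf("%f\n", f);		/* prints 48.000000 */
	return 0;
}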
+
#define DI_PT_RECTLIST 0x11
#define DI_INDEX_SIZE_16_BIT 0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
deleted file mode 100644
index 9fb5780a552f..000000000000
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ /dev/null
@@ -1,785 +0,0 @@
-/*
- * Copyright 2009 Advanced Micro Devices, Inc.
- * Copyright 2009 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon.h"
-
-#include "r600d.h"
-#include "r600_blit_shaders.h"
-#include "radeon_blit_common.h"
-
-/* 23 bits of float fractional data */
-#define I2F_FRAC_BITS 23
-#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
-
-/*
- * Converts unsigned integer into 32-bit IEEE floating point representation.
- * Will be exact from 0 to 2^24. Above that, we round towards zero
- * as the fractional bits will not fit in a float. (It would be better to
- * round towards even as the fpu does, but that is slower.)
- */
-__pure uint32_t int2float(uint32_t x)
-{
- uint32_t msb, exponent, fraction;
-
- /* Zero is special */
- if (!x) return 0;
-
- /* Get location of the most significant bit */
- msb = __fls(x);
-
- /*
- * Use a rotate instead of a shift because that works both leftwards
- * and rightwards due to the mod(32) behaviour. This means we don't
- * need to check to see if we are above 2^24 or not.
- */
- fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
- exponent = (127 + msb) << I2F_FRAC_BITS;
-
- return fraction + exponent;
-}
-
-/* emits 21 on rv770+, 23 on r600 */
-static void
-set_render_target(struct radeon_device *rdev, int format,
- int w, int h, u64 gpu_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 cb_color_info;
- int pitch, slice;
-
- h = ALIGN(h, 8);
- if (h < 8)
- h = 8;
-
- cb_color_info = CB_FORMAT(format) |
- CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
- CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- pitch = (w / 8) - 1;
- slice = ((w * h) / 64) - 1;
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
-
- if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
- radeon_ring_write(ring, 2 << 0);
- }
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (pitch << 0) | (slice << 10));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, cb_color_info);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-}
-
-/* emits 5dw */
-static void
-cp_set_surface_sync(struct radeon_device *rdev,
- u32 sync_type, u32 size,
- u64 mc_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 cp_coher_size;
-
- if (size == 0xffffffff)
- cp_coher_size = 0xffffffff;
- else
- cp_coher_size = ((size + 255) >> 8);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, sync_type);
- radeon_ring_write(ring, cp_coher_size);
- radeon_ring_write(ring, mc_addr >> 8);
- radeon_ring_write(ring, 10); /* poll interval */
-}
-
-/* emits 21dw + 1 surface sync = 26dw */
-static void
-set_shaders(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u64 gpu_addr;
- u32 sq_pgm_resources;
-
- /* setup shader regs */
- sq_pgm_resources = (1 << 0);
-
- /* VS */
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, sq_pgm_resources);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- /* PS */
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 2);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
-}
-
-/* emits 9 + 1 sync (5) = 14*/
-static void
-set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_vtx_constant_word2;
-
- sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
- SQ_VTXC_STRIDE(16);
-#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
-#endif
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(ring, 0x460);
- radeon_ring_write(ring, gpu_addr & 0xffffffff);
- radeon_ring_write(ring, 48 - 1);
- radeon_ring_write(ring, sq_vtx_constant_word2);
- radeon_ring_write(ring, 1 << 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
-
- if ((rdev->family == CHIP_RV610) ||
- (rdev->family == CHIP_RV620) ||
- (rdev->family == CHIP_RS780) ||
- (rdev->family == CHIP_RS880) ||
- (rdev->family == CHIP_RV710))
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, 48, gpu_addr);
- else
- cp_set_surface_sync(rdev,
- PACKET3_VC_ACTION_ENA, 48, gpu_addr);
-}
-
-/* emits 9 */
-static void
-set_tex_resource(struct radeon_device *rdev,
- int format, int w, int h, int pitch,
- u64 gpu_addr, u32 size)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
-
- if (h < 1)
- h = 1;
-
- sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
- S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
- sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
- S_038000_TEX_WIDTH(w - 1);
-
- sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
- sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);
-
- sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
- S_038010_DST_SEL_X(SQ_SEL_X) |
- S_038010_DST_SEL_Y(SQ_SEL_Y) |
- S_038010_DST_SEL_Z(SQ_SEL_Z) |
- S_038010_DST_SEL_W(SQ_SEL_W);
-
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, size, gpu_addr);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_tex_resource_word0);
- radeon_ring_write(ring, sq_tex_resource_word1);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, sq_tex_resource_word4);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
-}
-
-/* emits 12 */
-static void
-set_scissors(struct radeon_device *rdev, int x1, int y1,
- int x2, int y2)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-}
-
-/* emits 10 */
-static void
-draw_auto(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(ring, DI_PT_RECTLIST);
-
- radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
- (2 << 2) |
-#endif
- DI_INDEX_SIZE_16_BIT);
-
- radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(ring, 1);
-
- radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(ring, 3);
- radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
-
-}
-
-/* emits 14 */
-static void
-set_default_state(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
- u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
- int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
- int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
- int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
- u64 gpu_addr;
- int dwords;
-
- switch (rdev->family) {
- case CHIP_R600:
- num_ps_gprs = 192;
- num_vs_gprs = 56;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 136;
- num_vs_threads = 48;
- num_gs_threads = 4;
- num_es_threads = 4;
- num_ps_stack_entries = 128;
- num_vs_stack_entries = 128;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
- break;
- case CHIP_RV630:
- case CHIP_RV635:
- num_ps_gprs = 84;
- num_vs_gprs = 36;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 144;
- num_vs_threads = 40;
- num_gs_threads = 4;
- num_es_threads = 4;
- num_ps_stack_entries = 40;
- num_vs_stack_entries = 40;
- num_gs_stack_entries = 32;
- num_es_stack_entries = 16;
- break;
- case CHIP_RV610:
- case CHIP_RV620:
- case CHIP_RS780:
- case CHIP_RS880:
- default:
- num_ps_gprs = 84;
- num_vs_gprs = 36;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 136;
- num_vs_threads = 48;
- num_gs_threads = 4;
- num_es_threads = 4;
- num_ps_stack_entries = 40;
- num_vs_stack_entries = 40;
- num_gs_stack_entries = 32;
- num_es_stack_entries = 16;
- break;
- case CHIP_RV670:
- num_ps_gprs = 144;
- num_vs_gprs = 40;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 136;
- num_vs_threads = 48;
- num_gs_threads = 4;
- num_es_threads = 4;
- num_ps_stack_entries = 40;
- num_vs_stack_entries = 40;
- num_gs_stack_entries = 32;
- num_es_stack_entries = 16;
- break;
- case CHIP_RV770:
- num_ps_gprs = 192;
- num_vs_gprs = 56;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 188;
- num_vs_threads = 60;
- num_gs_threads = 0;
- num_es_threads = 0;
- num_ps_stack_entries = 256;
- num_vs_stack_entries = 256;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
- break;
- case CHIP_RV730:
- case CHIP_RV740:
- num_ps_gprs = 84;
- num_vs_gprs = 36;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 188;
- num_vs_threads = 60;
- num_gs_threads = 0;
- num_es_threads = 0;
- num_ps_stack_entries = 128;
- num_vs_stack_entries = 128;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
- break;
- case CHIP_RV710:
- num_ps_gprs = 192;
- num_vs_gprs = 56;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 144;
- num_vs_threads = 48;
- num_gs_threads = 0;
- num_es_threads = 0;
- num_ps_stack_entries = 128;
- num_vs_stack_entries = 128;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
- break;
- }
-
- if ((rdev->family == CHIP_RV610) ||
- (rdev->family == CHIP_RV620) ||
- (rdev->family == CHIP_RS780) ||
- (rdev->family == CHIP_RS880) ||
- (rdev->family == CHIP_RV710))
- sq_config = 0;
- else
- sq_config = VC_ENABLE;
-
- sq_config |= (DX9_CONSTS |
- ALU_INST_PREFER_VECTOR |
- PS_PRIO(0) |
- VS_PRIO(1) |
- GS_PRIO(2) |
- ES_PRIO(3));
-
- sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
- NUM_VS_GPRS(num_vs_gprs) |
- NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
- sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
- NUM_ES_GPRS(num_es_gprs));
- sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
- NUM_VS_THREADS(num_vs_threads) |
- NUM_GS_THREADS(num_gs_threads) |
- NUM_ES_THREADS(num_es_threads));
- sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
- NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
- sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
- NUM_ES_STACK_ENTRIES(num_es_stack_entries));
-
- /* emit an IB pointing at default state */
- dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
- (2 << 0) |
-#endif
- (gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(ring, dwords);
-
- /* SQ config */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
- radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(ring, sq_config);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
- radeon_ring_write(ring, sq_thread_resource_mgmt);
- radeon_ring_write(ring, sq_stack_resource_mgmt_1);
- radeon_ring_write(ring, sq_stack_resource_mgmt_2);
-}
-
-int r600_blit_init(struct radeon_device *rdev)
-{
- u32 obj_size;
- int i, r, dwords;
- void *ptr;
- u32 packet2s[16];
- int num_packet2s = 0;
-
- rdev->r600_blit.primitives.set_render_target = set_render_target;
- rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
- rdev->r600_blit.primitives.set_shaders = set_shaders;
- rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
- rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
- rdev->r600_blit.primitives.set_scissors = set_scissors;
- rdev->r600_blit.primitives.draw_auto = draw_auto;
- rdev->r600_blit.primitives.set_default_state = set_default_state;
-
- rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
- rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
- rdev->r600_blit.ring_size_common += 5; /* done copy */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
-
- rdev->r600_blit.ring_size_per_loop = 76;
- /* set_render_target emits 2 extra dwords on rv6xx */
- if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
- rdev->r600_blit.ring_size_per_loop += 2;
-
- rdev->r600_blit.max_dim = 8192;
-
- rdev->r600_blit.state_offset = 0;
-
- if (rdev->family >= CHIP_RV770)
- rdev->r600_blit.state_len = r7xx_default_size;
- else
- rdev->r600_blit.state_len = r6xx_default_size;
-
- dwords = rdev->r600_blit.state_len;
- while (dwords & 0xf) {
- packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
- dwords++;
- }
-
- obj_size = dwords * 4;
- obj_size = ALIGN(obj_size, 256);
-
- rdev->r600_blit.vs_offset = obj_size;
- obj_size += r6xx_vs_size * 4;
- obj_size = ALIGN(obj_size, 256);
-
- rdev->r600_blit.ps_offset = obj_size;
- obj_size += r6xx_ps_size * 4;
- obj_size = ALIGN(obj_size, 256);
-
- /* pin copy shader into vram if not already initialized */
- if (rdev->r600_blit.shader_obj == NULL) {
- r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("r600 failed to allocate shader\n");
- return r;
- }
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
- }
-
- DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
- obj_size,
- rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
- if (r) {
- DRM_ERROR("failed to map blit object %d\n", r);
- return r;
- }
- if (rdev->family >= CHIP_RV770)
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- r7xx_default_state, rdev->r600_blit.state_len * 4);
- else
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- r6xx_default_state, rdev->r600_blit.state_len * 4);
- if (num_packet2s)
- memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
- packet2s, num_packet2s * 4);
- for (i = 0; i < r6xx_vs_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
- for (i = 0; i < r6xx_ps_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
- radeon_bo_kunmap(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
- return 0;
-}
-
-void r600_blit_fini(struct radeon_device *rdev)
-{
- int r;
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
- if (rdev->r600_blit.shader_obj == NULL)
- return;
- /* If we can't reserve the bo, unref should be enough to destroy
- * it when it becomes idle.
- */
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (!r) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- }
- radeon_bo_unref(&rdev->r600_blit.shader_obj);
-}
-
-static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
- int *width, int *height, int max_dim)
-{
- unsigned max_pages;
- unsigned pages = num_gpu_pages;
- int w, h;
-
- if (num_gpu_pages == 0) {
- /* not supposed to be called with no pages, but just in case */
- h = 0;
- w = 0;
- pages = 0;
- WARN_ON(1);
- } else {
- int rect_order = 2;
- h = RECT_UNIT_H;
- while (num_gpu_pages / rect_order) {
- h *= 2;
- rect_order *= 4;
- if (h >= max_dim) {
- h = max_dim;
- break;
- }
- }
- max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
- if (pages > max_pages)
- pages = max_pages;
- w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
- w = (w / RECT_UNIT_W) * RECT_UNIT_W;
- pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
- BUG_ON(pages == 0);
- }
-
-
- DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);
-
- /* return width and height only of the caller wants it */
- if (height)
- *height = h;
- if (width)
- *width = w;
-
- return pages;
-}
-
-
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
- struct radeon_fence **fence, struct radeon_sa_bo **vb,
- struct radeon_semaphore **sem)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- int r;
- int ring_size;
- int num_loops = 0;
- int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
-
- /* num loops */
- while (num_gpu_pages) {
- num_gpu_pages -=
- r600_blit_create_rect(num_gpu_pages, NULL, NULL,
- rdev->r600_blit.max_dim);
- num_loops++;
- }
-
- /* 48 bytes for vertex per loop */
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
- (num_loops*48)+256, 256, true);
- if (r) {
- return r;
- }
-
- r = radeon_semaphore_create(rdev, sem);
- if (r) {
- radeon_sa_bo_free(rdev, vb, NULL);
- return r;
- }
-
- /* calculate number of loops correctly */
- ring_size = num_loops * dwords_per_loop;
- ring_size += rdev->r600_blit.ring_size_common;
- r = radeon_ring_lock(rdev, ring, ring_size);
- if (r) {
- radeon_sa_bo_free(rdev, vb, NULL);
- radeon_semaphore_free(rdev, sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
- radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
- RADEON_RING_TYPE_GFX_INDEX);
- radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
- } else {
- radeon_semaphore_free(rdev, sem, NULL);
- }
-
- rdev->r600_blit.primitives.set_default_state(rdev);
- rdev->r600_blit.primitives.set_shaders(rdev);
- return 0;
-}
-
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
- struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- int r;
-
- r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_sa_bo_free(rdev, &vb, *fence);
- radeon_semaphore_free(rdev, &sem, *fence);
-}
-
-void r600_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages,
- struct radeon_sa_bo *vb)
-{
- u64 vb_gpu_addr;
- u32 *vb_cpu_addr;
-
- DRM_DEBUG("emitting copy %16llx %16llx %d\n",
- src_gpu_addr, dst_gpu_addr, num_gpu_pages);
- vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
- vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
-
- while (num_gpu_pages) {
- int w, h;
- unsigned size_in_bytes;
- unsigned pages_per_loop =
- r600_blit_create_rect(num_gpu_pages, &w, &h,
- rdev->r600_blit.max_dim);
-
- size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
- DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
-
- vb_cpu_addr[0] = 0;
- vb_cpu_addr[1] = 0;
- vb_cpu_addr[2] = 0;
- vb_cpu_addr[3] = 0;
-
- vb_cpu_addr[4] = 0;
- vb_cpu_addr[5] = int2float(h);
- vb_cpu_addr[6] = 0;
- vb_cpu_addr[7] = int2float(h);
-
- vb_cpu_addr[8] = int2float(w);
- vb_cpu_addr[9] = int2float(h);
- vb_cpu_addr[10] = int2float(w);
- vb_cpu_addr[11] = int2float(h);
-
- rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
- w, h, w, src_gpu_addr, size_in_bytes);
- rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
- w, h, dst_gpu_addr);
- rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
- rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
- rdev->r600_blit.primitives.draw_auto(rdev);
- rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
- PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
- size_in_bytes, dst_gpu_addr);
-
- vb_cpu_addr += 12;
- vb_gpu_addr += 4*12;
- src_gpu_addr += size_in_bytes;
- dst_gpu_addr += size_in_bytes;
- num_gpu_pages -= pages_per_loop;
- }
-}
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index 2f3ce7a75976..f437d36dd98c 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -35,5 +35,4 @@ extern const u32 r6xx_default_state[];
extern const u32 r6xx_ps_size, r6xx_vs_size;
extern const u32 r6xx_default_size, r7xx_default_size;
-__pure uint32_t int2float(uint32_t x);
#endif
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
new file mode 100644
index 000000000000..3b317456512a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+
+u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ */
+
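+/* A minimal sketch of the packet layout, inferred from the DMA_PACKET()
+ * encoding in r600d.h (opcode in bits 31:28, dword count in bits 15:0),
+ * using the WRITE packet emitted by the ring test below as the example:
+ *
+ *   dw0:  DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw)   header
+ *   dw1:  destination address, bits 31:2 (dword aligned)
+ *   dw2:  destination address, bits 39:32
+ *   dw3+: ndw dwords of payload
+ */
+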
+/**
+ * r600_dma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (r6xx+).
+ */
+uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2;
+}
+
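+/* The DMA rptr/wptr registers hold byte offsets into the ring; masking
+ * with 0x3fffc and shifting right by 2, as above, converts that to the
+ * dword index the ring code uses (e.g. byte offset 0x40 -> index 0x10).
+ */
+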
+/**
+ * r600_dma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (r6xx+).
+ */
+uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * r600_dma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (r6xx+).
+ */
+void r600_dma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc);
+}
+
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset dma */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ else
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
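+ /* e.g. a 64 KiB ring is 16384 dwords, so rb_bufsz = 14; the log2
+ * value lands in the size field starting at bit 1 of DMA_RB_CNTL */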
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR, 0);
+ WREG32(DMA_RB_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI,
+ upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO,
+ ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_RV770)
+ WREG32(DMA_MODE, 1);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+ WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+ r600_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
+
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & RADEON_RESET_DMA)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write the fence
+ * seq number, followed by a DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, lower_32_bits(fence->seq));
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
+ u32 s = emit_wait ? 0 : 1;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp = 0;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+ ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+ ib.ptr[3] = 0xDEADBEEF;
+ ib.length_dw = 4;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
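+ /* The INDIRECT_BUFFER packet below is 3 dwords, so padding until
+ * wptr % 8 == 5 places it in dwords 5..7 of an 8-dword group and
+ * it ends exactly on the boundary. */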
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r6xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
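+ /* Each COPY packet moves at most 0xFFFE dwords: e.g. a 4 MiB move
+ * is 1048576 dwords and needs DIV_ROUND_UP(1048576, 0xFFFE) = 17
+ * loops of 4 ring dwords each, matching the ring_lock reservation
+ * of num_loops * 4 + 8 above. */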
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFE)
+ cur_size_in_dw = 0xFFFE;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+ (upper_32_bits(src_offset) & 0xff)));
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index e5c860f4ccbe..fa0de46fcc0d 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -174,6 +174,24 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
return vblank_time_us;
}
+u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 vrefresh = 0;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+ vrefresh = radeon_crtc->hw_mode.vrefresh;
+ break;
+ }
+ }
+
+ return vrefresh;
+}
+
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u)
{
@@ -745,6 +763,8 @@ bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
case THERMAL_TYPE_SUMO:
case THERMAL_TYPE_NI:
case THERMAL_TYPE_SI:
+ case THERMAL_TYPE_CI:
+ case THERMAL_TYPE_KV:
return true;
case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
@@ -779,15 +799,19 @@ static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependen
u32 size = atom_table->ucNumEntries *
sizeof(struct radeon_clock_voltage_dependency_entry);
int i;
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
radeon_table->entries = kzalloc(size, GFP_KERNEL);
if (!radeon_table->entries)
return -ENOMEM;
+ entry = &atom_table->entries[0];
for (i = 0; i < atom_table->ucNumEntries; i++) {
- radeon_table->entries[i].clk = le16_to_cpu(atom_table->entries[i].usClockLow) |
- (atom_table->entries[i].ucClockHigh << 16);
- radeon_table->entries[i].v = le16_to_cpu(atom_table->entries[i].usVoltage);
+ radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
+ (entry->ucClockHigh << 16);
+ radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
}
radeon_table->count = atom_table->ucNumEntries;
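+/* The records above are walked by pointer arithmetic because the ATOM
+ * table declares a one-element array: stepping by sizeof(record) tracks
+ * the packed vbios layout instead of trusting the C array stride. */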
@@ -875,6 +899,19 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
return ret;
}
}
+ if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
+ ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ dep_table);
+ if (ret) {
+ kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
+ kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
+ kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+ return ret;
+ }
+ }
if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
@@ -898,27 +935,27 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
(ATOM_PPLIB_PhaseSheddingLimits_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
+ ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
kzalloc(psl->ucNumEntries *
sizeof(struct radeon_phase_shedding_limits_entry),
GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+ r600_free_extended_power_table(rdev);
return -ENOMEM;
}
+ entry = &psl->entries[0];
for (i = 0; i < psl->ucNumEntries; i++) {
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
- le16_to_cpu(psl->entries[i].usSclkLow) |
- (psl->entries[i].ucSclkHigh << 16);
+ le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
- le16_to_cpu(psl->entries[i].usMclkLow) |
- (psl->entries[i].ucMclkHigh << 16);
+ le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
- le16_to_cpu(psl->entries[i].usVoltage);
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
}
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
psl->ucNumEntries;
@@ -945,30 +982,140 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
(ATOM_PPLIB_CAC_Leakage_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
+ ATOM_PPLIB_CAC_Leakage_Record *entry;
u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+ r600_free_extended_power_table(rdev);
return -ENOMEM;
}
+ entry = &cac_table->entries[0];
for (i = 0; i < cac_table->ucNumEntries; i++) {
- rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
- le16_to_cpu(cac_table->entries[i].usVddc);
- rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
- le32_to_cpu(cac_table->entries[i].ulLeakageValue);
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
+ le16_to_cpu(entry->usVddc1);
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
+ le16_to_cpu(entry->usVddc2);
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
+ le16_to_cpu(entry->usVddc3);
+ } else {
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
+ le16_to_cpu(entry->usVddc);
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
+ le32_to_cpu(entry->ulLeakageValue);
+ }
+ entry = (ATOM_PPLIB_CAC_Leakage_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
}
rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
}
}
- /* ppm table */
+ /* ext tables */
if (le16_to_cpu(power_info->pplib.usTableSize) >=
sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
+ ext_hdr->usVCETableOffset) {
+ VCEClockInfoArray *array = (VCEClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+ 1 + array->ucNumEntries * sizeof(VCEClockInfo));
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct radeon_vce_clock_voltage_dependency_entry);
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ VCEClockInfo *vce_clk = (VCEClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
+ le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
+ le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
+ ext_hdr->usUVDTableOffset) {
+ UVDClockInfoArray *array = (UVDClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
+ 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ UVDClockInfo *uvd_clk = (UVDClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
+ le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
+ le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
+ ext_hdr->usSAMUTableOffset) {
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct radeon_clock_voltage_dependency_entry);
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
+ le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
+ }
+ }
if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
ext_hdr->usPPMTableOffset) {
ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
@@ -977,10 +1124,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.ppm_table =
kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.ppm_table) {
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
- kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
+ r600_free_extended_power_table(rdev);
return -ENOMEM;
}
rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
@@ -1003,6 +1147,71 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.ppm_table->tj_max =
le32_to_cpu(ppm->ulTjmax);
}
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
+ ext_hdr->usACPTableOffset) {
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct radeon_clock_voltage_dependency_entry);
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
+ le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
+ ext_hdr->usPowerTuneTableOffset) {
+ u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ ATOM_PowerTune_Table *pt;
+ rdev->pm.dpm.dyn_state.cac_tdp_table =
+ kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ if (rev > 0) {
+ ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
+ ppt->usMaximumPowerDeliveryLimit;
+ pt = &ppt->power_tune_table;
+ } else {
+ ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
+ pt = &ppt->power_tune_table;
+ }
+ rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
+ le16_to_cpu(pt->usConfigurableTDP);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
+ le16_to_cpu(pt->usBatteryPowerLimit);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
+ le16_to_cpu(pt->usSmallPowerLimit);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
+ le16_to_cpu(pt->usLowCACLeakage);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
+ le16_to_cpu(pt->usHighCACLeakage);
+ }
}
return 0;
@@ -1016,12 +1225,24 @@ void r600_free_extended_power_table(struct radeon_device *rdev)
kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+ if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries)
+ kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries);
if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
if (rdev->pm.dpm.dyn_state.ppm_table)
kfree(rdev->pm.dpm.dyn_state.ppm_table);
+ if (rdev->pm.dpm.dyn_state.cac_tdp_table)
+ kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
+ if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
+ kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
+ if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
+ kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
+ if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
+ kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
+ if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
+ kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
}
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
@@ -1046,3 +1267,36 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
}
return RADEON_PCIE_GEN1;
}
+
+u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
+ u16 asic_lanes,
+ u16 default_lanes)
+{
+ switch (asic_lanes) {
+ case 0:
+ default:
+ return default_lanes;
+ case 1:
+ return 1;
+ case 2:
+ return 2;
+ case 4:
+ return 4;
+ case 8:
+ return 8;
+ case 12:
+ return 12;
+ case 16:
+ return 16;
+ }
+}
+
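+/* The table below is indexed by lane count: 1/2/4/8/12/16 lanes encode
+ * to 1/2/3/4/5/6 for the link width field, and unsupported counts
+ * (including 0) encode to 0. */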
+u8 r600_encode_pci_lane_width(u32 lanes)
+{
+ u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
+
+ if (lanes > 16)
+ return 0;
+
+ return encoded_lanes[lanes];
+}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 7c822d9ae53d..1000bf9719f2 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -130,6 +130,7 @@ void r600_dpm_print_cap_info(u32 caps);
void r600_dpm_print_ps_status(struct radeon_device *rdev,
struct radeon_ps *rps);
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev);
+u32 r600_dpm_get_vrefresh(struct radeon_device *rdev);
bool r600_is_uvd_state(u32 class, u32 class2);
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u);
@@ -224,4 +225,9 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
enum radeon_pcie_gen asic_gen,
enum radeon_pcie_gen default_gen);
+u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
+ u16 asic_lanes,
+ u16 default_lanes);
+u8 r600_encode_pci_lane_width(u32 lanes);
+
#endif
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f48240bb8c56..f443010ce90b 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 base_rate = 24000;
+ u32 max_ratio = clock / base_rate;
+ u32 dto_phase;
+ u32 dto_modulo = clock;
+ u32 wallclock_ratio;
+ u32 dto_cntl;
if (!dig || !dig->afmt)
return;
+ if (max_ratio >= 8) {
+ dto_phase = 192 * 1000;
+ wallclock_ratio = 3;
+ } else if (max_ratio >= 4) {
+ dto_phase = 96 * 1000;
+ wallclock_ratio = 2;
+ } else if (max_ratio >= 2) {
+ dto_phase = 48 * 1000;
+ wallclock_ratio = 1;
+ } else {
+ dto_phase = 24 * 1000;
+ wallclock_ratio = 0;
+ }
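+ /* A plausible reading of the ladder above: the DTO derives the audio
+ * reference as phase/module of a source clock, so the phase steps up
+ * with the pixel-clock/base-rate ratio (24 -> 48 -> 96 -> 192 kHz)
+ * while the matching wallclock-ratio divider compensates, keeping the
+ * effective audio rate steady. */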
+
/* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
* doesn't matter which one you use. Just use the first one.
*/
@@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
/* according to the reg specs, this should DCE3.2 only, but in
* practice it seems to cover DCE3.0 as well.
*/
- WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
- WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ if (dig->dig_encoder == 0) {
+ dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+ WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ } else {
+ dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl);
+ WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+ }
} else {
/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
@@ -252,6 +283,107 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
}
}
+static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 tmp;
+ u8 *sadb;
+ int sad_count;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set HDMI mode */
+ tmp |= HDMI_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
+}
+
+static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
+ static const u16 eld_reg_to_type[][2] = {
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+ BUG_ON(!sads);
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
+ break;
+ }
+ }
+ WREG32(eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
+}
+
/*
* update the info frames with the data from the current display mode
*/
@@ -296,6 +428,11 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
}
+ if (ASIC_IS_DCE32(rdev)) {
+ dce3_2_afmt_write_speaker_allocation(encoder);
+ dce3_2_afmt_write_sad_regs(encoder);
+ }
+
WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
HDMI0_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
HDMI0_ACR_SOURCE); /* select SW CTS value */
@@ -351,7 +488,7 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct r600_audio audio = r600_audio_status(rdev);
+ struct r600_audio_pin audio = r600_audio_status(rdev);
uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
struct hdmi_audio_infoframe frame;
uint32_t offset;
@@ -460,6 +597,11 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
+ if (enable)
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ else
+ dig->afmt->pin = NULL;
+
/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 8e3fe815edab..454f90a849e4 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -933,6 +933,9 @@
#define DCCG_AUDIO_DTO0_LOAD 0x051c
# define DTO_LOAD (1 << 31)
#define DCCG_AUDIO_DTO0_CNTL 0x0520
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
#define DCCG_AUDIO_DTO1_PHASE 0x0524
#define DCCG_AUDIO_DTO1_MODULE 0x0528
@@ -957,6 +960,42 @@
# define DIG_MODE_SDVO 4
#define DIG1_CNTL 0x79a0
+#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x71bc
+#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
+#define SPEAKER_ALLOCATION_SHIFT 0
+#define HDMI_CONNECTION (1 << 16)
+#define DP_CONNECTION (1 << 17)
+
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
/* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one
* instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
* different due to the new DIG blocks, but also have 2 instances.
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 19066d1dcb7d..ff8b564ce2b2 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -152,6 +152,47 @@ extern int radeon_aspm;
#define RADEON_RESET_MC (1 << 10)
#define RADEON_RESET_DISPLAY (1 << 11)
+/* CG block flags */
+#define RADEON_CG_BLOCK_GFX (1 << 0)
+#define RADEON_CG_BLOCK_MC (1 << 1)
+#define RADEON_CG_BLOCK_SDMA (1 << 2)
+#define RADEON_CG_BLOCK_UVD (1 << 3)
+#define RADEON_CG_BLOCK_VCE (1 << 4)
+#define RADEON_CG_BLOCK_HDP (1 << 5)
+#define RADEON_CG_BLOCK_BIF (1 << 6)
+
+/* CG flags */
+#define RADEON_CG_SUPPORT_GFX_MGCG (1 << 0)
+#define RADEON_CG_SUPPORT_GFX_MGLS (1 << 1)
+#define RADEON_CG_SUPPORT_GFX_CGCG (1 << 2)
+#define RADEON_CG_SUPPORT_GFX_CGLS (1 << 3)
+#define RADEON_CG_SUPPORT_GFX_CGTS (1 << 4)
+#define RADEON_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
+#define RADEON_CG_SUPPORT_GFX_CP_LS (1 << 6)
+#define RADEON_CG_SUPPORT_GFX_RLC_LS (1 << 7)
+#define RADEON_CG_SUPPORT_MC_LS (1 << 8)
+#define RADEON_CG_SUPPORT_MC_MGCG (1 << 9)
+#define RADEON_CG_SUPPORT_SDMA_LS (1 << 10)
+#define RADEON_CG_SUPPORT_SDMA_MGCG (1 << 11)
+#define RADEON_CG_SUPPORT_BIF_LS (1 << 12)
+#define RADEON_CG_SUPPORT_UVD_MGCG (1 << 13)
+#define RADEON_CG_SUPPORT_VCE_MGCG (1 << 14)
+#define RADEON_CG_SUPPORT_HDP_LS (1 << 15)
+#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
+
+/* PG flags */
+#define RADEON_PG_SUPPORT_GFX_CG (1 << 0)
+#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
+#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
+#define RADEON_PG_SUPPORT_UVD (1 << 3)
+#define RADEON_PG_SUPPORT_VCE (1 << 4)
+#define RADEON_PG_SUPPORT_CP (1 << 5)
+#define RADEON_PG_SUPPORT_GDS (1 << 6)
+#define RADEON_PG_SUPPORT_RLC_SMU_HS (1 << 7)
+#define RADEON_PG_SUPPORT_SDMA (1 << 8)
+#define RADEON_PG_SUPPORT_ACP (1 << 9)
+#define RADEON_PG_SUPPORT_SAMU (1 << 10)
+
/* max cursor sizes (in pixels) */
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
@@ -238,6 +279,12 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
u16 *voltage,
u16 leakage_idx);
+int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
+ u16 *leakage_id);
+int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
+ u16 *vddc, u16 *vddci,
+ u16 virtual_voltage_id,
+ u16 vbios_voltage_id);
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
u8 voltage_type,
u16 nominal_voltage,
@@ -679,7 +726,7 @@ union radeon_irq_stat_regs {
#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_AFMT_BLOCKS 6
+#define RADEON_MAX_AFMT_BLOCKS 7
struct radeon_irq {
bool installed;
@@ -743,8 +790,6 @@ struct radeon_ring {
uint32_t align_mask;
uint32_t ptr_mask;
bool ready;
- u32 ptr_reg_shift;
- u32 ptr_reg_mask;
u32 nop;
u32 idx;
u64 last_semaphore_signal_addr;
@@ -841,35 +886,6 @@ struct r600_ih {
bool enabled;
};
-struct r600_blit_cp_primitives {
- void (*set_render_target)(struct radeon_device *rdev, int format,
- int w, int h, u64 gpu_addr);
- void (*cp_set_surface_sync)(struct radeon_device *rdev,
- u32 sync_type, u32 size,
- u64 mc_addr);
- void (*set_shaders)(struct radeon_device *rdev);
- void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
- void (*set_tex_resource)(struct radeon_device *rdev,
- int format, int w, int h, int pitch,
- u64 gpu_addr, u32 size);
- void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
- int x2, int y2);
- void (*draw_auto)(struct radeon_device *rdev);
- void (*set_default_state)(struct radeon_device *rdev);
-};
-
-struct r600_blit {
- struct radeon_bo *shader_obj;
- struct r600_blit_cp_primitives primitives;
- int max_dim;
- int ring_size_common;
- int ring_size_per_loop;
- u64 shader_gpu_addr;
- u32 vs_offset, ps_offset;
- u32 state_offset;
- u32 state_len;
-};
-
/*
* RLC stuff
*/
@@ -880,13 +896,19 @@ struct radeon_rlc {
struct radeon_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
volatile uint32_t *sr_ptr;
- u32 *reg_list;
+ const u32 *reg_list;
u32 reg_list_size;
/* for clear state */
struct radeon_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
volatile uint32_t *cs_ptr;
- struct cs_section_def *cs_data;
+ const struct cs_section_def *cs_data;
+ u32 clear_state_size;
+ /* for cp tables */
+ struct radeon_bo *cp_table_obj;
+ uint64_t cp_table_gpu_addr;
+ volatile uint32_t *cp_table_ptr;
+ u32 cp_table_size;
};
int radeon_ib_get(struct radeon_device *rdev, int ring,
@@ -918,8 +940,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
- unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
- u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
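
With the shift/mask parameters gone, call sites shrink as well. A hedged before/after sketch of a typical caller (the ring size, register names, and shift/mask values here are placeholders, not taken from this patch):

	/* before: per-ring pointer shift/mask threaded through init */
	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB_RPTR, CP_RB_WPTR, 0, 0xfffffffc, RADEON_CP_PACKET2);

	/* after: rptr/wptr handling lives in the ring's get/set pointer callbacks */
	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB_RPTR, CP_RB_WPTR, RADEON_CP_PACKET2);
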
@@ -1033,7 +1054,6 @@ struct radeon_wb {
#define R600_WB_DMA_RPTR_OFFSET 1792
#define R600_WB_IH_WPTR_OFFSET 2048
#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
-#define R600_WB_UVD_RPTR_OFFSET 2560
#define R600_WB_EVENT_OFFSET 3072
#define CIK_WB_CP1_WPTR_OFFSET 3328
#define CIK_WB_CP2_WPTR_OFFSET 3584
@@ -1144,6 +1164,7 @@ enum radeon_int_thermal_type {
THERMAL_TYPE_SI,
THERMAL_TYPE_EMC2103_WITH_INTERNAL,
THERMAL_TYPE_CI,
+ THERMAL_TYPE_KV,
};
struct radeon_voltage {
@@ -1217,6 +1238,9 @@ struct radeon_ps {
/* UVD clocks */
u32 vclk;
u32 dclk;
+ /* VCE clocks */
+ u32 evclk;
+ u32 ecclk;
/* asic priv */
void *ps_priv;
};
@@ -1267,14 +1291,21 @@ struct radeon_clock_voltage_dependency_table {
struct radeon_clock_voltage_dependency_entry *entries;
};
-struct radeon_cac_leakage_entry {
- u16 vddc;
- u32 leakage;
+union radeon_cac_leakage_entry {
+ struct {
+ u16 vddc;
+ u32 leakage;
+ };
+ struct {
+ u16 vddc1;
+ u16 vddc2;
+ u16 vddc3;
+ };
};
struct radeon_cac_leakage_table {
u32 count;
- struct radeon_cac_leakage_entry *entries;
+ union radeon_cac_leakage_entry *entries;
};
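
The union deliberately overlays two record layouts in one slot: legacy tables store a (vddc, leakage) pair, while newer VBIOS images pack three voltage levels into the same entry. Nothing in the union tags which view is valid, so the consumer must know the table format up front. A stand-alone illustration (uintN_t types stand in for the kernel's u16/u32):

#include <stdint.h>
#include <stdio.h>

union cac_leakage_entry {	/* mirrors the union above */
	struct {
		uint16_t vddc;
		uint32_t leakage;
	};
	struct {
		uint16_t vddc1;
		uint16_t vddc2;
		uint16_t vddc3;
	};
};

int main(void)
{
	union cac_leakage_entry legacy = { .vddc = 900, .leakage = 45 };
	union cac_leakage_entry packed = { .vddc1 = 900, .vddc2 = 950, .vddc3 = 1000 };

	printf("legacy view: vddc=%u leakage=%u\n", legacy.vddc, (unsigned)legacy.leakage);
	printf("packed view: %u/%u/%u\n", packed.vddc1, packed.vddc2, packed.vddc3);
	/* Reading 'packed' through the legacy view compiles but yields garbage -
	 * nothing in the union records which layout the VBIOS used. */
	return 0;
}
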
struct radeon_phase_shedding_limits_entry {
@@ -1288,6 +1319,28 @@ struct radeon_phase_shedding_limits_table {
struct radeon_phase_shedding_limits_entry *entries;
};
+struct radeon_uvd_clock_voltage_dependency_entry {
+ u32 vclk;
+ u32 dclk;
+ u16 v;
+};
+
+struct radeon_uvd_clock_voltage_dependency_table {
+ u8 count;
+ struct radeon_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct radeon_vce_clock_voltage_dependency_entry {
+ u32 ecclk;
+ u32 evclk;
+ u16 v;
+};
+
+struct radeon_vce_clock_voltage_dependency_table {
+ u8 count;
+ struct radeon_vce_clock_voltage_dependency_entry *entries;
+};
+
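
These tables map a requested engine-clock pair to the minimum voltage that sustains it. A minimal lookup sketch in kernel style, assuming entries are sorted by ascending clock (as the dpm tables generally are) and that count is non-zero; the helper name is hypothetical and not part of this patch:

/* hypothetical helper: lowest voltage covering the requested UVD clocks */
static u16 uvd_min_voltage(const struct radeon_uvd_clock_voltage_dependency_table *t,
			   u32 vclk, u32 dclk)
{
	u8 i;

	for (i = 0; i < t->count; i++) {
		/* first entry that covers both requested clocks wins */
		if (t->entries[i].vclk >= vclk && t->entries[i].dclk >= dclk)
			return t->entries[i].v;
	}
	return t->entries[t->count - 1].v; /* clamp to the highest level */
}
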
struct radeon_ppm_table {
u8 ppm_design;
u16 cpu_core_number;
@@ -1301,11 +1354,27 @@ struct radeon_ppm_table {
u32 tj_max;
};
+struct radeon_cac_tdp_table {
+ u16 tdp;
+ u16 configurable_tdp;
+ u16 tdc;
+ u16 battery_power_limit;
+ u16 small_power_limit;
+ u16 low_cac_leakage;
+ u16 high_cac_leakage;
+ u16 maximum_power_delivery_limit;
+};
+
struct radeon_dpm_dynamic_state {
struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
+ struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+ struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+ struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+ struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+ struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
struct radeon_clock_array valid_sclk_values;
struct radeon_clock_array valid_mclk_values;
struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
@@ -1317,6 +1386,7 @@ struct radeon_dpm_dynamic_state {
struct radeon_cac_leakage_table cac_leakage_table;
struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
struct radeon_ppm_table *ppm_table;
+ struct radeon_cac_tdp_table *cac_tdp_table;
};
struct radeon_dpm_fan {
@@ -1386,11 +1456,12 @@ struct radeon_dpm {
struct radeon_dpm_thermal thermal;
/* forced levels */
enum radeon_dpm_forced_level forced_level;
+ /* track UVD streams */
+ unsigned sd;
+ unsigned hd;
};
-void radeon_dpm_enable_power_state(struct radeon_device *rdev,
- enum radeon_pm_state_type dpm_state);
-
+void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
struct radeon_pm {
struct mutex mutex;
@@ -1465,9 +1536,9 @@ struct radeon_uvd {
void *cpu_addr;
uint64_t gpu_addr;
void *saved_bo;
- unsigned fw_size;
atomic_t handles[RADEON_MAX_UVD_HANDLES];
struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
+ unsigned img_size[RADEON_MAX_UVD_HANDLES];
struct delayed_work idle_work;
};
@@ -1496,12 +1567,21 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
unsigned cg_upll_func_cntl);
-struct r600_audio {
+struct r600_audio_pin {
int channels;
int rate;
int bits_per_sample;
u8 status_bits;
u8 category_code;
+ u32 offset;
+ bool connected;
+ u32 id;
+};
+
+struct r600_audio {
+ bool enabled;
+ struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
+ int num_pins;
};
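
r600_audio thus grows from a single global state block into a small pool of pins, one per audio endpoint, and encoders are handed a pin by the get_pin helpers declared further down in this header. A hedged sketch of what such a selector can look like; the real policy lives in r600_audio_get_pin()/dce6_audio_get_pin():

/* illustrative only - the in-tree helpers implement the actual policy */
static struct r600_audio_pin *pick_first_connected_pin(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->audio.num_pins; i++) {
		if (rdev->audio.pin[i].connected)
			return &rdev->audio.pin[i];
	}
	return &rdev->audio.pin[0]; /* fall back to pin 0 */
}
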
/*
@@ -1533,6 +1613,34 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);
+/*
+ * ASIC ring specific functions.
+ */
+struct radeon_asic_ring {
+ /* ring read/write ptr handling */
+ u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+ u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+ void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+
+ /* validating and patching of IBs */
+ int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+ int (*cs_parse)(struct radeon_cs_parser *p);
+
+ /* command emit functions */
+ void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+ void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore, bool emit_wait);
+ void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+
+ /* testing functions */
+ int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+
+ /* deprecated */
+ void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
+};
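
The payoff of hoisting these callbacks into a named struct is that one static instance can back several ring slots, which is exactly what the radeon_asic tables later in this patch do (three CP rings sharing one cayman_gfx_ring, for example). A stand-alone illustration of the pattern:

#include <stdio.h>

struct ring_ops {
	void (*ring_test)(int idx);
};

static void gfx_test(int idx) { printf("testing gfx ring %d\n", idx); }

static struct ring_ops gfx_ops = { .ring_test = gfx_test };

/* three slots share one ops struct instead of three embedded copies */
static struct ring_ops *ring[3] = { &gfx_ops, &gfx_ops, &gfx_ops };

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		ring[i]->ring_test(i); /* dispatch via pointer, as the macros below do */
	return 0;
}
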
/*
* ASIC specific functions.
@@ -1576,23 +1684,7 @@ struct radeon_asic {
uint32_t incr, uint32_t flags);
} vm;
/* ring specific callbacks */
- struct {
- void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
- int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
- void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
- void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
- struct radeon_semaphore *semaphore, bool emit_wait);
- int (*cs_parse)(struct radeon_cs_parser *p);
- void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
- int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
- int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
- bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
- void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-
- u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
- u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
- void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
- } ring[RADEON_NUM_RINGS];
+ struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
/* irqs */
struct {
int (*set)(struct radeon_device *rdev);
@@ -1685,6 +1777,7 @@ struct radeon_asic {
void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
bool (*vblank_too_short)(struct radeon_device *rdev);
+ void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
} dpm;
/* pageflipping */
struct {
@@ -2063,7 +2156,7 @@ struct radeon_device {
const struct firmware *mec_fw; /* CIK MEC firmware */
const struct firmware *sdma_fw; /* CIK SDMA firmware */
const struct firmware *smc_fw; /* SMC firmware */
- struct r600_blit r600_blit;
+ const struct firmware *uvd_fw; /* UVD firmware */
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
@@ -2074,9 +2167,8 @@ struct radeon_device {
struct work_struct reset_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
- bool audio_enabled;
bool has_uvd;
- struct r600_audio audio_status; /* audio stuff */
+ struct r600_audio audio; /* audio stuff */
struct notifier_block acpi_nb;
/* only one userspace can use Hyperz features or CMASK at a time */
struct drm_file *hyperz_filp;
@@ -2092,6 +2184,11 @@ struct radeon_device {
/* ACPI interface */
struct radeon_atif atif;
struct radeon_atcs atcs;
+ /* srbm instance registers */
+ struct mutex srbm_mutex;
+ /* clock, powergating flags */
+ u32 cg_flags;
+ u32 pg_flags;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -2150,6 +2247,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
#define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
#define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
#define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
+#define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
+#define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32(reg); \
@@ -2158,7 +2257,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
WREG32(reg, tmp_); \
} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32_PLL(reg); \
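
The one-character change to WREG32_OR in the hunk above matters because ~ binds tighter than |: without the parentheses, a caller passing a compound mask gets the complement of only its first term. A stand-alone demonstration:

#include <stdio.h>

#define MASK_UNSAFE(or)	(~or)	/* old expansion */
#define MASK_SAFE(or)	(~(or))	/* fixed expansion */

int main(void)
{
	/* caller intends the complement of (bit0 | bit1) */
	printf("unsafe: 0x%08x\n", MASK_UNSAFE(0x1u | 0x2u)); /* ~0x1 | 0x2 = 0xfffffffe */
	printf("safe:   0x%08x\n", MASK_SAFE(0x1u | 0x2u));   /* ~(0x1 | 0x2) = 0xfffffffc */
	return 0;
}
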
@@ -2281,6 +2380,22 @@ static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
WREG32(R600_UVD_CTX_DATA, (v));
}
+
+static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
+{
+ u32 r;
+
+ WREG32(CIK_DIDT_IND_INDEX, (reg));
+ r = RREG32(CIK_DIDT_IND_DATA);
+ return r;
+}
+
+static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+ WREG32(CIK_DIDT_IND_INDEX, (reg));
+ WREG32(CIK_DIDT_IND_DATA, (v));
+}
+
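
Like the UVD_CTX accessors just above, DIDT registers sit behind an index/data pair: the offset goes to CIK_DIDT_IND_INDEX, then the value moves through CIK_DIDT_IND_DATA. Neither helper takes a lock, so a caller doing read-modify-write must serialize access to the pair itself. A hedged usage sketch; the helper name is hypothetical:

/* hypothetical helper: set bits in an indirect DIDT register */
static void didt_set_bits(struct radeon_device *rdev, u32 reg, u32 bits)
{
	u32 tmp = RREG32_DIDT(reg);	/* index write + data read */

	tmp |= bits;
	WREG32_DIDT(reg, tmp);		/* index write + data write */
}
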
void r100_pll_errata_after_index(struct radeon_device *rdev);
@@ -2376,7 +2491,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
-#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
+#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
@@ -2384,16 +2499,16 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
-#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
-#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
-#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
-#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
-#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
-#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
-#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
-#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_rptr((rdev), (r))
-#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_wptr((rdev), (r))
-#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].set_wptr((rdev), (r))
+#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
+#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
+#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
+#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm))
+#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
+#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
+#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
@@ -2401,8 +2516,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
-#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
-#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
@@ -2453,6 +2568,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
+#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
/* Common functions */
/* AGP */
@@ -2519,6 +2635,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
/*
* R600 vram scratch functions
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f8f8b3113ddd..630853b96841 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -172,6 +172,21 @@ void radeon_agp_disable(struct radeon_device *rdev)
/*
* ASIC
*/
+
+static struct radeon_asic_ring r100_gfx_ring = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
static struct radeon_asic r100_asic = {
.init = &r100_init,
.fini = &r100_fini,
@@ -187,19 +202,7 @@ static struct radeon_asic r100_asic = {
.set_page = &r100_pci_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r100_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r100_cs_parse,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -266,19 +269,7 @@ static struct radeon_asic r200_asic = {
.set_page = &r100_pci_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r100_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r100_cs_parse,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -330,6 +321,20 @@ static struct radeon_asic r200_asic = {
},
};
+static struct radeon_asic_ring r300_gfx_ring = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
static struct radeon_asic r300_asic = {
.init = &r300_init,
.fini = &r300_fini,
@@ -345,19 +350,7 @@ static struct radeon_asic r300_asic = {
.set_page = &r100_pci_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -424,19 +417,7 @@ static struct radeon_asic r300_asic_pcie = {
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -503,19 +484,7 @@ static struct radeon_asic r420_asic = {
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -582,19 +551,7 @@ static struct radeon_asic rs400_asic = {
.set_page = &rs400_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -661,19 +618,7 @@ static struct radeon_asic rs600_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -742,19 +687,7 @@ static struct radeon_asic rs690_asic = {
.set_page = &rs400_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -823,19 +756,7 @@ static struct radeon_asic rv515_asic = {
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -902,19 +823,7 @@ static struct radeon_asic r520_asic = {
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -966,6 +875,32 @@ static struct radeon_asic r520_asic = {
},
};
+static struct radeon_asic_ring r600_gfx_ring = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &r600_gfx_is_lockup,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring r600_dma_ring = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
static struct radeon_asic r600_asic = {
.init = &r600_init,
.fini = &r600_fini,
@@ -983,30 +918,8 @@ static struct radeon_asic r600_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1022,7 +935,7 @@ static struct radeon_asic r600_asic = {
.hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1078,30 +991,8 @@ static struct radeon_asic rv6xx_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1115,7 +1006,7 @@ static struct radeon_asic rv6xx_asic = {
.get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1187,30 +1078,8 @@ static struct radeon_asic rs780_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1226,7 +1095,7 @@ static struct radeon_asic rs780_asic = {
.hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1280,6 +1149,19 @@ static struct radeon_asic rs780_asic = {
},
};
+static struct radeon_asic_ring rv770_uvd_ring = {
+ .ib_execute = &uvd_v1_0_ib_execute,
+ .emit_fence = &uvd_v2_2_fence_emit,
+ .emit_semaphore = &uvd_v1_0_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &uvd_v1_0_ring_test,
+ .ib_test = &uvd_v1_0_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ .get_rptr = &uvd_v1_0_get_rptr,
+ .get_wptr = &uvd_v1_0_get_wptr,
+ .set_wptr = &uvd_v1_0_set_wptr,
+};
+
static struct radeon_asic rv770_asic = {
.init = &rv770_init,
.fini = &rv770_fini,
@@ -1297,42 +1179,9 @@ static struct radeon_asic rv770_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &r600_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1348,7 +1197,7 @@ static struct radeon_asic rv770_asic = {
.hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &rv770_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1405,6 +1254,32 @@ static struct radeon_asic rv770_asic = {
},
};
+static struct radeon_asic_ring evergreen_gfx_ring = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gfx_is_lockup,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring evergreen_dma_ring = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &evergreen_dma_is_lockup,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
static struct radeon_asic evergreen_asic = {
.init = &evergreen_init,
.fini = &evergreen_fini,
@@ -1422,42 +1297,9 @@ static struct radeon_asic evergreen_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &evergreen_dma_ring_ib_execute,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &evergreen_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &r600_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -1473,7 +1315,7 @@ static struct radeon_asic evergreen_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1547,42 +1389,9 @@ static struct radeon_asic sumo_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &evergreen_dma_ring_ib_execute,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &evergreen_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &r600_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -1598,7 +1407,7 @@ static struct radeon_asic sumo_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1671,42 +1480,9 @@ static struct radeon_asic btc_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &evergreen_dma_ring_ib_execute,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &evergreen_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &r600_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -1722,7 +1498,7 @@ static struct radeon_asic btc_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1779,6 +1555,49 @@ static struct radeon_asic btc_asic = {
},
};
+static struct radeon_asic_ring cayman_gfx_ring = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &cayman_gfx_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring cayman_dma_ring = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr
+};
+
+static struct radeon_asic_ring cayman_uvd_ring = {
+ .ib_execute = &uvd_v1_0_ib_execute,
+ .emit_fence = &uvd_v2_2_fence_emit,
+ .emit_semaphore = &uvd_v3_1_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &uvd_v1_0_ring_test,
+ .ib_test = &uvd_v1_0_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ .get_rptr = &uvd_v1_0_get_rptr,
+ .get_wptr = &uvd_v1_0_get_wptr,
+ .set_wptr = &uvd_v1_0_set_wptr,
+};
+
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
.fini = &cayman_fini,
@@ -1802,88 +1621,12 @@ static struct radeon_asic cayman_asic = {
.set_page = &cayman_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -1899,7 +1642,7 @@ static struct radeon_asic cayman_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1979,88 +1722,12 @@ static struct radeon_asic trinity_asic = {
.set_page = &cayman_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -2072,9 +1739,11 @@ static struct radeon_asic trinity_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2130,6 +1799,36 @@ static struct radeon_asic trinity_asic = {
},
};
+static struct radeon_asic_ring si_gfx_ring = {
+ .ib_execute = &si_ring_ib_execute,
+ .ib_parse = &si_ib_parse,
+ .emit_fence = &si_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &si_gfx_is_lockup,
+ .vm_flush = &si_vm_flush,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring si_dma_ring = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &si_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
static struct radeon_asic si_asic = {
.init = &si_init,
.fini = &si_fini,
@@ -2153,88 +1852,12 @@ static struct radeon_asic si_asic = {
.set_page = &si_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &si_ring_ib_execute,
- .ib_parse = &si_ib_parse,
- .emit_fence = &si_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &si_gfx_is_lockup,
- .vm_flush = &si_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &si_ring_ib_execute,
- .ib_parse = &si_ib_parse,
- .emit_fence = &si_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &si_gfx_is_lockup,
- .vm_flush = &si_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &si_ring_ib_execute,
- .ib_parse = &si_ib_parse,
- .emit_fence = &si_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &si_gfx_is_lockup,
- .vm_flush = &si_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &si_dma_is_lockup,
- .vm_flush = &si_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &si_dma_is_lockup,
- .vm_flush = &si_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &si_gfx_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &si_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &si_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &si_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &si_irq_set,
@@ -2246,6 +1869,8 @@ static struct radeon_asic si_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = NULL,
@@ -2305,6 +1930,51 @@ static struct radeon_asic si_asic = {
},
};
+static struct radeon_asic_ring ci_gfx_ring = {
+ .ib_execute = &cik_ring_ib_execute,
+ .ib_parse = &cik_ib_parse,
+ .emit_fence = &cik_fence_gfx_ring_emit,
+ .emit_semaphore = &cik_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &cik_ring_test,
+ .ib_test = &cik_ib_test,
+ .is_lockup = &cik_gfx_is_lockup,
+ .vm_flush = &cik_vm_flush,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring ci_cp_ring = {
+ .ib_execute = &cik_ring_ib_execute,
+ .ib_parse = &cik_ib_parse,
+ .emit_fence = &cik_fence_compute_ring_emit,
+ .emit_semaphore = &cik_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &cik_ring_test,
+ .ib_test = &cik_ib_test,
+ .is_lockup = &cik_gfx_is_lockup,
+ .vm_flush = &cik_vm_flush,
+ .get_rptr = &cik_compute_ring_get_rptr,
+ .get_wptr = &cik_compute_ring_get_wptr,
+ .set_wptr = &cik_compute_ring_set_wptr,
+};
+
+static struct radeon_asic_ring ci_dma_ring = {
+ .ib_execute = &cik_sdma_ring_ib_execute,
+ .ib_parse = &cik_ib_parse,
+ .emit_fence = &cik_sdma_fence_ring_emit,
+ .emit_semaphore = &cik_sdma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &cik_sdma_ring_test,
+ .ib_test = &cik_sdma_ib_test,
+ .is_lockup = &cik_sdma_is_lockup,
+ .vm_flush = &cik_dma_vm_flush,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
static struct radeon_asic ci_asic = {
.init = &cik_init,
.fini = &cik_fini,
@@ -2328,88 +1998,12 @@ static struct radeon_asic ci_asic = {
.set_page = &cik_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_gfx_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &cik_irq_set,
@@ -2419,6 +2013,8 @@ static struct radeon_asic ci_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = NULL,
@@ -2452,6 +2048,25 @@ static struct radeon_asic ci_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &cik_set_uvd_clocks,
+ .get_temperature = &ci_get_temp,
+ },
+ .dpm = {
+ .init = &ci_dpm_init,
+ .setup_asic = &ci_dpm_setup_asic,
+ .enable = &ci_dpm_enable,
+ .disable = &ci_dpm_disable,
+ .pre_set_power_state = &ci_dpm_pre_set_power_state,
+ .set_power_state = &ci_dpm_set_power_state,
+ .post_set_power_state = &ci_dpm_post_set_power_state,
+ .display_configuration_changed = &ci_dpm_display_configuration_changed,
+ .fini = &ci_dpm_fini,
+ .get_sclk = &ci_dpm_get_sclk,
+ .get_mclk = &ci_dpm_get_mclk,
+ .print_power_state = &ci_dpm_print_power_state,
+ .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &ci_dpm_force_performance_level,
+ .vblank_too_short = &ci_dpm_vblank_too_short,
+ .powergate_uvd = &ci_dpm_powergate_uvd,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -2483,88 +2098,12 @@ static struct radeon_asic kv_asic = {
.set_page = &cik_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_gfx_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &cik_irq_set,
@@ -2574,6 +2113,8 @@ static struct radeon_asic kv_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = NULL,
@@ -2607,6 +2148,24 @@ static struct radeon_asic kv_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &cik_set_uvd_clocks,
+ .get_temperature = &kv_get_temp,
+ },
+ .dpm = {
+ .init = &kv_dpm_init,
+ .setup_asic = &kv_dpm_setup_asic,
+ .enable = &kv_dpm_enable,
+ .disable = &kv_dpm_disable,
+ .pre_set_power_state = &kv_dpm_pre_set_power_state,
+ .set_power_state = &kv_dpm_set_power_state,
+ .post_set_power_state = &kv_dpm_post_set_power_state,
+ .display_configuration_changed = &kv_dpm_display_configuration_changed,
+ .fini = &kv_dpm_fini,
+ .get_sclk = &kv_dpm_get_sclk,
+ .get_mclk = &kv_dpm_get_mclk,
+ .print_power_state = &kv_dpm_print_power_state,
+ .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &kv_dpm_force_performance_level,
+ .powergate_uvd = &kv_dpm_powergate_uvd,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -2776,19 +2335,188 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->has_uvd = false;
else
rdev->has_uvd = true;
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ break;
+ case CHIP_PITCAIRN:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_GFX_RLC_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ break;
+ case CHIP_VERDE:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_GFX_RLC_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0 |
+ /*RADEON_PG_SUPPORT_GFX_CG | */
+ RADEON_PG_SUPPORT_SDMA;
+ break;
+ case CHIP_OLAND:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_GFX_RLC_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ break;
+ case CHIP_HAINAN:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_GFX_RLC_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ break;
+ default:
+ rdev->cg_flags = 0;
+ rdev->pg_flags = 0;
+ break;
+ }
break;
case CHIP_BONAIRE:
rdev->asic = &ci_asic;
rdev->num_crtc = 6;
+ rdev->has_uvd = true;
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CGTS_LS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
break;
case CHIP_KAVERI:
case CHIP_KABINI:
rdev->asic = &kv_asic;
/* set num crtcs */
- if (rdev->family == CHIP_KAVERI)
+ if (rdev->family == CHIP_KAVERI) {
rdev->num_crtc = 4;
- else
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CGTS_LS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ /*RADEON_PG_SUPPORT_GFX_CG |
+ RADEON_PG_SUPPORT_GFX_SMG |
+ RADEON_PG_SUPPORT_GFX_DMG |
+ RADEON_PG_SUPPORT_UVD |
+ RADEON_PG_SUPPORT_VCE |
+ RADEON_PG_SUPPORT_CP |
+ RADEON_PG_SUPPORT_GDS |
+ RADEON_PG_SUPPORT_RLC_SMU_HS |
+ RADEON_PG_SUPPORT_ACP |
+ RADEON_PG_SUPPORT_SAMU;*/
+ } else {
rdev->num_crtc = 2;
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CGTS_LS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ /*RADEON_PG_SUPPORT_GFX_CG |
+ RADEON_PG_SUPPORT_GFX_SMG |
+ RADEON_PG_SUPPORT_UVD |
+ RADEON_PG_SUPPORT_VCE |
+ RADEON_PG_SUPPORT_CP |
+ RADEON_PG_SUPPORT_GDS |
+ RADEON_PG_SUPPORT_RLC_SMU_HS |
+ RADEON_PG_SUPPORT_SAMU;*/
+ }
+ rdev->has_uvd = true;
break;
default:
/* FIXME: not supported yet */
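
A minimal sketch of the consolidation the radeon_asic.c hunks above perform (ring_ops, gfx_ring, asic_a and asic_b are invented names, not from the patch): instead of each per-ASIC table embedding a full callback struct for every ring, the tables now hold pointers into a few statically defined ring objects, so identical rings on different chips share one definition and the duplicated initializers disappear. Call sites change accordingly, e.g. asic->ring[i].cs_parse becomes asic->ring[i]->cs_parse, as the radeon_cs.c hunk further down shows.

	struct ring_ops {
		int (*ring_test)(void *rdev);
		int (*ib_test)(void *rdev);
	};

	static int test_stub(void *rdev) { return 0; }	/* stand-in callback */

	/* defined once ... */
	static struct ring_ops gfx_ring = {
		.ring_test = test_stub,
		.ib_test = test_stub,
	};

	/* ... referenced by pointer from any number of per-ASIC tables */
	struct asic_ops { struct ring_ops *ring[2]; };

	static struct asic_ops asic_a = { .ring = { &gfx_ring, &gfx_ring } };
	static struct asic_ops asic_b = { .ring = { &gfx_ring, NULL } };
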
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 902479fa737f..818bbe6b884b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -336,10 +336,6 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
-int r600_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages, struct radeon_fence **fence);
int r600_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -371,8 +367,6 @@ int r600_count_pipe_bits(uint32_t val);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
int r600_pcie_gart_init(struct radeon_device *rdev);
void r600_scratch_init(struct radeon_device *rdev);
-int r600_blit_init(struct radeon_device *rdev);
-void r600_blit_fini(struct radeon_device *rdev);
int r600_init_microcode(struct radeon_device *rdev);
/* r600 irq */
int r600_irq_process(struct radeon_device *rdev);
@@ -385,28 +379,25 @@ void r600_disable_interrupts(struct radeon_device *rdev);
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
-struct r600_audio r600_audio_status(struct radeon_device *rdev);
+struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
- struct radeon_fence **fence, struct radeon_sa_bo **vb,
- struct radeon_semaphore **sem);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
- struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
-void r600_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages,
- struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
u32 r600_get_xclk(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
int rv6xx_get_temp(struct radeon_device *rdev);
int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
void r600_dpm_post_set_power_state(struct radeon_device *rdev);
+/* r600 dma */
+uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void r600_dma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
/* rv6xx dpm */
int rv6xx_dpm_init(struct radeon_device *rdev);
int rv6xx_dpm_enable(struct radeon_device *rdev);
@@ -438,19 +429,6 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev,
void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
-/* uvd */
-int r600_uvd_init(struct radeon_device *rdev);
-int r600_uvd_rbc_start(struct radeon_device *rdev);
-void r600_uvd_rbc_stop(struct radeon_device *rdev);
-int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-void r600_uvd_fence_emit(struct radeon_device *rdev,
- struct radeon_fence *fence);
-void r600_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
-void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-
/*
* rv770,rv730,rv710,rv740
*/
@@ -468,7 +446,6 @@ int rv770_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence **fence);
u32 rv770_get_xclk(struct radeon_device *rdev);
-int rv770_uvd_resume(struct radeon_device *rdev);
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int rv770_get_temp(struct radeon_device *rdev);
/* rv7xx pm */
@@ -530,7 +507,6 @@ extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_ba
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
-int evergreen_blit_init(struct radeon_device *rdev);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
@@ -652,6 +628,8 @@ int trinity_dpm_force_performance_level(struct radeon_device *rdev,
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
+int dce6_audio_init(struct radeon_device *rdev);
+void dce6_audio_fini(struct radeon_device *rdev);
/*
* si
@@ -712,7 +690,6 @@ u32 cik_get_xclk(struct radeon_device *rdev);
uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-int cik_uvd_resume(struct radeon_device *rdev);
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
@@ -763,5 +740,81 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void cik_compute_ring_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
+int ci_get_temp(struct radeon_device *rdev);
+int kv_get_temp(struct radeon_device *rdev);
+
+int ci_dpm_init(struct radeon_device *rdev);
+int ci_dpm_enable(struct radeon_device *rdev);
+void ci_dpm_disable(struct radeon_device *rdev);
+int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
+int ci_dpm_set_power_state(struct radeon_device *rdev);
+void ci_dpm_post_set_power_state(struct radeon_device *rdev);
+void ci_dpm_setup_asic(struct radeon_device *rdev);
+void ci_dpm_display_configuration_changed(struct radeon_device *rdev);
+void ci_dpm_fini(struct radeon_device *rdev);
+u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void ci_dpm_print_power_state(struct radeon_device *rdev,
+ struct radeon_ps *ps);
+void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
+int ci_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level);
+bool ci_dpm_vblank_too_short(struct radeon_device *rdev);
+void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+
+int kv_dpm_init(struct radeon_device *rdev);
+int kv_dpm_enable(struct radeon_device *rdev);
+void kv_dpm_disable(struct radeon_device *rdev);
+int kv_dpm_pre_set_power_state(struct radeon_device *rdev);
+int kv_dpm_set_power_state(struct radeon_device *rdev);
+void kv_dpm_post_set_power_state(struct radeon_device *rdev);
+void kv_dpm_setup_asic(struct radeon_device *rdev);
+void kv_dpm_display_configuration_changed(struct radeon_device *rdev);
+void kv_dpm_fini(struct radeon_device *rdev);
+u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void kv_dpm_print_power_state(struct radeon_device *rdev,
+ struct radeon_ps *ps);
+void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
+int kv_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level);
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+
+/* uvd v1.0 */
+uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void uvd_v1_0_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+
+int uvd_v1_0_init(struct radeon_device *rdev);
+void uvd_v1_0_fini(struct radeon_device *rdev);
+int uvd_v1_0_start(struct radeon_device *rdev);
+void uvd_v1_0_stop(struct radeon_device *rdev);
+
+int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+
+/* uvd v2.2 */
+int uvd_v2_2_resume(struct radeon_device *rdev);
+void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+
+/* uvd v3.1 */
+void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+
+/* uvd v4.2 */
+int uvd_v4_2_resume(struct radeon_device *rdev);
#endif
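
The UVD prototypes above are regrouped by IP-block revision (uvd_v1_0_*, uvd_v2_2_*, uvd_v3_1_*, uvd_v4_2_*) instead of by the first chip that used them (r600_uvd_*, rv770_uvd_resume, cik_uvd_resume), so each ASIC picks whichever revision of each hook it actually ships. A hedged sketch of a caller mixing revisions -- the CHIP_BONAIRE cutoff is illustrative only, not taken from the patch:

	/* hypothetical dispatch: the v1.0 ring code is shared, while the
	 * resume path depends on the UVD revision present on the chip */
	static int uvd_resume_dispatch(struct radeon_device *rdev)
	{
		if (rdev->family >= CHIP_BONAIRE)	/* assumed cutoff */
			return uvd_v4_2_resume(rdev);
		return uvd_v2_2_resume(rdev);
	}
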
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index e3f3e8841789..404e25d285ba 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -163,8 +163,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+ gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
- gpio = &i2c_info->asGPIO_Info[i];
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
@@ -172,6 +172,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
break;
}
+ gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+ ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
}
}
@@ -195,9 +197,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+ gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
- gpio = &i2c_info->asGPIO_Info[i];
-
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
@@ -206,12 +207,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
sprintf(stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
+ gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+ ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
}
}
}
static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
- u8 id)
+ u8 id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
struct radeon_gpio_rec gpio;
@@ -230,8 +233,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+ pin = gpio_info->asGPIO_Pin;
for (i = 0; i < num_indices; i++) {
- pin = &gpio_info->asGPIO_Pin[i];
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
@@ -239,6 +242,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
gpio.valid = true;
break;
}
+ pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
+ ((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
}
}
@@ -711,13 +716,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
(ctx->bios + data_offset +
le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
+ u8 *num_dst_objs = (u8 *)
+ ((u8 *)router_src_dst_table + 1 +
+ (router_src_dst_table->ucNumberOfSrc * 2));
+ u16 *dst_objs = (u16 *)(num_dst_objs + 1);
int enum_id;
router.router_id = router_obj_id;
- for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
- enum_id++) {
+ for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
if (le16_to_cpu(path->usConnObjectId) ==
- le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
+ le16_to_cpu(dst_objs[enum_id]))
break;
}
@@ -1480,6 +1488,15 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
uint8_t frev, crev;
int i, num_indices;
+ if (id == ASIC_INTERNAL_MEMORY_SS) {
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
+ return false;
+ }
+ if (id == ASIC_INTERNAL_ENGINE_SS) {
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
+ return false;
+ }
+
memset(ss, 0, sizeof(struct radeon_atom_ss));
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
@@ -1672,7 +1689,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
kfree(edid);
}
}
- record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+ record += fake_edid_record->ucFakeEDIDLength ?
+ fake_edid_record->ucFakeEDIDLength + 2 :
+ sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
@@ -2237,6 +2256,11 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_CI;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
} else if ((controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
(controller->ucType ==
@@ -2782,7 +2806,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
dividers->enable_dithen = (args.v3.ucCntlFlag &
ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
- dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+ dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
dividers->ref_div = args.v3.ucRefDiv;
dividers->vco_mode = (args.v3.ucCntlFlag &
@@ -3077,6 +3101,121 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev
return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
+int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
+ u16 *leakage_id)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return -EINVAL;
+
+ switch (crev) {
+ case 3:
+ case 4:
+ args.v3.ucVoltageType = 0;
+ args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
+ args.v3.usVoltageLevel = 0;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
+ u16 *vddc, u16 *vddci,
+ u16 virtual_voltage_id,
+ u16 vbios_voltage_id)
+{
+ int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
+ u8 frev, crev;
+ u16 data_offset, size;
+ int i, j;
+ ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
+ u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
+
+ *vddc = 0;
+ *vddci = 0;
+
+ if (!atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+
+ profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
+ (rdev->mode_info.atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 1:
+ return -EINVAL;
+ case 2:
+ switch (crev) {
+ case 1:
+ if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
+ return -EINVAL;
+ leakage_bin = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usLeakageBinArrayOffset));
+ vddc_id_buf = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
+ vddc_buf = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
+ vddci_id_buf = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
+ vddci_buf = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
+
+ if (profile->ucElbVDDC_Num > 0) {
+ for (i = 0; i < profile->ucElbVDDC_Num; i++) {
+ if (vddc_id_buf[i] == virtual_voltage_id) {
+ for (j = 0; j < profile->ucLeakageBinNum; j++) {
+ if (vbios_voltage_id <= leakage_bin[j]) {
+ *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ if (profile->ucElbVDDCI_Num > 0) {
+ for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
+ if (vddci_id_buf[i] == virtual_voltage_id) {
+ for (j = 0; j < profile->ucLeakageBinNum; j++) {
+ if (vbios_voltage_id <= leakage_bin[j]) {
+ *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
u16 voltage_level, u8 voltage_type,
u32 *gpio_value, u32 *gpio_mask)
@@ -3279,10 +3418,11 @@ int radeon_atom_get_max_voltage(struct radeon_device *rdev,
ATOM_VOLTAGE_FORMULA_V2 *formula =
&voltage_object->v2.asFormula;
if (formula->ucNumOfVoltageEntries) {
+ VOLTAGE_LUT_ENTRY *lut = (VOLTAGE_LUT_ENTRY *)
+ ((u8 *)&formula->asVIDAdjustEntries[0] +
+ (sizeof(VOLTAGE_LUT_ENTRY) * (formula->ucNumOfVoltageEntries - 1)));
*max_voltage =
- le16_to_cpu(formula->asVIDAdjustEntries[
- formula->ucNumOfVoltageEntries - 1
- ].usVoltageValue);
+ le16_to_cpu(lut->usVoltageValue);
return 0;
}
}
@@ -3442,11 +3582,13 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
if (voltage_object) {
ATOM_VOLTAGE_FORMULA_V2 *formula =
&voltage_object->v2.asFormula;
+ VOLTAGE_LUT_ENTRY *lut;
if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES)
return -EINVAL;
+ lut = &formula->asVIDAdjustEntries[0];
for (i = 0; i < formula->ucNumOfVoltageEntries; i++) {
voltage_table->entries[i].value =
- le16_to_cpu(formula->asVIDAdjustEntries[i].usVoltageValue);
+ le16_to_cpu(lut->usVoltageValue);
ret = radeon_atom_get_voltage_gpio_settings(rdev,
voltage_table->entries[i].value,
voltage_type,
@@ -3454,6 +3596,8 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
&voltage_table->mask_low);
if (ret)
return ret;
+ lut = (VOLTAGE_LUT_ENTRY *)
+ ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY));
}
voltage_table->count = formula->ucNumOfVoltageEntries;
return 0;
@@ -3473,13 +3617,17 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
if (voltage_object) {
ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
&voltage_object->v3.asGpioVoltageObj;
+ VOLTAGE_LUT_ENTRY_V2 *lut;
if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
return -EINVAL;
+ lut = &gpio->asVolGpioLut[0];
for (i = 0; i < gpio->ucGpioEntryNum; i++) {
voltage_table->entries[i].value =
- le16_to_cpu(gpio->asVolGpioLut[i].usVoltageValue);
+ le16_to_cpu(lut->usVoltageValue);
voltage_table->entries[i].smio_low =
- le32_to_cpu(gpio->asVolGpioLut[i].ulVoltageId);
+ le32_to_cpu(lut->ulVoltageId);
+ lut = (VOLTAGE_LUT_ENTRY_V2 *)
+ ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
}
voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
voltage_table->count = gpio->ucGpioEntryNum;
@@ -3605,7 +3753,6 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
union vram_info *vram_info;
u32 mem_timing_size = gddr5 ?
sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT);
- u8 *p;
memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table));
@@ -3624,6 +3771,7 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
ATOM_VRAM_MODULE_V4 *vram_module =
(ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
+ ATOM_MEMORY_TIMING_FORMAT *format;
for (i = 0; i < module_index; i++) {
if (le16_to_cpu(vram_module->usModuleSize) == 0)
@@ -3634,11 +3782,11 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
mclk_range_table->num_entries = (u8)
((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
mem_timing_size);
- p = (u8 *)&vram_module->asMemTiming[0];
+ format = &vram_module->asMemTiming[0];
for (i = 0; i < mclk_range_table->num_entries; i++) {
- ATOM_MEMORY_TIMING_FORMAT *format = (ATOM_MEMORY_TIMING_FORMAT *)p;
mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
- p += mem_timing_size;
+ format = (ATOM_MEMORY_TIMING_FORMAT *)
+ ((u8 *)format + mem_timing_size);
}
} else
return -EINVAL;
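
The radeon_atombios.c hunks above repeat one pattern: ATOM BIOS table entries are no longer indexed as C arrays (asGPIO_Info[i], asVIDAdjustEntries[i], asMemTiming[i]) but walked with explicit byte strides, because several tables pack entries whose on-disk size differs from the declared struct -- fake-EDID records carry their own length byte, and memory-timing entries are wider on GDDR5 parts. A minimal sketch of the traversal with a made-up entry type:

	struct atom_entry {
		unsigned char len;		/* size of the payload that follows */
		unsigned char payload[1];	/* really 'len' bytes */
	};

	/* advance by the entry's real on-disk size, not sizeof(struct) */
	static const struct atom_entry *atom_next(const struct atom_entry *e)
	{
		return (const struct atom_entry *)
			((const unsigned char *)e + 1 + e->len);
	}
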
diff --git a/drivers/gpu/drm/radeon/radeon_blit_common.h b/drivers/gpu/drm/radeon/radeon_blit_common.h
deleted file mode 100644
index 4ecbe72c9d2d..000000000000
--- a/drivers/gpu/drm/radeon/radeon_blit_common.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2009 Advanced Micro Devices, Inc.
- * Copyright 2009 Red Hat Inc.
- * Copyright 2012 Alcatel-Lucent, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __RADEON_BLIT_COMMON_H__
-
-#define DI_PT_RECTLIST 0x11
-#define DI_INDEX_SIZE_16_BIT 0x0
-#define DI_SRC_SEL_AUTO_INDEX 0x2
-
-#define FMT_8 0x1
-#define FMT_5_6_5 0x8
-#define FMT_8_8_8_8 0x1a
-#define COLOR_8 0x1
-#define COLOR_5_6_5 0x8
-#define COLOR_8_8_8_8 0x1a
-
-#define RECT_UNIT_H 32
-#define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
-
-#define __RADEON_BLIT_COMMON_H__
-#endif
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 13a130fb3517..a56084410372 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -268,7 +268,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
/* we only support VM on some SI+ rings */
- if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) &&
+ if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
DRM_ERROR("Ring %d requires VM!\n", p->ring);
return -EINVAL;
@@ -383,6 +383,10 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
DRM_ERROR("Invalid command stream !\n");
return r;
}
+
+ if (parser->ring == R600_RING_TYPE_UVD_INDEX)
+ radeon_uvd_note_usage(rdev);
+
radeon_cs_sync_rings(parser);
r = radeon_ib_schedule(rdev, &parser->ib, NULL);
if (r) {
@@ -474,6 +478,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
return r;
}
+ if (parser->ring == R600_RING_TYPE_UVD_INDEX)
+ radeon_uvd_note_usage(rdev);
+
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
r = radeon_vm_alloc_pt(rdev, vm);
@@ -552,10 +559,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return r;
}
- /* XXX pick SD/HD/MVC */
- if (parser.ring == R600_RING_TYPE_UVD_INDEX)
- radeon_uvd_note_usage(rdev);
-
r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0610ca4fb6a3..16cb8792b1e6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1003,16 +1003,28 @@ static void radeon_check_arguments(struct radeon_device *rdev)
radeon_vram_limit = 0;
}
+ if (radeon_gart_size == -1) {
+ /* default to a larger gart size on newer asics */
+ if (rdev->family >= CHIP_RV770)
+ radeon_gart_size = 1024;
+ else
+ radeon_gart_size = 512;
+ }
/* gtt size must be power of two and greater or equal to 32M */
if (radeon_gart_size < 32) {
- dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
+ dev_warn(rdev->dev, "gart size (%d) too small\n",
radeon_gart_size);
- radeon_gart_size = 512;
-
+ if (rdev->family >= CHIP_RV770)
+ radeon_gart_size = 1024;
+ else
+ radeon_gart_size = 512;
} else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
- radeon_gart_size = 512;
+ if (rdev->family >= CHIP_RV770)
+ radeon_gart_size = 1024;
+ else
+ radeon_gart_size = 512;
}
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
@@ -1144,7 +1156,7 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->family = flags & RADEON_FAMILY_MASK;
rdev->is_atom_bios = false;
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ rdev->mc.gtt_size = 512 * 1024 * 1024;
rdev->accel_working = false;
/* set up ring ids */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1163,6 +1175,7 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
+ mutex_init(&rdev->srbm_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
@@ -1519,6 +1532,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+ radeon_pm_suspend(rdev);
radeon_suspend(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1564,6 +1578,7 @@ retry:
}
}
+ radeon_pm_resume(rdev);
drm_helper_resume_force_mode(rdev->ddev);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
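
With gartsize=-1 the driver now derives the GART size instead of hard-coding 512M, and the same family test appears three times in radeon_check_arguments() above. Condensed into one helper for clarity (the helper itself is a sketch, not in the patch):

	/* newer asics default to a 1GB GART, older parts keep 512MB */
	static unsigned int radeon_default_gart_mb(struct radeon_device *rdev)
	{
		return (rdev->family >= CHIP_RV770) ? 1024 : 512;
	}

The chosen value still feeds the existing conversion, rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20.
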
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 358bd96c06c5..b055bddaa94c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1255,41 +1255,41 @@ static void radeon_afmt_init(struct radeon_device *rdev)
for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
rdev->mode_info.afmt[i] = NULL;
- if (ASIC_IS_DCE6(rdev)) {
- /* todo */
+ if (ASIC_IS_NODCE(rdev)) {
+ /* nothing to do */
} else if (ASIC_IS_DCE4(rdev)) {
+ static uint32_t eg_offsets[] = {
+ EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_CRTC5_REGISTER_OFFSET,
+ 0x13830 - 0x7030,
+ };
+ int num_afmt;
+
+ /* DCE8 has 7 audio blocks tied to DIG encoders */
+ /* DCE6 has 6 audio blocks tied to DIG encoders */
/* DCE4/5 has 6 audio blocks tied to DIG encoders */
/* DCE4.1 has 2 audio blocks tied to DIG encoders */
- rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[0]) {
- rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
- rdev->mode_info.afmt[0]->id = 0;
- }
- rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[1]) {
- rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
- rdev->mode_info.afmt[1]->id = 1;
- }
- if (!ASIC_IS_DCE41(rdev)) {
- rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[2]) {
- rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
- rdev->mode_info.afmt[2]->id = 2;
- }
- rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[3]) {
- rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
- rdev->mode_info.afmt[3]->id = 3;
- }
- rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[4]) {
- rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
- rdev->mode_info.afmt[4]->id = 4;
- }
- rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[5]) {
- rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
- rdev->mode_info.afmt[5]->id = 5;
+ if (ASIC_IS_DCE8(rdev))
+ num_afmt = 7;
+ else if (ASIC_IS_DCE6(rdev))
+ num_afmt = 6;
+ else if (ASIC_IS_DCE5(rdev))
+ num_afmt = 6;
+ else if (ASIC_IS_DCE41(rdev))
+ num_afmt = 2;
+ else /* DCE4 */
+ num_afmt = 6;
+
+ BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
+ for (i = 0; i < num_afmt; i++) {
+ rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[i]) {
+ rdev->mode_info.afmt[i]->offset = eg_offsets[i];
+ rdev->mode_info.afmt[i]->id = i;
}
}
} else if (ASIC_IS_DCE3(rdev)) {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 1f93dd503646..6d09258fb9f2 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -148,7 +148,7 @@ int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
int radeon_vram_limit = 0;
-int radeon_gart_size = 512; /* default gart size */
+int radeon_gart_size = -1; /* auto */
int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
@@ -181,7 +181,7 @@ module_param_named(vramlimit, radeon_vram_limit, int, 0600);
MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
module_param_named(agpmode, radeon_agpmode, int, 0444);
-MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc)");
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
module_param_named(gartsize, radeon_gart_size, int, 0600);
MODULE_PARM_DESC(benchmark, "Run benchmark");
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ddb0efe2408..ddb8f8e04eb5 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
} else {
/* put fence directly behind firmware */
- index = ALIGN(rdev->uvd.fw_size, 8);
+ index = ALIGN(rdev->uvd_fw->size, 8);
rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 6a51d943ccf4..b990b1a2bd50 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
return;
}
- radeon_gart_table_vram_unpin(rdev);
radeon_bo_unref(&rdev->gart.robj);
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 081886b0642d..cc9e8482cf30 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -275,17 +275,19 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
dev_info(rdev->dev, "radeon: using MSI.\n");
}
}
+
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
+ INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
+
rdev->irq.installed = true;
r = drm_irq_install(rdev->ddev);
if (r) {
rdev->irq.installed = false;
+ flush_work(&rdev->hotplug_work);
return r;
}
- INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
- INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
- INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
-
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
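
The reordering above closes a race: once drm_irq_install() returns, the interrupt handler can fire and queue hotplug, audio, or reset work, so the work structs must be initialized beforehand, and the error path has to drain anything already queued. Reduced to one work item, the shape of the fix is:

	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);	/* prepare first */
	r = drm_irq_install(rdev->ddev);	/* interrupts go live here */
	if (r) {
		rdev->irq.installed = false;
		flush_work(&rdev->hotplug_work);	/* drain queued work */
		return r;
	}
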
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index b46a5616664a..205440d9544b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -433,6 +433,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_SI_CP_DMA_COMPUTE:
+ *value = 1;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 8296632a4235..d908d8d68f6b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -225,6 +225,7 @@ struct radeon_afmt {
int offset;
bool last_buffer_filled_status;
int id;
+ struct r600_audio_pin *pin;
};
struct radeon_mode_info {
@@ -233,7 +234,7 @@ struct radeon_mode_info {
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[6];
- struct radeon_afmt *afmt[6];
+ struct radeon_afmt *afmt[7];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f374c467aaca..d7555369a3e5 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -569,6 +569,8 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_NI:
case THERMAL_TYPE_SUMO:
case THERMAL_TYPE_SI:
+ case THERMAL_TYPE_CI:
+ case THERMAL_TYPE_KV:
if (rdev->asic->pm.get_temperature == NULL)
return err;
rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
@@ -624,7 +626,15 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
/* switch back the user state */
dpm_state = rdev->pm.dpm.user_state;
}
- radeon_dpm_enable_power_state(rdev, dpm_state);
+ mutex_lock(&rdev->pm.mutex);
+ if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
+ rdev->pm.dpm.thermal_active = true;
+ else
+ rdev->pm.dpm.thermal_active = false;
+ rdev->pm.dpm.state = dpm_state;
+ mutex_unlock(&rdev->pm.mutex);
+
+ radeon_pm_compute_clocks(rdev);
}
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
@@ -687,7 +697,10 @@ restart_search:
break;
/* internal states */
case POWER_STATE_TYPE_INTERNAL_UVD:
- return rdev->pm.dpm.uvd_ps;
+ if (rdev->pm.dpm.uvd_ps)
+ return rdev->pm.dpm.uvd_ps;
+ else
+ break;
case POWER_STATE_TYPE_INTERNAL_UVD_SD:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
return ps;
@@ -729,10 +742,17 @@ restart_search:
/* use a fallback state if we didn't match */
switch (dpm_state) {
case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ goto restart_search;
case POWER_STATE_TYPE_INTERNAL_UVD_HD:
case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- return rdev->pm.dpm.uvd_ps;
+ if (rdev->pm.dpm.uvd_ps) {
+ return rdev->pm.dpm.uvd_ps;
+ } else {
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ goto restart_search;
+ }
case POWER_STATE_TYPE_INTERNAL_THERMAL:
dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
goto restart_search;
@@ -850,38 +870,51 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
radeon_dpm_post_set_power_state(rdev);
+ /* force low perf level for thermal */
+ if (rdev->pm.dpm.thermal_active &&
+ rdev->asic->dpm.force_performance_level) {
+ radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+ }
+
done:
mutex_unlock(&rdev->ring_lock);
up_write(&rdev->pm.mclk_lock);
mutex_unlock(&rdev->ddev->struct_mutex);
}
-void radeon_dpm_enable_power_state(struct radeon_device *rdev,
- enum radeon_pm_state_type dpm_state)
+void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
- if (!rdev->pm.dpm_enabled)
- return;
+ enum radeon_pm_state_type dpm_state;
- mutex_lock(&rdev->pm.mutex);
- switch (dpm_state) {
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- rdev->pm.dpm.thermal_active = true;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD:
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- rdev->pm.dpm.uvd_active = true;
- break;
- default:
- rdev->pm.dpm.thermal_active = false;
- rdev->pm.dpm.uvd_active = false;
- break;
+ if (rdev->asic->dpm.powergate_uvd) {
+ mutex_lock(&rdev->pm.mutex);
+ /* enable/disable UVD */
+ radeon_dpm_powergate_uvd(rdev, !enable);
+ mutex_unlock(&rdev->pm.mutex);
+ } else {
+ if (enable) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.dpm.uvd_active = true;
+ if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
+ else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
+ else
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
+ rdev->pm.dpm.state = dpm_state;
+ mutex_unlock(&rdev->pm.mutex);
+ } else {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.dpm.uvd_active = false;
+ mutex_unlock(&rdev->pm.mutex);
+ }
+
+ radeon_pm_compute_clocks(rdev);
}
- rdev->pm.dpm.state = dpm_state;
- mutex_unlock(&rdev->pm.mutex);
- radeon_pm_compute_clocks(rdev);
}
static void radeon_pm_suspend_old(struct radeon_device *rdev)
@@ -1176,7 +1209,17 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
- if (radeon_dpm == 1)
+ case CHIP_BONAIRE:
+ case CHIP_KABINI:
+ case CHIP_KAVERI:
+ /* DPM requires the RLC, RV770+ dGPU requires SMC */
+ if (!rdev->rlc_fw)
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if ((rdev->family >= CHIP_RV770) &&
+ (!(rdev->flags & RADEON_IS_IGP)) &&
+ (!rdev->smc_fw))
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if (radeon_dpm == 1)
rdev->pm.pm_method = PM_METHOD_DPM;
else
rdev->pm.pm_method = PM_METHOD_PROFILE;
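
radeon_dpm_enable_uvd() above maps the active decode-stream mix onto a UVD power state (sd and hd count standard- and high-definition sessions); ASICs with a powergate_uvd hook skip this and gate the UVD block directly. The if-chain, pulled out into a sketch for readability:

	static enum radeon_pm_state_type uvd_pick_state(int sd, int hd)
	{
		if (sd == 1 && hd == 0)
			return POWER_STATE_TYPE_INTERNAL_UVD_SD;
		if ((sd == 2 && hd == 0) || (sd == 0 && hd == 1))
			return POWER_STATE_TYPE_INTERNAL_UVD_HD;
		if (sd == 0 && hd == 2)
			return POWER_STATE_TYPE_INTERNAL_UVD_HD2;
		return POWER_STATE_TYPE_INTERNAL_UVD;	/* generic fallback */
	}
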
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index fb5ea6208970..46a25f037b84 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -363,11 +363,10 @@ u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
{
u32 rptr;
- if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
+ if (rdev->wb.enabled)
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
else
rptr = RREG32(ring->rptr_reg);
- rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
return rptr;
}
@@ -378,7 +377,6 @@ u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
u32 wptr;
wptr = RREG32(ring->wptr_reg);
- wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
return wptr;
}
@@ -386,7 +384,7 @@ u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+ WREG32(ring->wptr_reg, ring->wptr);
(void)RREG32(ring->wptr_reg);
}
@@ -719,16 +717,13 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
* @rptr_offs: offset of the rptr writeback location in the WB buffer
* @rptr_reg: MMIO offset of the rptr register
* @wptr_reg: MMIO offset of the wptr register
- * @ptr_reg_shift: bit offset of the rptr/wptr values
- * @ptr_reg_mask: bit mask of the rptr/wptr values
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
- unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
- u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop)
{
int r;
@@ -736,8 +731,6 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
ring->rptr_offs = rptr_offs;
ring->rptr_reg = rptr_reg;
ring->wptr_reg = wptr_reg;
- ring->ptr_reg_shift = ptr_reg_shift;
- ring->ptr_reg_mask = ptr_reg_mask;
ring->nop = nop;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
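
With the shift/mask plumbing gone, rptr and wptr are treated as plain 32-bit register values on every remaining ring, and each caller of radeon_ring_init() drops two arguments. Before and after, per the hunks above:

	/* old */
	r = radeon_ring_init(rdev, ring, ring_size, rptr_offs, rptr_reg,
			     wptr_reg, ptr_reg_shift, ptr_reg_mask, nop);
	/* new */
	r = radeon_ring_init(rdev, ring, ring_size, rptr_offs, rptr_reg,
			     wptr_reg, nop);
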
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index d8b05f7bcf1a..33858364fe89 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -35,6 +35,12 @@
#define SI_PFP_UCODE_SIZE 2144
#define SI_PM4_UCODE_SIZE 2144
#define SI_CE_UCODE_SIZE 2144
+#define CIK_PFP_UCODE_SIZE 2144
+#define CIK_ME_UCODE_SIZE 2144
+#define CIK_CE_UCODE_SIZE 2144
+
+/* MEC */
+#define CIK_MEC_UCODE_SIZE 4192
/* RLC */
#define R600_RLC_UCODE_SIZE 768
@@ -43,12 +49,20 @@
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536
#define SI_RLC_UCODE_SIZE 2048
+#define BONAIRE_RLC_UCODE_SIZE 2048
+#define KB_RLC_UCODE_SIZE 2560
+#define KV_RLC_UCODE_SIZE 2560
/* MC */
#define BTC_MC_UCODE_SIZE 6024
#define CAYMAN_MC_UCODE_SIZE 6037
#define SI_MC_UCODE_SIZE 7769
#define OLAND_MC_UCODE_SIZE 7863
+#define CIK_MC_UCODE_SIZE 7866
+
+/* SDMA */
+#define CIK_SDMA_UCODE_SIZE 1050
+#define CIK_SDMA_UCODE_VERSION 64
/* SMC */
#define RV770_SMC_UCODE_START 0x0100
@@ -126,4 +140,7 @@
#define HAINAN_SMC_UCODE_START 0x10000
#define HAINAN_SMC_UCODE_SIZE 0xe67C
+#define BONAIRE_SMC_UCODE_START 0x20000
+#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 414fd145d20e..1a01bbff9bfa 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -56,7 +56,6 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work);
int radeon_uvd_init(struct radeon_device *rdev)
{
- const struct firmware *fw;
unsigned long bo_size;
const char *fw_name;
int i, r;
@@ -105,14 +104,14 @@ int radeon_uvd_init(struct radeon_device *rdev)
return -EINVAL;
}
- r = request_firmware(&fw, fw_name, rdev->dev);
+ r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
if (r) {
dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
fw_name);
return r;
}
- bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) +
+ bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
@@ -145,15 +144,10 @@ int radeon_uvd_init(struct radeon_device *rdev)
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
- rdev->uvd.fw_size = fw->size;
- memset(rdev->uvd.cpu_addr, 0, bo_size);
- memcpy(rdev->uvd.cpu_addr, fw->data, fw->size);
-
- release_firmware(fw);
-
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
atomic_set(&rdev->uvd.handles[i], 0);
rdev->uvd.filp[i] = NULL;
+ rdev->uvd.img_size[i] = 0;
}
return 0;
@@ -174,33 +168,60 @@ void radeon_uvd_fini(struct radeon_device *rdev)
}
radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
+ release_firmware(rdev->uvd_fw);
}
int radeon_uvd_suspend(struct radeon_device *rdev)
{
unsigned size;
+ void *ptr;
+ int i;
if (rdev->uvd.vcpu_bo == NULL)
return 0;
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+ if (atomic_read(&rdev->uvd.handles[i]))
+ break;
+
+ if (i == RADEON_MAX_UVD_HANDLES)
+ return 0;
+
size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
+
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
+
rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
- memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size);
+ memcpy(rdev->uvd.saved_bo, ptr, size);
return 0;
}
int radeon_uvd_resume(struct radeon_device *rdev)
{
+ unsigned size;
+ void *ptr;
+
if (rdev->uvd.vcpu_bo == NULL)
return -EINVAL;
+ memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+ size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
+
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
+
if (rdev->uvd.saved_bo != NULL) {
- unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo);
- memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size);
+ memcpy(ptr, rdev->uvd.saved_bo, size);
kfree(rdev->uvd.saved_bo);
rdev->uvd.saved_bo = NULL;
- }
+ } else
+ memset(ptr, 0, size);
return 0;
}
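The suspend/resume rework above assumes a fixed layout in the UVD VCPU buffer object: firmware image first, then stack and heap. Only the region past the firmware needs to be preserved across suspend, because resume re-copies the image from the now-persistent rdev->uvd_fw. A sketch of how the saved region is derived (fragment, names from the patch):

    /* tail = everything in the VCPU BO after the firmware image */
    unsigned tail_size = radeon_bo_size(rdev->uvd.vcpu_bo) - rdev->uvd_fw->size;
    void *tail = rdev->uvd.cpu_addr + rdev->uvd_fw->size;
    /* suspend: kmalloc(tail_size) and copy from tail;
     * resume: memcpy the firmware, then restore or zero the tail */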
@@ -215,8 +236,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
int i, r;
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
- if (rdev->uvd.filp[i] == filp) {
- uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ if (handle != 0 && rdev->uvd.filp[i] == filp) {
struct radeon_fence *fence;
r = radeon_uvd_get_destroy_msg(rdev,
@@ -327,6 +348,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
unsigned offset, unsigned buf_sizes[])
{
int32_t *msg, msg_type, handle;
+ unsigned img_size = 0;
void *ptr;
int i, r;
@@ -336,9 +358,19 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
+ if (bo->tbo.sync_obj) {
+ r = radeon_fence_wait(bo->tbo.sync_obj, false);
+ if (r) {
+ DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+ return r;
+ }
+ }
+
r = radeon_bo_kmap(bo, &ptr);
- if (r)
+ if (r) {
+ DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
return r;
+ }
msg = ptr + offset;
@@ -353,6 +385,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
if (msg_type == 1) {
/* it's a decode msg, calc buffer sizes */
r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+ /* calc image size (width * height) */
+ img_size = msg[6] * msg[7];
radeon_bo_kunmap(bo);
if (r)
return r;
@@ -364,8 +398,16 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
radeon_bo_kunmap(bo);
return 0;
} else {
- /* it's a create msg, no special handling needed */
+ /* it's a create msg, calc image size (width * height) */
+ img_size = msg[7] * msg[8];
radeon_bo_kunmap(bo);
+
+ if (msg_type != 0) {
+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+ return -EINVAL;
+ }
+
+ /* it's a create msg, no special handling needed */
}
/* create or decode, validate the handle */
@@ -378,6 +420,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
p->rdev->uvd.filp[i] = p->filp;
+ p->rdev->uvd.img_size[i] = img_size;
return 0;
}
}
@@ -388,7 +431,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
int data0, int data1,
- unsigned buf_sizes[])
+ unsigned buf_sizes[], bool *has_msg_cmd)
{
struct radeon_cs_chunk *relocs_chunk;
struct radeon_cs_reloc *reloc;
@@ -417,7 +460,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
if (cmd < 0x4) {
if ((end - start) < buf_sizes[cmd]) {
- DRM_ERROR("buffer to small (%d / %d)!\n",
+ DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
(unsigned)(end - start), buf_sizes[cmd]);
return -EINVAL;
}
@@ -442,9 +485,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
}
if (cmd == 0) {
+ if (*has_msg_cmd) {
+ DRM_ERROR("More than one message in a UVD-IB!\n");
+ return -EINVAL;
+ }
+ *has_msg_cmd = true;
r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
if (r)
return r;
+ } else if (!*has_msg_cmd) {
+ DRM_ERROR("Message needed before other commands are send!\n");
+ return -EINVAL;
}
return 0;
@@ -453,7 +504,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
int *data0, int *data1,
- unsigned buf_sizes[])
+ unsigned buf_sizes[],
+ bool *has_msg_cmd)
{
int i, r;
@@ -467,7 +519,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
*data1 = p->idx;
break;
case UVD_GPCOM_VCPU_CMD:
- r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
+ r = radeon_uvd_cs_reloc(p, *data0, *data1,
+ buf_sizes, has_msg_cmd);
if (r)
return r;
break;
@@ -488,6 +541,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
struct radeon_cs_packet pkt;
int r, data0 = 0, data1 = 0;
+ /* does the IB have a msg command */
+ bool has_msg_cmd = false;
+
/* minimum buffer sizes */
unsigned buf_sizes[] = {
[0x00000000] = 2048,
@@ -514,8 +570,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
return r;
switch (pkt.type) {
case RADEON_PACKET_TYPE0:
- r = radeon_uvd_cs_reg(p, &pkt, &data0,
- &data1, buf_sizes);
+ r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
+ buf_sizes, &has_msg_cmd);
if (r)
return r;
break;
@@ -527,6 +583,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+
+ if (!has_msg_cmd) {
+ DRM_ERROR("UVD-IBs need a msg command!\n");
+ return -EINVAL;
+ }
+
return 0;
}
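The has_msg_cmd plumbing above enforces a simple ordering rule on UVD IBs: exactly one message command, and it must precede every other command. A standalone model of the rule (hypothetical helper; command value 0 stands in for the UVD_GPCOM_VCPU_CMD message case):

    static int uvd_ib_order_ok(const unsigned *cmds, unsigned n)
    {
            unsigned i;
            int has_msg = 0;

            for (i = 0; i < n; i++) {
                    if (cmds[i] == 0) {
                            if (has_msg)
                                    return 0;       /* second message: reject */
                            has_msg = 1;
                    } else if (!has_msg) {
                            return 0;               /* data before the message */
                    }
            }
            return has_msg;                         /* a message is mandatory */
    }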
@@ -678,6 +740,34 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
+/**
+ * radeon_uvd_count_handles - count number of open streams
+ *
+ * @rdev: radeon_device pointer
+ * @sd: number of SD streams
+ * @hd: number of HD streams
+ *
+ * Count the number of open SD/HD streams as a hint for power management
+ */
+static void radeon_uvd_count_handles(struct radeon_device *rdev,
+ unsigned *sd, unsigned *hd)
+{
+ unsigned i;
+
+ *sd = 0;
+ *hd = 0;
+
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (!atomic_read(&rdev->uvd.handles[i]))
+ continue;
+
+ if (rdev->uvd.img_size[i] >= 720*576)
+ ++(*hd);
+ else
+ ++(*sd);
+ }
+}
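The 720*576 threshold above is the PAL SD frame size, so any stream at least that large is counted as HD for DPM purposes. The classification, stated directly (sketch):

    /* true if a stream of width x height pixels counts as HD */
    static bool uvd_stream_is_hd(unsigned width, unsigned height)
    {
            return width * height >= 720 * 576;
    }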
+
static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
struct radeon_device *rdev =
@@ -685,10 +775,7 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- mutex_lock(&rdev->pm.mutex);
- rdev->pm.dpm.uvd_active = false;
- mutex_unlock(&rdev->pm.mutex);
- radeon_pm_compute_clocks(rdev);
+ radeon_dpm_enable_uvd(rdev, false);
} else {
radeon_set_uvd_clocks(rdev, 0, 0);
}
@@ -700,13 +787,25 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
+ bool streams_changed = false;
bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
- if (set_clocks) {
+
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ unsigned hd = 0, sd = 0;
+ radeon_uvd_count_handles(rdev, &sd, &hd);
+ if ((rdev->pm.dpm.sd != sd) ||
+ (rdev->pm.dpm.hd != hd)) {
+ rdev->pm.dpm.sd = sd;
+ rdev->pm.dpm.hd = hd;
+ streams_changed = true;
+ }
+ }
+
+ if (set_clocks || streams_changed) {
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- /* XXX pick SD/HD/MVC */
- radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
+ radeon_dpm_enable_uvd(rdev, true);
} else {
radeon_set_uvd_clocks(rdev, 53300, 40000);
}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 233a9b9fa1f7..b8074a8ec75a 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -174,10 +174,13 @@ int rs400_gart_enable(struct radeon_device *rdev)
/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
* AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
- WREG32_MC(RS480_MC_MISC_CNTL,
- (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
+ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+ tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
+ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
} else {
- WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+ tmp |= RS480_GART_INDEX_REG_EN;
+ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
}
/* Enable gart */
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
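The rs400 fix above turns a blind register write into a read-modify-write so that bits in RS480_MC_MISC_CNTL owned by other code survive. The general shape of the pattern (hypothetical helper; WREG32_MC/RREG32_MC as used in the hunk):

    static void mc_set_bits(struct radeon_device *rdev, u32 reg, u32 bits)
    {
            u32 tmp = RREG32_MC(reg);   /* read the current value */
            tmp |= bits;                /* set only the bits we own */
            WREG32_MC(reg, tmp);        /* write the merged value back */
    }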
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 363018c60412..ab1f2016f21e 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1918,6 +1918,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
if (power_info->pplib.ucStateEntrySize - 1) {
+ u8 *idx;
ps = kzalloc(sizeof(struct rv6xx_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
@@ -1926,12 +1927,12 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
rdev->pm.dpm.ps[i].ps_priv = ps;
rv6xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info);
+ idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
clock_info = (union pplib_clock_info *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
- (power_state->v1.ucClockStateIndices[j] *
- power_info->pplib.ucClockInfoSize));
+ (idx[j] * power_info->pplib.ucClockInfoSize));
rv6xx_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], j,
clock_info);
@@ -1944,9 +1945,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
int rv6xx_dpm_init(struct radeon_device *rdev)
{
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- uint16_t data_offset, size;
- uint8_t frev, crev;
+ struct radeon_atom_ss ss;
struct atom_clock_dividers dividers;
struct rv6xx_power_info *pi;
int ret;
@@ -1989,16 +1988,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev)
pi->gfx_clock_gating = true;
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
+ pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, 0);
+ pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, 0);
+
+ /* Disable sclk ss; it causes hangs on a lot of systems */
+ pi->sclk_ss = false;
+
+ if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
+ else
pi->dynamic_ss = false;
- }
pi->dynamic_pcie_gen2 = true;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 30ea14e8854c..9f5846743c9e 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv730_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv730_golden_registers));
radeon_program_register_sequence(rdev,
rv730_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv730_mgcg_init));
break;
case CHIP_RV710:
radeon_program_register_sequence(rdev,
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv710_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv710_golden_registers));
radeon_program_register_sequence(rdev,
rv710_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv710_mgcg_init));
break;
case CHIP_RV740:
radeon_program_register_sequence(rdev,
rv740_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv740_golden_registers));
radeon_program_register_sequence(rdev,
rv740_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv740_mgcg_init));
break;
default:
break;
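All three fixes in this hunk are the same copy-paste bug: an rv7xx register table passed together with the ARRAY_SIZE of a different table. A hypothetical wrapper that keeps the pointer and its length together would make the mismatch unrepresentable:

    #define program_golden(rdev, table) \
            radeon_program_register_sequence((rdev), (table), \
                                             (const u32)ARRAY_SIZE(table))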
@@ -801,103 +801,6 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
return reference_clock;
}
-int rv770_uvd_resume(struct radeon_device *rdev)
-{
- uint64_t addr;
- uint32_t chip_id, size;
- int r;
-
- r = radeon_uvd_resume(rdev);
- if (r)
- return r;
-
- /* programm the VCPU memory controller bits 0-27 */
- addr = rdev->uvd.gpu_addr >> 3;
- size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
- WREG32(UVD_VCPU_CACHE_SIZE0, size);
-
- addr += size;
- size = RADEON_UVD_STACK_SIZE >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
- WREG32(UVD_VCPU_CACHE_SIZE1, size);
-
- addr += size;
- size = RADEON_UVD_HEAP_SIZE >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
- WREG32(UVD_VCPU_CACHE_SIZE2, size);
-
- /* bits 28-31 */
- addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
- WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
-
- /* bits 32-39 */
- addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
- WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
-
- /* tell firmware which hardware it is running on */
- switch (rdev->family) {
- default:
- return -EINVAL;
- case CHIP_RV710:
- chip_id = 0x01000005;
- break;
- case CHIP_RV730:
- chip_id = 0x01000006;
- break;
- case CHIP_RV740:
- chip_id = 0x01000007;
- break;
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- chip_id = 0x01000008;
- break;
- case CHIP_JUNIPER:
- chip_id = 0x01000009;
- break;
- case CHIP_REDWOOD:
- chip_id = 0x0100000a;
- break;
- case CHIP_CEDAR:
- chip_id = 0x0100000b;
- break;
- case CHIP_SUMO:
- case CHIP_SUMO2:
- chip_id = 0x0100000c;
- break;
- case CHIP_PALM:
- chip_id = 0x0100000e;
- break;
- case CHIP_CAYMAN:
- chip_id = 0x0100000f;
- break;
- case CHIP_BARTS:
- chip_id = 0x01000010;
- break;
- case CHIP_TURKS:
- chip_id = 0x01000011;
- break;
- case CHIP_CAICOS:
- chip_id = 0x01000012;
- break;
- case CHIP_TAHITI:
- chip_id = 0x01000014;
- break;
- case CHIP_VERDE:
- chip_id = 0x01000015;
- break;
- case CHIP_PITCAIRN:
- chip_id = 0x01000016;
- break;
- case CHIP_ARUBA:
- chip_id = 0x01000017;
- break;
- }
- WREG32(UVD_VCPU_CHIP_ID, chip_id);
-
- return 0;
-}
-
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -1747,80 +1650,6 @@ static int rv770_mc_init(struct radeon_device *rdev)
return 0;
}
-/**
- * rv770_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (r7xx).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int rv770_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_dw, cur_size_in_dw;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
- num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
- r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_dw = size_in_dw;
- if (cur_size_in_dw > 0xFFFF)
- cur_size_in_dw = 0xFFFF;
- size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, src_offset & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
- src_offset += cur_size_in_dw * 4;
- dst_offset += cur_size_in_dw * 4;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
@@ -1829,6 +1658,13 @@ static int rv770_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
rv770_pcie_gen2_enable(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ rv770_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -1837,11 +1673,6 @@ static int rv770_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
- rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
} else {
@@ -1851,12 +1682,6 @@ static int rv770_startup(struct radeon_device *rdev)
}
rv770_gpu_init(rdev);
- r = r600_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -1875,7 +1700,7 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
- r = rv770_uvd_resume(rdev);
+ r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
@@ -1904,14 +1729,14 @@ static int rv770_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -1928,12 +1753,11 @@ static int rv770_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
@@ -1983,6 +1807,7 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
@@ -2090,7 +1915,6 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
- r600_blit_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
@@ -2098,6 +1922,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
new file mode 100644
index 000000000000..f9b02e3d6830
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rv770d.h"
+
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFF)
+ cur_size_in_dw = 0xFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
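Callers of rv770_copy_dma() hand in a fence pointer (NULL, or the fence of a previous operation on the same buffers) and get back the fence of this copy. A sketch of a caller, assuming src_gpu_addr and dst_gpu_addr are valid GPU addresses:

    struct radeon_fence *fence = NULL;
    int r = rv770_copy_dma(rdev, src_gpu_addr, dst_gpu_addr,
                           num_pages, &fence);
    if (!r)
            r = radeon_fence_wait(fence, false); /* block until the copy lands */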
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 2d347925f77d..8cbb85dae5aa 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2294,6 +2294,7 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
if (power_info->pplib.ucStateEntrySize - 1) {
+ u8 *idx;
ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
@@ -2303,12 +2304,12 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info,
power_info->pplib.ucNonClockSize);
+ idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
clock_info = (union pplib_clock_info *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
- (power_state->v1.ucClockStateIndices[j] *
- power_info->pplib.ucClockInfoSize));
+ (idx[j] * power_info->pplib.ucClockInfoSize));
rv7xx_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], j,
clock_info);
@@ -2319,12 +2320,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
return 0;
}
+void rv770_get_engine_memory_ss(struct radeon_device *rdev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct radeon_atom_ss ss;
+
+ pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, 0);
+ pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, 0);
+
+ if (pi->sclk_ss || pi->mclk_ss)
+ pi->dynamic_ss = true;
+ else
+ pi->dynamic_ss = false;
+}
+
int rv770_dpm_init(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- uint16_t data_offset, size;
- uint8_t frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -2369,16 +2383,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
pi->mvdd_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = false;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = RV770_HASI_DFLT;
@@ -2393,8 +2398,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
@@ -2514,8 +2518,16 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
{
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+ u32 switch_limit = 300;
+
+ /* quirks */
+ /* ASUS K70AF */
+ if ((rdev->pdev->device == 0x9553) &&
+ (rdev->pdev->subsystem_vendor == 0x1043) &&
+ (rdev->pdev->subsystem_device == 0x1c42))
+ switch_limit = 200;
- if (vblank_time < 300)
+ if (vblank_time < switch_limit)
return true;
else
return false;
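The quirk above keys on the full PCI identity (device plus subsystem vendor/device) so that only the one known-bad laptop gets the relaxed limit. The match, factored out (hypothetical helper):

    static bool is_asus_k70af(struct pci_dev *pdev)
    {
            return pdev->device == 0x9553 &&
                   pdev->subsystem_vendor == 0x1043 &&
                   pdev->subsystem_device == 0x1c42;
    }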
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
index 96b1b2a62a8a..9244effc6b59 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.h
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
struct radeon_ps *new_ps,
struct radeon_ps *old_ps);
+void rv770_get_engine_memory_ss(struct radeon_device *rdev);
/* smc */
int rv770_read_smc_soft_register(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 6bef2b7d601b..9fe60e542922 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -971,7 +971,21 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
+/*
+ * PM4
+ */
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+
/* UVD */
+#define UVD_GPCOM_VCPU_CMD 0xef0c
+#define UVD_GPCOM_VCPU_DATA0 0xef10
+#define UVD_GPCOM_VCPU_DATA1 0xef14
+
#define UVD_LMI_EXT40_ADDR 0xf498
#define UVD_VCPU_CHIP_ID 0xf4d4
#define UVD_VCPU_CACHE_OFFSET0 0xf4d8
@@ -985,4 +999,6 @@
#define UVD_RBC_RB_RPTR 0xf690
#define UVD_RBC_RB_WPTR 0xf694
+#define UVD_CONTEXT_ID 0xf6f4
+
#endif
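With the PM4 helpers and UVD register offsets now available in rv770d.h, a type-0 write of a single dword to a UVD register would look roughly like this (sketch; per the macro above, n in PACKET0 is the dword count minus one):

    radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
    radeon_ring_write(ring, cmd);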
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d71037f4f68f..3e23b757dcfa 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -68,6 +68,8 @@ MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
+extern void sumo_rlc_fini(struct radeon_device *rdev);
+extern int sumo_rlc_init(struct radeon_device *rdev);
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
@@ -76,6 +78,11 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void si_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
static const u32 verde_rlc_save_restore_register_list[] =
{
@@ -1663,9 +1670,13 @@ static int si_init_microcode(struct radeon_device *rdev)
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->smc_fw->size != smc_req_size) {
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"si_smc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
@@ -1700,7 +1711,8 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
- u32 tmp;
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
@@ -1715,16 +1727,30 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
* non-linked crtcs for maximum line buffer allocation.
*/
if (radeon_crtc->base.enabled && mode) {
- if (other_mode)
+ if (other_mode) {
tmp = 0; /* 1/2 */
- else
+ buffer_alloc = 1;
+ } else {
tmp = 2; /* whole */
- } else
+ buffer_alloc = 2;
+ }
+ } else {
tmp = 0;
+ buffer_alloc = 0;
+ }
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
DC_LB_MEMORY_CONFIG(tmp));
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
@@ -3360,17 +3386,6 @@ static int si_cp_resume(struct radeon_device *rdev)
u32 rb_bufsz;
int r;
- /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
- WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
- SOFT_RESET_PA |
- SOFT_RESET_VGT |
- SOFT_RESET_SPI |
- SOFT_RESET_SX));
- RREG32(GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(GRBM_SOFT_RESET, 0);
- RREG32(GRBM_SOFT_RESET);
-
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
@@ -3489,7 +3504,7 @@ static int si_cp_resume(struct radeon_device *rdev)
return 0;
}
-static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -3738,34 +3753,6 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * si_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = si_gpu_check_soft_reset(rdev);
- u32 mask;
-
- if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- mask = RADEON_RESET_DMA;
- else
- mask = RADEON_RESET_DMA1;
-
- if (!(reset_mask & mask)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
/* MC */
static void si_mc_program(struct radeon_device *rdev)
{
@@ -4079,13 +4066,64 @@ static int si_vm_packet3_ce_check(struct radeon_device *rdev,
return 0;
}
+static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
+{
+ u32 start_reg, reg, i;
+ u32 command = ib[idx + 4];
+ u32 info = ib[idx + 1];
+ u32 idx_value = ib[idx];
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ return 0;
+}
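si_vm_packet3_cp_dma_check() factors the CP_DMA validation out of the GFX path so the compute path (added below) can share it. The bitfields it inspects, spelled out as plain extractions (sketch, derived from the masks in the code):

    u32 src_sel = (info >> 29) & 0x3;     /* 0 = src is register space */
    u32 dst_sel = (info >> 20) & 0x3;     /* 0 = dst is register space */
    u32 count   = command & 0x1fffff;     /* transfer count field */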
+
static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
u32 *ib, struct radeon_cs_packet *pkt)
{
+ int r;
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
- u32 command, info;
switch (pkt->opcode) {
case PACKET3_NOP:
@@ -4186,50 +4224,9 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
}
break;
case PACKET3_CP_DMA:
- command = ib[idx + 4];
- info = ib[idx + 1];
- if (command & PACKET3_CP_DMA_CMD_SAS) {
- /* src address space is register */
- if (((info & 0x60000000) >> 29) == 0) {
- start_reg = idx_value << 2;
- if (command & PACKET3_CP_DMA_CMD_SAIC) {
- reg = start_reg;
- if (!si_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad SRC register\n");
- return -EINVAL;
- }
- } else {
- for (i = 0; i < (command & 0x1fffff); i++) {
- reg = start_reg + (4 * i);
- if (!si_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad SRC register\n");
- return -EINVAL;
- }
- }
- }
- }
- }
- if (command & PACKET3_CP_DMA_CMD_DAS) {
- /* dst address space is register */
- if (((info & 0x00300000) >> 20) == 0) {
- start_reg = ib[idx + 2];
- if (command & PACKET3_CP_DMA_CMD_DAIC) {
- reg = start_reg;
- if (!si_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad DST register\n");
- return -EINVAL;
- }
- } else {
- for (i = 0; i < (command & 0x1fffff); i++) {
- reg = start_reg + (4 * i);
- if (!si_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad DST register\n");
- return -EINVAL;
- }
- }
- }
- }
- }
+ r = si_vm_packet3_cp_dma_check(ib, idx);
+ if (r)
+ return r;
break;
default:
DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
@@ -4241,6 +4238,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
static int si_vm_packet3_compute_check(struct radeon_device *rdev,
u32 *ib, struct radeon_cs_packet *pkt)
{
+ int r;
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, reg, i;
@@ -4313,6 +4311,11 @@ static int si_vm_packet3_compute_check(struct radeon_device *rdev,
return -EINVAL;
}
break;
+ case PACKET3_CP_DMA:
+ r = si_vm_packet3_cp_dma_check(ib, idx);
+ if (r)
+ return r;
+ break;
default:
DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
return -EINVAL;
@@ -4704,58 +4707,7 @@ void si_vm_set_page(struct radeon_device *rdev,
}
} else {
/* DMA */
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
- }
- }
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+ si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
}
}
@@ -4802,32 +4754,6 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 0x0);
}
-void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- if (vm->id < 8) {
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
- } else {
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
- }
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
-
- /* flush hdp cache */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
- radeon_ring_write(ring, 1);
-
- /* bits 0-7 are the VM contexts0-7 */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
- radeon_ring_write(ring, 1 << vm->id);
-}
-
/*
* Power and clock gating
*/
@@ -4895,7 +4821,7 @@ static void si_set_uvd_dcm(struct radeon_device *rdev,
WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
}
-static void si_init_uvd_internal_cg(struct radeon_device *rdev)
+void si_init_uvd_internal_cg(struct radeon_device *rdev)
{
bool hw_mode = true;
@@ -4938,7 +4864,7 @@ static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
u32 data, orig;
orig = data = RREG32(DMA_PG);
- if (enable)
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
data |= PG_CNTL_ENABLE;
else
data &= ~PG_CNTL_ENABLE;
@@ -4962,7 +4888,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
{
u32 tmp;
- if (enable) {
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
WREG32(RLC_TTOP_D, tmp);
@@ -5065,9 +4991,9 @@ static void si_enable_cgcg(struct radeon_device *rdev,
orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
- si_enable_gui_idle_interrupt(rdev, enable);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
+ si_enable_gui_idle_interrupt(rdev, true);
- if (enable) {
WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
tmp = si_halt_rlc(rdev);
@@ -5084,6 +5010,8 @@ static void si_enable_cgcg(struct radeon_device *rdev,
data |= CGCG_EN | CGLS_EN;
} else {
+ si_enable_gui_idle_interrupt(rdev, false);
+
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
@@ -5101,16 +5029,18 @@ static void si_enable_mgcg(struct radeon_device *rdev,
{
u32 data, orig, tmp = 0;
- if (enable) {
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
orig = data = RREG32(CGTS_SM_CTRL_REG);
data = 0x96940200;
if (orig != data)
WREG32(CGTS_SM_CTRL_REG, data);
- orig = data = RREG32(CP_MEM_SLP_CNTL);
- data |= CP_MEM_LS_EN;
- if (orig != data)
- WREG32(CP_MEM_SLP_CNTL, data);
+ if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
+ orig = data = RREG32(CP_MEM_SLP_CNTL);
+ data |= CP_MEM_LS_EN;
+ if (orig != data)
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
data &= 0xffffffc0;
@@ -5155,7 +5085,7 @@ static void si_enable_uvd_mgcg(struct radeon_device *rdev,
{
u32 orig, data, tmp;
- if (enable) {
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
tmp |= 0x3fff;
WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
@@ -5203,7 +5133,7 @@ static void si_enable_mc_ls(struct radeon_device *rdev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable)
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
data |= MC_LS_ENABLE;
else
data &= ~MC_LS_ENABLE;
@@ -5212,230 +5142,295 @@ static void si_enable_mc_ls(struct radeon_device *rdev,
}
}
-
-static void si_init_cg(struct radeon_device *rdev)
+static void si_enable_mc_mgcg(struct radeon_device *rdev,
+ bool enable)
{
- bool has_uvd = true;
+ int i;
+ u32 orig, data;
- si_enable_mgcg(rdev, true);
- si_enable_cgcg(rdev, true);
- /* disable MC LS on Tahiti */
- if (rdev->family == CHIP_TAHITI)
- si_enable_mc_ls(rdev, false);
- if (has_uvd) {
- si_enable_uvd_mgcg(rdev, true);
- si_init_uvd_internal_cg(rdev);
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
+ data |= MC_CG_ENABLE;
+ else
+ data &= ~MC_CG_ENABLE;
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
}
}
-static void si_fini_cg(struct radeon_device *rdev)
+static void si_enable_dma_mgcg(struct radeon_device *rdev,
+ bool enable)
{
- bool has_uvd = true;
+ u32 orig, data, offset;
+ int i;
- if (has_uvd)
- si_enable_uvd_mgcg(rdev, false);
- si_enable_cgcg(rdev, false);
- si_enable_mgcg(rdev, false);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ offset = DMA0_REGISTER_OFFSET;
+ else
+ offset = DMA1_REGISTER_OFFSET;
+ orig = data = RREG32(DMA_POWER_CNTL + offset);
+ data &= ~MEM_POWER_OVERRIDE;
+ if (data != orig)
+ WREG32(DMA_POWER_CNTL + offset, data);
+ WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+ }
+ } else {
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ offset = DMA0_REGISTER_OFFSET;
+ else
+ offset = DMA1_REGISTER_OFFSET;
+ orig = data = RREG32(DMA_POWER_CNTL + offset);
+ data |= MEM_POWER_OVERRIDE;
+ if (data != orig)
+ WREG32(DMA_POWER_CNTL + offset, data);
+
+ orig = data = RREG32(DMA_CLK_CTRL + offset);
+ data = 0xff000000;
+ if (data != orig)
+ WREG32(DMA_CLK_CTRL + offset, data);
+ }
+ }
}
-static void si_init_pg(struct radeon_device *rdev)
+static void si_enable_bif_mgls(struct radeon_device *rdev,
+ bool enable)
{
- bool has_pg = false;
+ u32 orig, data;
- /* only cape verde supports PG */
- if (rdev->family == CHIP_VERDE)
- has_pg = true;
+ orig = data = RREG32_PCIE(PCIE_CNTL2);
- if (has_pg) {
- si_init_ao_cu_mask(rdev);
- si_init_dma_pg(rdev);
- si_enable_dma_pg(rdev, true);
- si_init_gfx_cgpg(rdev);
- si_enable_gfx_cgpg(rdev, true);
- } else {
- WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
- WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
- }
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
+ data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
+ REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
+ else
+ data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
+ REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
+
+ if (orig != data)
+ WREG32_PCIE(PCIE_CNTL2, data);
}
-static void si_fini_pg(struct radeon_device *rdev)
+static void si_enable_hdp_mgcg(struct radeon_device *rdev,
+ bool enable)
{
- bool has_pg = false;
+ u32 orig, data;
- /* only cape verde supports PG */
- if (rdev->family == CHIP_VERDE)
- has_pg = true;
+ orig = data = RREG32(HDP_HOST_PATH_CNTL);
- if (has_pg) {
- si_enable_dma_pg(rdev, false);
- si_enable_gfx_cgpg(rdev, false);
- }
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
+ data &= ~CLOCK_GATING_DIS;
+ else
+ data |= CLOCK_GATING_DIS;
+
+ if (orig != data)
+ WREG32(HDP_HOST_PATH_CNTL, data);
}
-/*
- * RLC
- */
-void si_rlc_fini(struct radeon_device *rdev)
+static void si_enable_hdp_ls(struct radeon_device *rdev,
+ bool enable)
{
- int r;
-
- /* save restore block */
- if (rdev->rlc.save_restore_obj) {
- r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
- if (unlikely(r != 0))
- dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
- radeon_bo_unpin(rdev->rlc.save_restore_obj);
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+ u32 orig, data;
- radeon_bo_unref(&rdev->rlc.save_restore_obj);
- rdev->rlc.save_restore_obj = NULL;
- }
+ orig = data = RREG32(HDP_MEM_POWER_LS);
- /* clear state block */
- if (rdev->rlc.clear_state_obj) {
- r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
- if (unlikely(r != 0))
- dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
- radeon_bo_unpin(rdev->rlc.clear_state_obj);
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
+ data |= HDP_LS_ENABLE;
+ else
+ data &= ~HDP_LS_ENABLE;
- radeon_bo_unref(&rdev->rlc.clear_state_obj);
- rdev->rlc.clear_state_obj = NULL;
- }
+ if (orig != data)
+ WREG32(HDP_MEM_POWER_LS, data);
}
-#define RLC_CLEAR_STATE_END_MARKER 0x00000001
-
-int si_rlc_init(struct radeon_device *rdev)
+void si_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable)
{
- volatile u32 *dst_ptr;
- u32 dws, data, i, j, k, reg_num;
- u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index;
- u64 reg_list_mc_addr;
- const struct cs_section_def *cs_data = si_cs_data;
- int r;
-
- /* save restore block */
- if (rdev->rlc.save_restore_obj == NULL) {
- r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL,
- &rdev->rlc.save_restore_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
- return r;
+ if (block & RADEON_CG_BLOCK_GFX) {
+ /* order matters! */
+ if (enable) {
+ si_enable_mgcg(rdev, true);
+ si_enable_cgcg(rdev, true);
+ } else {
+ si_enable_cgcg(rdev, false);
+ si_enable_mgcg(rdev, false);
}
}
- r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- si_rlc_fini(rdev);
- return r;
+ if (block & RADEON_CG_BLOCK_MC) {
+ si_enable_mc_mgcg(rdev, enable);
+ si_enable_mc_ls(rdev, enable);
}
- r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.save_restore_gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
+
+ if (block & RADEON_CG_BLOCK_SDMA) {
+ si_enable_dma_mgcg(rdev, enable);
}
- if (rdev->family == CHIP_VERDE) {
- r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
- }
- /* write the sr buffer */
- dst_ptr = rdev->rlc.sr_ptr;
- for (i = 0; i < ARRAY_SIZE(verde_rlc_save_restore_register_list); i++) {
- dst_ptr[i] = verde_rlc_save_restore_register_list[i];
- }
- radeon_bo_kunmap(rdev->rlc.save_restore_obj);
+ if (block & RADEON_CG_BLOCK_BIF) {
+ si_enable_bif_mgls(rdev, enable);
}
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- /* clear state block */
- reg_list_num = 0;
- dws = 0;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_list_num++;
- dws += cs_data[i].section[j].reg_count;
+ if (block & RADEON_CG_BLOCK_UVD) {
+ if (rdev->has_uvd) {
+ si_enable_uvd_mgcg(rdev, enable);
}
}
- reg_list_blk_index = (3 * reg_list_num + 2);
- dws += reg_list_blk_index;
- if (rdev->rlc.clear_state_obj == NULL) {
- r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
- }
+ if (block & RADEON_CG_BLOCK_HDP) {
+ si_enable_hdp_mgcg(rdev, enable);
+ si_enable_hdp_ls(rdev, enable);
}
- r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- si_rlc_fini(rdev);
- return r;
+}
+
+static void si_init_cg(struct radeon_device *rdev)
+{
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), true);
+ if (rdev->has_uvd) {
+ si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
+ si_init_uvd_internal_cg(rdev);
}
- r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.clear_state_gpu_addr);
- if (r) {
+}
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
- dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
+static void si_fini_cg(struct radeon_device *rdev)
+{
+ if (rdev->has_uvd) {
+ si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
}
- r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), false);
+}
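si_update_cg() replaces the old monolithic si_init_cg/si_fini_cg bodies with a bitmask interface, so a caller can gate an arbitrary subset of blocks around a reprogramming step. Sketch of the intended usage pattern (assumed, based on how si_init_cg/si_fini_cg above use it):

    si_update_cg(rdev, RADEON_CG_BLOCK_GFX | RADEON_CG_BLOCK_MC, false);
    /* ... touch clocks / reprogram the affected blocks ... */
    si_update_cg(rdev, RADEON_CG_BLOCK_GFX | RADEON_CG_BLOCK_MC, true);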
+
+u32 si_get_csb_size(struct radeon_device *rdev)
+{
+ u32 count = 0;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (rdev->rlc.cs_data == NULL)
+ return 0;
+
+ /* begin clear state */
+ count += 2;
+ /* context control state */
+ count += 3;
+
+ for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT)
+ count += 2 + ext->reg_count;
+ else
+ return 0;
+ }
}
- /* set up the cs buffer */
- dst_ptr = rdev->rlc.cs_ptr;
- reg_list_hdr_blk_index = 0;
- reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
- data = upper_32_bits(reg_list_mc_addr);
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_num = cs_data[i].section[j].reg_count;
- data = reg_list_mc_addr & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- data = 0x08000000 | (reg_num * 4);
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- for (k = 0; k < reg_num; k++) {
- data = cs_data[i].section[j].extent[k];
- dst_ptr[reg_list_blk_index + k] = data;
+ /* pa_sc_raster_config */
+ count += 3;
+ /* end clear state */
+ count += 2;
+ /* clear state */
+ count += 2;
+
+ return count;
+}
+
+void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
+{
+ u32 count = 0, i;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (rdev->rlc.cs_data == NULL)
+ return;
+ if (buffer == NULL)
+ return;
+
+ buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
+ buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+
+ buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
+ buffer[count++] = 0x80000000;
+ buffer[count++] = 0x80000000;
+
+ for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
+ buffer[count++] = ext->reg_index - 0xa000;
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = ext->extent[i];
+ } else {
+ return;
}
- reg_list_mc_addr += reg_num * 4;
- reg_list_blk_index += reg_num;
}
}
- dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
- radeon_bo_kunmap(rdev->rlc.clear_state_obj);
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
+ buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ buffer[count++] = 0x2a00126a;
+ break;
+ case CHIP_VERDE:
+ buffer[count++] = 0x0000124a;
+ break;
+ case CHIP_OLAND:
+ buffer[count++] = 0x00000082;
+ break;
+ case CHIP_HAINAN:
+ buffer[count++] = 0x00000000;
+ break;
+ default:
+ buffer[count++] = 0x00000000;
+ break;
+ }
- return 0;
+ buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
+ buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+
+ buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
+ buffer[count++] = 0;
+}
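si_get_csb_size() and si_get_csb_buffer() are meant to be used as a pair: size the clear-state buffer first, then fill it. A minimal sketch (illustration only; the real consumer is expected to be the shared sumo_rlc_init() path, which writes into a kmapped BO rather than kmalloc memory):

    u32 dws = si_get_csb_size(rdev);        /* size in dwords */
    u32 *buf = kmalloc(dws * 4, GFP_KERNEL);
    if (buf)
            si_get_csb_buffer(rdev, buf);   /* emit the PM4 clear-state stream */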
+
+static void si_init_pg(struct radeon_device *rdev)
+{
+ if (rdev->pg_flags) {
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
+ si_init_dma_pg(rdev);
+ }
+ si_init_ao_cu_mask(rdev);
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ si_init_gfx_cgpg(rdev);
+ }
+ si_enable_dma_pg(rdev, true);
+ si_enable_gfx_cgpg(rdev, true);
+ } else {
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+ }
+}
+
+static void si_fini_pg(struct radeon_device *rdev)
+{
+ if (rdev->pg_flags) {
+ si_enable_dma_pg(rdev, false);
+ si_enable_gfx_cgpg(rdev, false);
+ }
}
-static void si_rlc_reset(struct radeon_device *rdev)
+/*
+ * RLC
+ */
+void si_rlc_reset(struct radeon_device *rdev)
{
u32 tmp = RREG32(GRBM_SOFT_RESET);
@@ -6335,80 +6330,6 @@ restart_ih:
return IRQ_HANDLED;
}
-/**
- * si_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (SI).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int si_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_bytes, cur_size_in_bytes;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
- num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_bytes = size_in_bytes;
- if (cur_size_in_bytes > 0xFFFFF)
- cur_size_in_bytes = 0xFFFFF;
- size_in_bytes -= cur_size_in_bytes;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
- radeon_ring_write(ring, dst_offset & 0xffffffff);
- radeon_ring_write(ring, src_offset & 0xffffffff);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
- src_offset += cur_size_in_bytes;
- dst_offset += cur_size_in_bytes;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
/*
* startup/shutdown callbacks
*/
@@ -6422,6 +6343,13 @@ static int si_startup(struct radeon_device *rdev)
/* enable aspm */
si_program_aspm(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ si_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
!rdev->rlc_fw || !rdev->mc_fw) {
r = si_init_microcode(rdev);
@@ -6437,18 +6365,19 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
- si_mc_program(rdev);
r = si_pcie_gart_enable(rdev);
if (r)
return r;
si_gpu_init(rdev);
/* allocate rlc buffers */
- r = si_rlc_init(rdev);
+ if (rdev->family == CHIP_VERDE) {
+ rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
+ }
+ rdev->rlc.cs_data = si_cs_data;
+ r = sumo_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -6490,7 +6419,7 @@ static int si_startup(struct radeon_device *rdev)
}
if (rdev->has_uvd) {
- r = rv770_uvd_resume(rdev);
+ r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
@@ -6519,21 +6448,21 @@ static int si_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
CP_RB1_RPTR, CP_RB1_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
CP_RB2_RPTR, CP_RB2_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
@@ -6541,7 +6470,7 @@ static int si_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
@@ -6549,7 +6478,7 @@ static int si_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
@@ -6567,12 +6496,11 @@ static int si_startup(struct radeon_device *rdev)
if (rdev->has_uvd) {
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
@@ -6590,6 +6518,10 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
+ r = dce6_audio_init(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -6621,13 +6553,16 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
+ dce6_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
if (rdev->has_uvd) {
- r600_uvd_rbc_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
}
+ si_fini_pg(rdev);
+ si_fini_cg(rdev);
si_irq_suspend(rdev);
radeon_wb_disable(rdev);
si_pcie_gart_disable(rdev);
@@ -6734,7 +6669,7 @@ int si_init(struct radeon_device *rdev)
si_cp_fini(rdev);
cayman_dma_fini(rdev);
si_irq_fini(rdev);
- si_rlc_fini(rdev);
+ sumo_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_vm_manager_fini(rdev);
@@ -6759,16 +6694,18 @@ void si_fini(struct radeon_device *rdev)
{
si_cp_fini(rdev);
cayman_dma_fini(rdev);
- si_irq_fini(rdev);
- si_rlc_fini(rdev);
- si_fini_cg(rdev);
si_fini_pg(rdev);
+ si_fini_cg(rdev);
+ si_irq_fini(rdev);
+ sumo_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- if (rdev->has_uvd)
+ if (rdev->has_uvd) {
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ }
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
new file mode 100644
index 000000000000..49909d23dfce
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "sid.h"
+
+u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/**
+ * si_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * si_dma_vm_set_page - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (SI).
+ */
+void si_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
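Both branches above cap a single write packet at 0xFFFFE payload dwords; since each 64-bit page entry takes two dwords, one DMA_PACKET_WRITE can carry at most 0x7FFFF entries. A hedged sketch (not part of the patch) of the resulting packet count:

	/* Sketch only: how many DMA_PACKET_WRITEs a mapping of 'count' page
	 * entries needs, given the 0xFFFFE-dword payload cap used above. */
	static unsigned si_dma_write_packets_needed(unsigned count)
	{
		unsigned per_packet = 0xFFFFE / 2;	/* 64-bit PTEs, two dwords each */

		return (count + per_packet - 1) / per_packet;
	}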
+
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ if (vm->id < 8) {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ } else {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+ }
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
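Every register write in si_dma_vm_flush() above is the same three-dword sequence: the SRBM_WRITE packet header, a word combining the byte enables (0xf << 16) with the dword-aligned register offset, and the value. A hypothetical helper, not in the patch, that captures the pattern:

	/* Hypothetical helper: one SRBM register write on the SI DMA ring. */
	static void si_dma_ring_srbm_write(struct radeon_ring *ring, u32 reg, u32 val)
	{
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
		radeon_ring_write(ring, (0xf << 16) | (reg >> 2)); /* byte enables | dword offset */
		radeon_ring_write(ring, val);
	}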
+
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0xFFFFF)
+ cur_size_in_bytes = 0xFFFFF;
+ size_in_bytes -= cur_size_in_bytes;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
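A quick sanity check of the ring reservation in si_copy_dma() (a sketch, assuming 4 KiB GPU pages, i.e. RADEON_GPU_PAGE_SHIFT == 12): each loop iteration emits five dwords (one COPY packet header plus four address words), and the extra 11 dwords appear to leave headroom for the semaphore sync and the fence emit.

	static unsigned si_copy_dma_ring_dwords(unsigned num_gpu_pages)
	{
		u32 size_in_bytes = num_gpu_pages << 12;		/* 4 KiB pages */
		unsigned num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);

		return num_loops * 5 + 11;	/* 5 dwords per COPY packet + headroom */
	}

A single page therefore reserves 16 dwords; a 256 MiB move needs 257 loops.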
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 73aaa2e4c312..5be9b4e72350 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -37,8 +37,6 @@
#define SMC_RAM_END 0x20000
-#define DDR3_DRAM_ROWS 0x2000
-
#define SCLK_MIN_DEEPSLEEP_FREQ 1350
static const struct si_cac_config_reg cac_weights_tahiti[] =
@@ -1755,6 +1753,9 @@ static int si_calculate_sclk_params(struct radeon_device *rdev,
u32 engine_clock,
SISLANDS_SMC_SCLK_VALUE *sclk);
+extern void si_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable);
+
static struct si_power_info *si_get_pi(struct radeon_device *rdev)
{
struct si_power_info *pi = rdev->pm.dpm.priv;
@@ -1767,8 +1768,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe
{
s64 kt, kv, leakage_w, i_leakage, vddc;
s64 temperature, t_slope, t_intercept, av, bv, t_ref;
+ s64 tmp;
- i_leakage = drm_int2fixp(ileakage / 100);
+ i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
vddc = div64_s64(drm_int2fixp(v), 1000);
temperature = div64_s64(drm_int2fixp(t), 1000);
@@ -1778,8 +1780,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe
bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
t_ref = drm_int2fixp(coeff->t_ref);
- kt = drm_fixp_div(drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, temperature)),
- drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, t_ref)));
+ tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
+ kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
+ kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
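For reference, the refactor above computes the same leakage as the single-expression version; the temporary merely hoists the shared exponent argument. In the notation of the code:

	tmp       = t_slope * vddc + t_intercept
	kt        = exp(tmp * temperature) / exp(tmp * t_ref)
	leakage_w = i_leakage * kt * kv * vddc

Splitting the expression evaluates the common fixed-point product only once and keeps each drm_fixp_* step on its own line.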
@@ -1931,6 +1934,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
si_pi->cac_override = cac_override_pitcairn;
si_pi->powertune_data = &powertune_data_pitcairn;
si_pi->dte_data = dte_data_pitcairn;
+ break;
}
} else if (rdev->family == CHIP_VERDE) {
si_pi->lcac_config = lcac_cape_verde;
@@ -1941,6 +1945,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
case 0x683B:
case 0x683F:
case 0x6829:
+ case 0x6835:
si_pi->cac_weights = cac_weights_cape_verde_pro;
si_pi->dte_data = dte_data_cape_verde;
break;
@@ -2901,7 +2906,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
{
struct ni_ps *ps = ni_get_ps(rps);
struct radeon_clock_and_voltage_limits *max_limits;
- bool disable_mclk_switching;
+ bool disable_mclk_switching = false;
+ bool disable_sclk_switching = false;
u32 mclk, sclk;
u16 vddc, vddci;
int i;
@@ -2909,8 +2915,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
disable_mclk_switching = true;
- else
- disable_mclk_switching = false;
+
+ if (rps->vclk || rps->dclk) {
+ disable_mclk_switching = true;
+ disable_sclk_switching = true;
+ }
if (rdev->pm.dpm.ac_power)
max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
@@ -2938,27 +2947,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (disable_mclk_switching) {
mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
- sclk = ps->performance_levels[0].sclk;
- vddc = ps->performance_levels[0].vddc;
vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
} else {
- sclk = ps->performance_levels[0].sclk;
mclk = ps->performance_levels[0].mclk;
- vddc = ps->performance_levels[0].vddc;
vddci = ps->performance_levels[0].vddci;
}
+ if (disable_sclk_switching) {
+ sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+ vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+ } else {
+ sclk = ps->performance_levels[0].sclk;
+ vddc = ps->performance_levels[0].vddc;
+ }
+
/* adjusted low state */
ps->performance_levels[0].sclk = sclk;
ps->performance_levels[0].mclk = mclk;
ps->performance_levels[0].vddc = vddc;
ps->performance_levels[0].vddci = vddci;
- for (i = 1; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
- ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
- if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
- ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+ if (disable_sclk_switching) {
+ sclk = ps->performance_levels[0].sclk;
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (sclk < ps->performance_levels[i].sclk)
+ sclk = ps->performance_levels[i].sclk;
+ }
+ for (i = 0; i < ps->performance_level_count; i++) {
+ ps->performance_levels[i].sclk = sclk;
+ ps->performance_levels[i].vddc = vddc;
+ }
+ } else {
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+ ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+ if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+ ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+ }
}
if (disable_mclk_switching) {
@@ -3237,10 +3262,10 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
{
struct radeon_ps *rps = rdev->pm.dpm.current_ps;
struct ni_ps *ps = ni_get_ps(rps);
- u32 levels;
+ u32 levels = ps->performance_level_count;
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
- if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
+ if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
return -EINVAL;
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
@@ -3249,14 +3274,13 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
- levels = ps->performance_level_count - 1;
- if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+ if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
return -EINVAL;
} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
- if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
+ if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
return -EINVAL;
}
@@ -3620,8 +3644,12 @@ static void si_enable_display_gap(struct radeon_device *rdev)
{
u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+ tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+ tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+ DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
+ tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
WREG32(CG_DISPLAY_GAP_CNTL, tmp);
}
@@ -3638,7 +3666,7 @@ static void si_clear_vc(struct radeon_device *rdev)
WREG32(CG_FTV, 0);
}
-static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
u8 mc_para_index;
@@ -3651,7 +3679,7 @@ static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
return mc_para_index;
}
-static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
u8 mc_para_index;
@@ -3733,20 +3761,21 @@ static bool si_validate_phase_shedding_tables(struct radeon_device *rdev,
return true;
}
-static void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
- struct atom_voltage_table *voltage_table)
+void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
+ u32 max_voltage_steps,
+ struct atom_voltage_table *voltage_table)
{
unsigned int i, diff;
- if (voltage_table->count <= SISLANDS_MAX_NO_VREG_STEPS)
+ if (voltage_table->count <= max_voltage_steps)
return;
- diff = voltage_table->count - SISLANDS_MAX_NO_VREG_STEPS;
+ diff = voltage_table->count - max_voltage_steps;
- for (i= 0; i < SISLANDS_MAX_NO_VREG_STEPS; i++)
+ for (i = 0; i < max_voltage_steps; i++)
voltage_table->entries[i] = voltage_table->entries[i + diff];
- voltage_table->count = SISLANDS_MAX_NO_VREG_STEPS;
+ voltage_table->count = max_voltage_steps;
}
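Note that the trim keeps the top of the table: shifting every kept slot down by diff discards the diff lowest-voltage entries, not the highest. A hedged illustration, assuming a 10-entry table trimmed to 8 steps:

	unsigned diff = 10 - 8;			/* count - max_voltage_steps */
	for (i = 0; i < 8; i++)			/* kept[i] == old entries[i + 2] */
		table->entries[i] = table->entries[i + diff];
	table->count = 8;			/* two lowest voltage steps dropped */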
static int si_construct_voltage_tables(struct radeon_device *rdev)
@@ -3762,7 +3791,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
return ret;
if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddc_voltage_table);
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddc_voltage_table);
if (eg_pi->vddci_control) {
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
@@ -3771,7 +3802,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
return ret;
if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddci_voltage_table);
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddci_voltage_table);
}
if (pi->mvdd_control) {
@@ -3789,7 +3822,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
}
if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev, &si_pi->mvdd_voltage_table);
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &si_pi->mvdd_voltage_table);
}
if (si_pi->vddc_phase_shed_control) {
@@ -4036,16 +4071,15 @@ static int si_force_switch_to_arb_f0(struct radeon_device *rdev)
static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev,
u32 engine_clock)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 dram_rows;
u32 dram_refresh_rate;
u32 mc_arb_rfsh_rate;
u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
- if (pi->mem_gddr5)
- dram_rows = 1 << (tmp + 10);
+ if (tmp >= 4)
+ dram_rows = 16384;
else
- dram_rows = DDR3_DRAM_ROWS;
+ dram_rows = 1 << (tmp + 10);
dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
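The rewritten branch above drops the GDDR5 check and decodes the row count purely from the register field, clamping at 16384. For tmp == 4 the two expressions agree (1 << 14 == 16384), so the clamp only guards against larger encodings. A sketch of the decode, hedged:

	/* Sketch: NOOFROWS encodes 2^(n+10) DRAM rows; values of 4 and up all
	 * mean 16384 rows in the new code. */
	static u32 si_decode_dram_rows(u32 noofrows)
	{
		return (noofrows >= 4) ? 16384 : (1u << (noofrows + 10));
	}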
@@ -5728,6 +5762,13 @@ int si_dpm_enable(struct radeon_device *rdev)
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
int ret;
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
if (si_is_smc_running(rdev))
return -EINVAL;
if (pi->voltage_control)
@@ -5847,6 +5888,13 @@ int si_dpm_enable(struct radeon_device *rdev)
si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+
ni_update_current_ps(rdev, boot_ps);
return 0;
@@ -5857,6 +5905,13 @@ void si_dpm_disable(struct radeon_device *rdev)
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
if (!si_is_smc_running(rdev))
return;
si_disable_ulv(rdev);
@@ -5921,6 +5976,13 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
struct radeon_ps *old_ps = &eg_pi->current_rps;
int ret;
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
ret = si_disable_ulv(rdev);
if (ret) {
DRM_ERROR("si_disable_ulv failed\n");
@@ -6013,16 +6075,18 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
-#if 0
- /* XXX */
ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
if (ret) {
DRM_ERROR("si_dpm_force_performance_level failed\n");
return ret;
}
-#else
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-#endif
+
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
return 0;
}
@@ -6213,6 +6277,7 @@ static int si_parse_power_table(struct radeon_device *rdev)
rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -6229,14 +6294,16 @@ static int si_parse_power_table(struct radeon_device *rdev)
non_clock_info,
non_clock_info_array->ucEntrySize);
k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
+ clock_array_index = idx[j];
if (clock_array_index >= clock_info_array->ucNumEntries)
continue;
if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
break;
clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
si_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], k,
clock_info);
@@ -6254,9 +6321,6 @@ int si_dpm_init(struct radeon_device *rdev)
struct evergreen_power_info *eg_pi;
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- u16 data_offset, size;
- u8 frev, crev;
struct atom_clock_dividers dividers;
int ret;
u32 mask;
@@ -6347,16 +6411,7 @@ int si_dpm_init(struct radeon_device *rdev)
si_pi->vddc_phase_shed_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -6367,8 +6422,7 @@ int si_dpm_init(struct radeon_device *rdev)
eg_pi->sclk_deep_sleep = true;
si_pi->sclk_deep_sleep_above_low = false;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
@@ -6395,6 +6449,12 @@ int si_dpm_init(struct radeon_device *rdev)
si_initialize_powertune_defaults(rdev);
+ /* make sure dc limits are valid */
+ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 2c8da27a929f..52d2ab6b67a0 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -282,6 +282,10 @@
#define DMIF_ADDR_CALC 0xC00
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
#define SRBM_STATUS 0xE50
#define GRBM_RQ_PENDING (1 << 5)
#define VMC_BUSY (1 << 8)
@@ -581,6 +585,7 @@
#define CLKS_MASK (0xfff << 0)
#define HDP_HOST_PATH_CNTL 0x2C00
+#define CLOCK_GATING_DIS (1 << 23)
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
@@ -588,6 +593,8 @@
#define HDP_ADDR_CONFIG 0x2F48
#define HDP_MISC_CNTL 0x2F4C
#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+#define HDP_MEM_POWER_LS 0x2F50
+#define HDP_LS_ENABLE (1 << 0)
#define ATC_MISC_CG 0x3350
@@ -635,6 +642,54 @@
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+/* DCE6 ELD audio interface */
+#define AZ_F0_CODEC_ENDPOINT_INDEX 0x5E00
+# define AZ_ENDPOINT_REG_INDEX(x) (((x) & 0xff) << 0)
+# define AZ_ENDPOINT_REG_WRITE_EN (1 << 8)
+#define AZ_F0_CODEC_ENDPOINT_DATA 0x5E04
+
+#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x25
+#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
+#define SPEAKER_ALLOCATION_SHIFT 0
+#define HDMI_CONNECTION (1 << 16)
+#define DP_CONNECTION (1 << 17)
+
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
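As an illustration (not from the patch), a descriptor for 8-channel LPCM advertising 32, 44.1 and 48 kHz would combine the fields above:

	u32 lpcm_desc = MAX_CHANNELS(7) |		/* 7 == 8 channels */
			SUPPORTED_FREQUENCIES(0x07);	/* bits 0-2: 32/44.1/48 kHz */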
+#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54
+# define AUDIO_ENABLED (1 << 31)
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
+#define PORT_CONNECTIVITY_MASK (3 << 30)
+#define PORT_CONNECTIVITY_SHIFT 30
+
#define DC_LB_MEMORY_SPLIT 0x6b0c
#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
@@ -755,6 +810,17 @@
/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
#define CRTC_STATUS_FRAME_COUNT 0x6e98
+#define AFMT_AUDIO_SRC_CONTROL 0x713c
+#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
+/* AFMT_AUDIO_SRC_SELECT
+ * 0 = stream0
+ * 1 = stream1
+ * 2 = stream2
+ * 3 = stream3
+ * 4 = stream4
+ * 5 = stream5
+ */
+
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -1295,6 +1361,7 @@
/* PCIE registers idx/data 0x30/0x34 */
#define PCIE_CNTL2 0x1c /* PCIE */
# define SLV_MEM_LS_EN (1 << 16)
+# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
# define MST_MEM_LS_EN (1 << 18)
# define REPLAY_MEM_LS_EN (1 << 19)
#define PCIE_LC_STATUS1 0x28 /* PCIE */
@@ -1644,6 +1711,10 @@
# define DMA_IDLE (1 << 0)
#define DMA_TILING_CONFIG 0xd0b8
+#define DMA_POWER_CNTL 0xd0bc
+# define MEM_POWER_OVERRIDE (1 << 8)
+#define DMA_CLK_CTRL 0xd0c0
+
#define DMA_PG 0xd0d4
# define PG_CNTL_ENABLE (1 << 0)
#define DMA_PGFSM_CONFIG 0xd0d8
diff --git a/drivers/gpu/drm/radeon/smu7.h b/drivers/gpu/drm/radeon/smu7.h
new file mode 100644
index 000000000000..75a380a15292
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_H
+#define SMU7_H
+
+#pragma pack(push, 1)
+
+#define SMU7_CONTEXT_ID_SMC 1
+#define SMU7_CONTEXT_ID_VBIOS 2
+
+#define SMU7_MAX_LEVELS_VDDC 8
+#define SMU7_MAX_LEVELS_VDDCI 4
+#define SMU7_MAX_LEVELS_MVDD 4
+#define SMU7_MAX_LEVELS_VDDNB 8
+
+#define SMU7_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE // SCLK + SQ DPM + ULV
+#define SMU7_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS // MCLK Levels DPM
+#define SMU7_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS // LCLK Levels
+#define SMU7_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS // PCIe speed and number of lanes.
+#define SMU7_MAX_LEVELS_UVD 8 // VCLK/DCLK levels for UVD.
+#define SMU7_MAX_LEVELS_VCE 8 // ECLK levels for VCE.
+#define SMU7_MAX_LEVELS_ACP 8 // ACLK levels for ACP.
+#define SMU7_MAX_LEVELS_SAMU 8 // SAMCLK levels for SAMU.
+#define SMU7_MAX_ENTRIES_SMIO 32 // Number of entries in SMIO table.
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
+
+#define GPIO_CLAMP_MODE_VRHOT 1
+#define GPIO_CLAMP_MODE_THERM 2
+#define GPIO_CLAMP_MODE_DC 4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
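Decoding one of these 3-bit fields follows the usual mask-then-shift pattern; for example (illustrative only, with scratch_b holding the scratch register value):

	u32 curr_uvd_index = (scratch_b & SCRATCH_B_CURR_UVD_INDEX_MASK) >>
			     SCRATCH_B_CURR_UVD_INDEX_SHIFT;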
+
+
+struct SMU7_PIDController
+{
+ uint32_t Ki;
+ int32_t LFWindupUL;
+ int32_t LFWindupLL;
+ uint32_t StatePrecision;
+ uint32_t LfPrecision;
+ uint32_t LfOffset;
+ uint32_t MaxState;
+ uint32_t MaxLfFraction;
+ uint32_t StateShift;
+};
+
+typedef struct SMU7_PIDController SMU7_PIDController;
+
+// -------------------------------------------------------------------------------------------------------------------------
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
+
+#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
+#define SMU7_UVD_DPM_CONFIG_MASK 0x10
+#define SMU7_VCE_DPM_CONFIG_MASK 0x20
+#define SMU7_ACP_DPM_CONFIG_MASK 0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
+
+struct SMU7_Firmware_Header
+{
+ uint32_t Digest[5];
+ uint32_t Version;
+ uint32_t HeaderSize;
+ uint32_t Flags;
+ uint32_t EntryPoint;
+ uint32_t CodeSize;
+ uint32_t ImageSize;
+
+ uint32_t Rtos;
+ uint32_t SoftRegisters;
+ uint32_t DpmTable;
+ uint32_t FanTable;
+ uint32_t CacConfigTable;
+ uint32_t CacStatusTable;
+
+ uint32_t mcRegisterTable;
+
+ uint32_t mcArbDramTimingTable;
+
+ uint32_t PmFuseTable;
+ uint32_t Globals;
+ uint32_t Reserved[42];
+ uint32_t Signature;
+};
+
+typedef struct SMU7_Firmware_Header SMU7_Firmware_Header;
+
+#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
+
+enum DisplayConfig {
+ PowerDown = 1,
+ DP54x4,
+ DP54x2,
+ DP54x1,
+ DP27x4,
+ DP27x2,
+ DP27x1,
+ HDMI297,
+ HDMI162,
+ LVDS,
+ DP324x4,
+ DP324x2,
+ DP324x1
+};
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h
new file mode 100644
index 000000000000..82f70c90a9ee
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_discrete.h
@@ -0,0 +1,486 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_DISCRETE_H
+#define SMU7_DISCRETE_H
+
+#include "smu7.h"
+
+#pragma pack(push, 1)
+
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 3
+#define SMU7_DTE_SINKS 1
+#define SMU7_NUM_CPU_TES 0
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+struct SMU7_SoftRegisters
+{
+ uint32_t RefClockFrequency;
+ uint32_t PmTimerP;
+ uint32_t FeatureEnables;
+ uint32_t PreVBlankGap;
+ uint32_t VBlankTimeout;
+ uint32_t TrainTimeGap;
+
+ uint32_t MvddSwitchTime;
+ uint32_t LongestAcpiTrainTime;
+ uint32_t AcpiDelay;
+ uint32_t G5TrainTime;
+ uint32_t DelayMpllPwron;
+ uint32_t VoltageChangeTimeout;
+ uint32_t HandshakeDisables;
+
+ uint8_t DisplayPhy1Config;
+ uint8_t DisplayPhy2Config;
+ uint8_t DisplayPhy3Config;
+ uint8_t DisplayPhy4Config;
+
+ uint8_t DisplayPhy5Config;
+ uint8_t DisplayPhy6Config;
+ uint8_t DisplayPhy7Config;
+ uint8_t DisplayPhy8Config;
+
+ uint32_t AverageGraphicsA;
+ uint32_t AverageMemoryA;
+ uint32_t AverageGioA;
+
+ uint8_t SClkDpmEnabledLevels;
+ uint8_t MClkDpmEnabledLevels;
+ uint8_t LClkDpmEnabledLevels;
+ uint8_t PCIeDpmEnabledLevels;
+
+ uint8_t UVDDpmEnabledLevels;
+ uint8_t SAMUDpmEnabledLevels;
+ uint8_t ACPDpmEnabledLevels;
+ uint8_t VCEDpmEnabledLevels;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+ uint32_t UlvEnterC;
+ uint32_t UlvTime;
+ uint32_t Reserved[3];
+
+};
+
+typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
+
+struct SMU7_Discrete_VoltageLevel
+{
+ uint16_t Voltage;
+ uint16_t StdVoltageHiSidd;
+ uint16_t StdVoltageLoSidd;
+ uint8_t Smio;
+ uint8_t padding;
+};
+
+typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel;
+
+struct SMU7_Discrete_GraphicsLevel
+{
+ uint32_t Flags;
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+
+ uint32_t SclkFrequency;
+
+ uint8_t padding1[2];
+ uint16_t ActivityLevel;
+
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t SpllSpreadSpectrum;
+ uint32_t SpllSpreadSpectrum2;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t EnabledForActivity;
+ uint8_t EnabledForThrottle;
+ uint8_t UpH;
+ uint8_t DownH;
+ uint8_t VoltageDownH;
+ uint8_t PowerThrottle;
+ uint8_t DeepSleepDivId;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel;
+
+struct SMU7_Discrete_ACPILevel
+{
+ uint32_t Flags;
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+ uint32_t SclkFrequency;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t DeepSleepDivId;
+ uint8_t padding;
+ uint32_t CgSpllFuncCntl;
+ uint32_t CgSpllFuncCntl2;
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t SpllSpreadSpectrum;
+ uint32_t SpllSpreadSpectrum2;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+};
+
+typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel;
+
+struct SMU7_Discrete_Ulv
+{
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint16_t VddcOffset;
+ uint8_t VddcOffsetVid;
+ uint8_t VddcPhase;
+ uint32_t Reserved;
+};
+
+typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv;
+
+struct SMU7_Discrete_MemoryLevel
+{
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+ uint32_t MinVddci;
+ uint32_t MinMvdd;
+
+ uint32_t MclkFrequency;
+
+ uint8_t EdcReadEnable;
+ uint8_t EdcWriteEnable;
+ uint8_t RttEnable;
+ uint8_t StutterEnable;
+
+ uint8_t StrobeEnable;
+ uint8_t StrobeRatio;
+ uint8_t EnabledForThrottle;
+ uint8_t EnabledForActivity;
+
+ uint8_t UpH;
+ uint8_t DownH;
+ uint8_t VoltageDownH;
+ uint8_t padding;
+
+ uint16_t ActivityLevel;
+ uint8_t DisplayWatermark;
+ uint8_t padding1;
+
+ uint32_t MpllFuncCntl;
+ uint32_t MpllFuncCntl_1;
+ uint32_t MpllFuncCntl_2;
+ uint32_t MpllAdFuncCntl;
+ uint32_t MpllDqFuncCntl;
+ uint32_t MclkPwrmgtCntl;
+ uint32_t DllCntl;
+ uint32_t MpllSs1;
+ uint32_t MpllSs2;
+};
+
+typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel;
+
+struct SMU7_Discrete_LinkLevel
+{
+ uint8_t PcieGenSpeed;
+ uint8_t PcieLaneCount;
+ uint8_t EnabledForActivity;
+ uint8_t Padding;
+ uint32_t DownT;
+ uint32_t UpT;
+ uint32_t Reserved;
+};
+
+typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel;
+
+
+struct SMU7_Discrete_MCArbDramTimingTableEntry
+{
+ uint32_t McArbDramTiming;
+ uint32_t McArbDramTiming2;
+ uint8_t McArbBurstTime;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU7_Discrete_MCArbDramTimingTable
+{
+ SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable;
+
+struct SMU7_Discrete_UvdLevel
+{
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint16_t MinVddc;
+ uint8_t MinVddcPhases;
+ uint8_t VclkDivider;
+ uint8_t DclkDivider;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel;
+
+struct SMU7_Discrete_ExtClkLevel
+{
+ uint32_t Frequency;
+ uint16_t MinVoltage;
+ uint8_t MinPhases;
+ uint8_t Divider;
+};
+
+typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel;
+
+struct SMU7_Discrete_StateInfo
+{
+ uint32_t SclkFrequency;
+ uint32_t MclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t SamclkFrequency;
+ uint32_t AclkFrequency;
+ uint32_t EclkFrequency;
+ uint16_t MvddVoltage;
+ uint16_t padding16;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+ uint8_t McRegIndex;
+ uint8_t SeqIndex;
+ uint8_t SclkDid;
+ int8_t SclkIndex;
+ int8_t MclkIndex;
+ uint8_t PCIeGen;
+
+};
+
+typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo;
+
+
+struct SMU7_Discrete_DpmTable
+{
+ SMU7_PIDController GraphicsPIDController;
+ SMU7_PIDController MemoryPIDController;
+ SMU7_PIDController LinkPIDController;
+
+ uint32_t SystemFlags;
+
+
+ uint32_t SmioMaskVddcVid;
+ uint32_t SmioMaskVddcPhase;
+ uint32_t SmioMaskVddciVid;
+ uint32_t SmioMaskMvddVid;
+
+ uint32_t VddcLevelCount;
+ uint32_t VddciLevelCount;
+ uint32_t MvddLevelCount;
+
+ SMU7_Discrete_VoltageLevel VddcLevel [SMU7_MAX_LEVELS_VDDC];
+// SMU7_Discrete_VoltageLevel VddcStandardReference [SMU7_MAX_LEVELS_VDDC];
+ SMU7_Discrete_VoltageLevel VddciLevel [SMU7_MAX_LEVELS_VDDCI];
+ SMU7_Discrete_VoltageLevel MvddLevel [SMU7_MAX_LEVELS_MVDD];
+
+ uint8_t GraphicsDpmLevelCount;
+ uint8_t MemoryDpmLevelCount;
+ uint8_t LinkLevelCount;
+ uint8_t UvdLevelCount;
+ uint8_t VceLevelCount;
+ uint8_t AcpLevelCount;
+ uint8_t SamuLevelCount;
+ uint8_t MasterDeepSleepControl;
+ uint32_t Reserved[5];
+// uint32_t SamuDefaultLevel;
+
+ SMU7_Discrete_GraphicsLevel GraphicsLevel [SMU7_MAX_LEVELS_GRAPHICS];
+ SMU7_Discrete_MemoryLevel MemoryACPILevel;
+ SMU7_Discrete_MemoryLevel MemoryLevel [SMU7_MAX_LEVELS_MEMORY];
+ SMU7_Discrete_LinkLevel LinkLevel [SMU7_MAX_LEVELS_LINK];
+ SMU7_Discrete_ACPILevel ACPILevel;
+ SMU7_Discrete_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
+ SMU7_Discrete_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
+ SMU7_Discrete_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
+ SMU7_Discrete_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
+ SMU7_Discrete_Ulv Ulv;
+
+ uint32_t SclkStepSize;
+ uint32_t Smio [SMU7_MAX_ENTRIES_SMIO];
+
+ uint8_t UvdBootLevel;
+ uint8_t VceBootLevel;
+ uint8_t AcpBootLevel;
+ uint8_t SamuBootLevel;
+
+ uint8_t UVDInterval;
+ uint8_t VCEInterval;
+ uint8_t ACPInterval;
+ uint8_t SAMUInterval;
+
+ uint8_t GraphicsBootLevel;
+ uint8_t GraphicsVoltageChangeEnable;
+ uint8_t GraphicsThermThrottleEnable;
+ uint8_t GraphicsInterval;
+
+ uint8_t VoltageInterval;
+ uint8_t ThermalInterval;
+ uint16_t TemperatureLimitHigh;
+
+ uint16_t TemperatureLimitLow;
+ uint8_t MemoryBootLevel;
+ uint8_t MemoryVoltageChangeEnable;
+
+ uint8_t MemoryInterval;
+ uint8_t MemoryThermThrottleEnable;
+ uint16_t VddcVddciDelta;
+
+ uint16_t VoltageResponseTime;
+ uint16_t PhaseResponseTime;
+
+ uint8_t PCIeBootLinkLevel;
+ uint8_t PCIeGenInterval;
+ uint8_t DTEInterval;
+ uint8_t DTEMode;
+
+ uint8_t SVI2Enable;
+ uint8_t VRHotGpio;
+ uint8_t AcDcGpio;
+ uint8_t ThermGpio;
+
+ uint16_t PPM_PkgPwrLimit;
+ uint16_t PPM_TemperatureLimit;
+
+ uint16_t DefaultTdp;
+ uint16_t TargetTdp;
+
+ uint16_t FpsHighT;
+ uint16_t FpsLowT;
+
+ uint16_t BAPMTI_R [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
+ uint16_t BAPMTI_RC [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
+
+ uint8_t DTEAmbientTempBase;
+ uint8_t DTETjOffset;
+ uint8_t GpuTjMax;
+ uint8_t GpuTjHyst;
+
+ uint16_t BootVddc;
+ uint16_t BootVddci;
+
+ uint16_t BootMVdd;
+ uint16_t padding;
+
+ uint32_t BAPM_TEMP_GRADIENT;
+
+ uint32_t LowSclkInterruptT;
+};
+
+typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable;
+
+#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
+#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY
+
+struct SMU7_Discrete_MCRegisterAddress
+{
+ uint16_t s0;
+ uint16_t s1;
+};
+
+typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress;
+
+struct SMU7_Discrete_MCRegisterSet
+{
+ uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet;
+
+struct SMU7_Discrete_MCRegisters
+{
+ uint8_t last;
+ uint8_t reserved[3];
+ SMU7_Discrete_MCRegisterAddress address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+ SMU7_Discrete_MCRegisterSet data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
+
+struct SMU7_Discrete_PmFuses {
+ // dw0-dw1
+ uint8_t BapmVddCVidHiSidd[8];
+
+ // dw2-dw3
+ uint8_t BapmVddCVidLoSidd[8];
+
+ // dw4-dw5
+ uint8_t VddCVid[8];
+
+ // dw6
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t SviLoadLineTrimVddC;
+ uint8_t SviLoadLineOffsetVddC;
+
+ // dw7
+ uint16_t TDC_VDDC_PkgLimit;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+
+ // dw8
+ uint8_t TdcWaterfallCtl;
+ uint8_t LPMLTemperatureMin;
+ uint8_t LPMLTemperatureMax;
+ uint8_t Reserved;
+
+ // dw9-dw10
+ uint8_t BapmVddCVidHiSidd2[8];
+
+ // dw11-dw12
+ uint32_t Reserved6[2];
+
+ // dw13-dw16
+ uint8_t GnbLPML[16];
+
+ // dw17
+ uint8_t GnbLPMLMaxVid;
+ uint8_t GnbLPMLMinVid;
+ uint8_t Reserved1[2];
+
+ // dw18
+ uint16_t BapmVddCBaseLeakageHiSidd;
+ uint16_t BapmVddCBaseLeakageLoSidd;
+};
+
+typedef struct SMU7_Discrete_PmFuses SMU7_Discrete_PmFuses;
+
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/smu7_fusion.h b/drivers/gpu/drm/radeon/smu7_fusion.h
new file mode 100644
index 000000000000..78ada9ffd508
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_fusion.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_FUSION_H
+#define SMU7_FUSION_H
+
+#include "smu7.h"
+
+#pragma pack(push, 1)
+
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 5
+#define SMU7_DTE_SINKS 3
+#define SMU7_NUM_CPU_TES 2
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+// All 'soft registers' should be uint32_t.
+struct SMU7_SoftRegisters
+{
+ uint32_t RefClockFrequency;
+ uint32_t PmTimerP;
+ uint32_t FeatureEnables;
+ uint32_t HandshakeDisables;
+
+ uint8_t DisplayPhy1Config;
+ uint8_t DisplayPhy2Config;
+ uint8_t DisplayPhy3Config;
+ uint8_t DisplayPhy4Config;
+
+ uint8_t DisplayPhy5Config;
+ uint8_t DisplayPhy6Config;
+ uint8_t DisplayPhy7Config;
+ uint8_t DisplayPhy8Config;
+
+ uint32_t AverageGraphicsA;
+ uint32_t AverageMemoryA;
+ uint32_t AverageGioA;
+
+ uint8_t SClkDpmEnabledLevels;
+ uint8_t MClkDpmEnabledLevels;
+ uint8_t LClkDpmEnabledLevels;
+ uint8_t PCIeDpmEnabledLevels;
+
+ uint8_t UVDDpmEnabledLevels;
+ uint8_t SAMUDpmEnabledLevels;
+ uint8_t ACPDpmEnabledLevels;
+ uint8_t VCEDpmEnabledLevels;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+ uint32_t UlvEnterC;
+ uint32_t UlvTime;
+ uint32_t Reserved[3];
+
+};
+
+typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
+
+struct SMU7_Fusion_GraphicsLevel
+{
+ uint32_t MinVddNb;
+
+ uint32_t SclkFrequency;
+
+ uint8_t Vid;
+ uint8_t VidOffset;
+ uint16_t AT;
+
+ uint8_t PowerThrottle;
+ uint8_t GnbSlow;
+ uint8_t ForceNbPs1;
+ uint8_t SclkDid;
+
+ uint8_t DisplayWatermark;
+ uint8_t EnabledForActivity;
+ uint8_t EnabledForThrottle;
+ uint8_t UpH;
+
+ uint8_t DownH;
+ uint8_t VoltageDownH;
+ uint8_t DeepSleepDivId;
+
+ uint8_t ClkBypassCntl;
+
+ uint32_t reserved;
+};
+
+typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel;
+
+struct SMU7_Fusion_GIOLevel
+{
+ uint8_t EnabledForActivity;
+ uint8_t LclkDid;
+ uint8_t Vid;
+ uint8_t VoltageDownH;
+
+ uint32_t MinVddNb;
+
+ uint16_t ResidencyCounter;
+ uint8_t UpH;
+ uint8_t DownH;
+
+ uint32_t LclkFrequency;
+
+ uint8_t ActivityLevel;
+ uint8_t EnabledForThrottle;
+
+ uint8_t ClkBypassCntl;
+
+ uint8_t padding;
+};
+
+typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel;
+
+// UVD VCLK/DCLK state (level) definition.
+struct SMU7_Fusion_UvdLevel
+{
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint16_t MinVddNb;
+ uint8_t VclkDivider;
+ uint8_t DclkDivider;
+
+ uint8_t VClkBypassCntl;
+ uint8_t DClkBypassCntl;
+
+ uint8_t padding[2];
+
+};
+
+typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel;
+
+// Clocks for other external blocks (VCE, ACP, SAMU).
+struct SMU7_Fusion_ExtClkLevel
+{
+ uint32_t Frequency;
+ uint16_t MinVoltage;
+ uint8_t Divider;
+ uint8_t ClkBypassCntl;
+
+ uint32_t Reserved;
+};
+typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel;
+
+struct SMU7_Fusion_ACPILevel
+{
+ uint32_t Flags;
+ uint32_t MinVddNb;
+ uint32_t SclkFrequency;
+ uint8_t SclkDid;
+ uint8_t GnbSlow;
+ uint8_t ForceNbPs1;
+ uint8_t DisplayWatermark;
+ uint8_t DeepSleepDivId;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel;
+
+struct SMU7_Fusion_NbDpm
+{
+ uint8_t DpmXNbPsHi;
+ uint8_t DpmXNbPsLo;
+ uint8_t Dpm0PgNbPsHi;
+ uint8_t Dpm0PgNbPsLo;
+ uint8_t EnablePsi1;
+ uint8_t SkipDPM0;
+ uint8_t SkipPG;
+ uint8_t Hysteresis;
+ uint8_t EnableDpmPstatePoll;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm;
+
+struct SMU7_Fusion_StateInfo
+{
+ uint32_t SclkFrequency;
+ uint32_t LclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t SamclkFrequency;
+ uint32_t AclkFrequency;
+ uint32_t EclkFrequency;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+ int8_t SclkIndex;
+ int8_t MclkIndex;
+};
+
+typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo;
+
+struct SMU7_Fusion_DpmTable
+{
+ uint32_t SystemFlags;
+
+ SMU7_PIDController GraphicsPIDController;
+ SMU7_PIDController GioPIDController;
+
+ uint8_t GraphicsDpmLevelCount;
+ uint8_t GIOLevelCount;
+ uint8_t UvdLevelCount;
+ uint8_t VceLevelCount;
+
+ uint8_t AcpLevelCount;
+ uint8_t SamuLevelCount;
+ uint16_t FpsHighT;
+
+ SMU7_Fusion_GraphicsLevel GraphicsLevel [SMU__NUM_SCLK_DPM_STATE];
+ SMU7_Fusion_ACPILevel ACPILevel;
+ SMU7_Fusion_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
+ SMU7_Fusion_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
+ SMU7_Fusion_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
+ SMU7_Fusion_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
+
+ uint8_t UvdBootLevel;
+ uint8_t VceBootLevel;
+ uint8_t AcpBootLevel;
+ uint8_t SamuBootLevel;
+ uint8_t UVDInterval;
+ uint8_t VCEInterval;
+ uint8_t ACPInterval;
+ uint8_t SAMUInterval;
+
+ uint8_t GraphicsBootLevel;
+ uint8_t GraphicsInterval;
+ uint8_t GraphicsThermThrottleEnable;
+ uint8_t GraphicsVoltageChangeEnable;
+
+ uint8_t GraphicsClkSlowEnable;
+ uint8_t GraphicsClkSlowDivider;
+ uint16_t FpsLowT;
+
+ uint32_t DisplayCac;
+ uint32_t LowSclkInterruptT;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+
+};
+
+struct SMU7_Fusion_GIODpmTable
+{
+
+ SMU7_Fusion_GIOLevel GIOLevel [SMU7_MAX_LEVELS_GIO];
+
+ SMU7_PIDController GioPIDController;
+
+ uint32_t GIOLevelCount;
+
+ uint8_t Enable;
+ uint8_t GIOVoltageChangeEnable;
+ uint8_t GIOBootLevel;
+ uint8_t padding;
+ uint8_t padding1[2];
+ uint8_t TargetState;
+ uint8_t CurrenttState;
+ uint8_t ThrottleOnHtc;
+ uint8_t ThermThrottleStatus;
+ uint8_t ThermThrottleTempSelect;
+ uint8_t ThermThrottleEnable;
+ uint16_t TemperatureLimitHigh;
+ uint16_t TemperatureLimitLow;
+
+};
+
+typedef struct SMU7_Fusion_DpmTable SMU7_Fusion_DpmTable;
+typedef struct SMU7_Fusion_GIODpmTable SMU7_Fusion_GIODpmTable;
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index c0a850319908..864761c0120e 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1483,6 +1483,7 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -1496,12 +1497,15 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
}
rdev->pm.dpm.ps[i].ps_priv = ps;
k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
+ clock_array_index = idx[j];
if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
break;
+
clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
sumo_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], k,
clock_info);
@@ -1530,6 +1534,20 @@ u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
}
+u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_7bit)
+{
+ u32 i;
+
+ for (i = 0; i < vid_mapping_table->num_entries; i++) {
+ if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
+ return vid_mapping_table->entries[i].vid_2bit;
+ }
+
+ return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
+}
+
static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev,
u32 vid_2bit)
{
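
The two sumo_parse_power_table hunks above (and the matching trinity change below) stop indexing clockInfo as a typed array because each entry's size is only known at runtime (ucEntrySize). A minimal sketch of that byte-offset pattern; the names here are illustrative, not taken from the driver:

#include <stddef.h>
#include <stdint.h>

/* Entries are 'entry_size' bytes each, known only at runtime, so the
 * table must be walked as raw bytes; indexing a typed array would
 * scale by the compile-time element size instead. */
static inline void *table_entry(void *base, uint32_t entry_size,
                                uint32_t i)
{
        return (uint8_t *)base + (size_t)i * entry_size;
}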
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.h b/drivers/gpu/drm/radeon/sumo_dpm.h
index 07dda299c784..db1ea32a907b 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.h
+++ b/drivers/gpu/drm/radeon/sumo_dpm.h
@@ -202,6 +202,9 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
struct sumo_vid_mapping_table *vid_mapping_table,
u32 vid_2bit);
+u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_7bit);
u32 sumo_get_sleep_divider_from_id(u32 id);
u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
u32 sclk,
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index a1eb5f59939f..b07b7b8f1aff 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1675,6 +1675,7 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -1688,14 +1689,16 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
}
rdev->pm.dpm.ps[i].ps_priv = ps;
k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
+ clock_array_index = idx[j];
if (clock_array_index >= clock_info_array->ucNumEntries)
continue;
if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
break;
clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
trinity_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], k,
clock_info);
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
new file mode 100644
index 000000000000..7266805d9786
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+
+/**
+ * uvd_v1_0_get_rptr - get read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ return RREG32(UVD_RBC_RB_RPTR);
+}
+
+/**
+ * uvd_v1_0_get_wptr - get write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ return RREG32(UVD_RBC_RB_WPTR);
+}
+
+/**
+ * uvd_v1_0_set_wptr - set write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+void uvd_v1_0_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+}
+
+/**
+ * uvd_v1_0_init - start and test UVD block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+int uvd_v1_0_init(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ uint32_t tmp;
+ int r;
+
+ /* raise clocks while booting up the VCPU */
+ radeon_set_uvd_clocks(rdev, 53300, 40000);
+
+ r = uvd_v1_0_start(rdev);
+ if (r)
+ goto done;
+
+ ring->ready = true;
+ r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ goto done;
+ }
+
+ r = radeon_ring_lock(rdev, ring, 10);
+ if (r) {
+ DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
+ goto done;
+ }
+
+ tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ /* Clear timeout status bits */
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
+ radeon_ring_write(ring, 0x8);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
+ radeon_ring_write(ring, 3);
+
+ radeon_ring_unlock_commit(rdev, ring);
+
+done:
+ /* lower clocks again */
+ radeon_set_uvd_clocks(rdev, 0, 0);
+
+ if (!r)
+ DRM_INFO("UVD initialized successfully.\n");
+
+ return r;
+}
+
+/**
+ * uvd_v1_0_fini - stop the hardware block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the UVD block, mark ring as not ready any more
+ */
+void uvd_v1_0_fini(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+
+ uvd_v1_0_stop(rdev);
+ ring->ready = false;
+}
+
+/**
+ * uvd_v1_0_start - start UVD block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup and start the UVD block
+ */
+int uvd_v1_0_start(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ uint32_t rb_bufsz;
+ int i, j, r;
+
+ /* disable byte swapping */
+ u32 lmi_swap_cntl = 0;
+ u32 mp_swap_cntl = 0;
+
+ /* disable clock gating */
+ WREG32(UVD_CGC_GATE, 0);
+
+ /* disable interrupt */
+ WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
+ /* put LMI, VCPU, RBC etc... into reset */
+ WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
+ LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
+ CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
+ mdelay(5);
+
+ /* take UVD block out of reset */
+ WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
+ mdelay(5);
+
+ /* initialize UVD memory controller */
+ WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+ (1 << 21) | (1 << 9) | (1 << 20));
+
+#ifdef __BIG_ENDIAN
+ /* swap (8 in 32) RB and IB */
+ lmi_swap_cntl = 0xa;
+ mp_swap_cntl = 0;
+#endif
+ WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+ WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
+
+ WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
+ WREG32(UVD_MPC_SET_MUXA1, 0x0);
+ WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
+ WREG32(UVD_MPC_SET_MUXB1, 0x0);
+ WREG32(UVD_MPC_SET_ALU, 0);
+ WREG32(UVD_MPC_SET_MUX, 0x88);
+
+ /* take all subblocks out of reset, except VCPU */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* enable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 1 << 9);
+
+ /* enable UMC */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+
+ /* boot up the VCPU */
+ WREG32(UVD_SOFT_RESET, 0);
+ mdelay(10);
+
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
+ for (i = 0; i < 10; ++i) {
+ uint32_t status;
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(UVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
+
+ DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
+ mdelay(10);
+ WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
+ mdelay(10);
+ r = -1;
+ }
+
+ if (r) {
+ DRM_ERROR("UVD not responding, giving up!!!\n");
+ return r;
+ }
+
+ /* enable interrupt */
+ WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
+
+ /* force RBC into idle state */
+ WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Set the write pointer delay */
+ WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
+
+ /* program the 4GB memory segment for rptr and ring buffer */
+ WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
+ (0x7 << 16) | (0x1 << 31));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(UVD_RBC_RB_RPTR, 0x0);
+
+ ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
+ WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+
+ /* set the ring address */
+ WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
+
+ /* Set ring buffer size */
+ rb_bufsz = order_base_2(ring->ring_size);
+ rb_bufsz = (0x1 << 8) | rb_bufsz;
+ WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);
+
+ return 0;
+}
+
+/**
+ * uvd_v1_0_stop - stop UVD block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * stop the UVD block
+ */
+void uvd_v1_0_stop(struct radeon_device *rdev)
+{
+ /* force RBC into idle state */
+ WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
+ /* put VCPU into reset */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* disable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 0x0);
+
+ /* Unstall UMC and register bus */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+}
+
+/**
+ * uvd_v1_0_ring_test - register write test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Test if we can successfully write to the context register
+ */
+int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
+ r = radeon_ring_lock(rdev, ring, 3);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(UVD_CONTEXT_ID);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/**
+ * uvd_v1_0_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, emit_wait ? 1 : 0);
+}
+
+/**
+ * uvd_v1_0_ib_execute - execute indirect buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer
+ */
+void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
+ radeon_ring_write(ring, ib->gpu_addr);
+ radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
+ radeon_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * uvd_v1_0_ib_test - test ib execution
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Test if we can successfully execute an IB
+ */
+int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_fence *fence = NULL;
+ int r;
+
+ r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+ if (r) {
+ DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+ if (r) {
+ DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
+ if (r) {
+ DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ goto error;
+ }
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+error:
+ radeon_fence_unref(&fence);
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ return r;
+}
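
uvd_v1_0_ring_test() above uses a common scratch-register pattern: seed a register with one sentinel, submit a packet that overwrites it with another, then poll. A minimal host-side sketch of the same idea; everything here is illustrative, and submit() merely stands in for the real ring write:

#include <stdbool.h>
#include <stdint.h>

static volatile uint32_t reg;                    /* fake scratch register */

static void submit(uint32_t val) { reg = val; }  /* engine would do this  */

static bool ring_test(unsigned timeout_us)
{
        reg = 0xCAFEDEAD;                /* seed with a sentinel          */
        submit(0xDEADBEEF);              /* ask the engine to overwrite   */
        while (timeout_us--) {
                if (reg == 0xDEADBEEF)
                        return true;     /* engine executed our packet    */
        }
        return false;
}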
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
new file mode 100644
index 000000000000..b19ef4951085
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rv770d.h"
+
+/**
+ * uvd_v2_2_fence_emit - emit a fence & trap command
+ *
+ * @rdev: radeon_device pointer
+ * @fence: fence to emit
+ *
+ * Write a fence and a trap command to the ring.
+ */
+void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 2);
+ return;
+}
+
+/**
+ * uvd_v2_2_resume - memory controller programming
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+int uvd_v2_2_resume(struct radeon_device *rdev)
+{
+ uint64_t addr;
+ uint32_t chip_id, size;
+ int r;
+
+ r = radeon_uvd_resume(rdev);
+ if (r)
+ return r;
+
+ /* program the VCPU memory controller bits 0-27 */
+ addr = rdev->uvd.gpu_addr >> 3;
+ size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = RADEON_UVD_STACK_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+ /* bits 28-31 */
+ addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+ WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+ /* bits 32-39 */
+ addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+ WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+ /* tell firmware which hardware it is running on */
+ switch (rdev->family) {
+ default:
+ return -EINVAL;
+ case CHIP_RV710:
+ chip_id = 0x01000005;
+ break;
+ case CHIP_RV730:
+ chip_id = 0x01000006;
+ break;
+ case CHIP_RV740:
+ chip_id = 0x01000007;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_id = 0x01000008;
+ break;
+ case CHIP_JUNIPER:
+ chip_id = 0x01000009;
+ break;
+ case CHIP_REDWOOD:
+ chip_id = 0x0100000a;
+ break;
+ case CHIP_CEDAR:
+ chip_id = 0x0100000b;
+ break;
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
+ chip_id = 0x0100000c;
+ break;
+ case CHIP_PALM:
+ chip_id = 0x0100000e;
+ break;
+ case CHIP_CAYMAN:
+ chip_id = 0x0100000f;
+ break;
+ case CHIP_BARTS:
+ chip_id = 0x01000010;
+ break;
+ case CHIP_TURKS:
+ chip_id = 0x01000011;
+ break;
+ case CHIP_CAICOS:
+ chip_id = 0x01000012;
+ break;
+ case CHIP_TAHITI:
+ chip_id = 0x01000014;
+ break;
+ case CHIP_VERDE:
+ chip_id = 0x01000015;
+ break;
+ case CHIP_PITCAIRN:
+ chip_id = 0x01000016;
+ break;
+ case CHIP_ARUBA:
+ chip_id = 0x01000017;
+ break;
+ }
+ WREG32(UVD_VCPU_CHIP_ID, chip_id);
+
+ return 0;
+}
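
uvd_v2_2_resume() splits the 40-bit UVD address across three register fields; the shifts can be checked in isolation (the address below is purely illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t gpu_addr = 0x1234567890ULL;    /* illustrative address */

        printf("cache base (8-byte units): 0x%llx\n",
               (unsigned long long)(gpu_addr >> 3));
        printf("bits 28-31: 0x%llx\n",
               (unsigned long long)((gpu_addr >> 28) & 0xF));
        printf("bits 32-39: 0x%llx\n",
               (unsigned long long)((gpu_addr >> 32) & 0xFF));
        return 0;
}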
diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c
new file mode 100644
index 000000000000..5b6fa1f62d4e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v3_1.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "nid.h"
+
+/**
+ * uvd_v3_1_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+}
diff --git a/drivers/gpu/drm/radeon/uvd_v4_2.c b/drivers/gpu/drm/radeon/uvd_v4_2.c
new file mode 100644
index 000000000000..d04d5073eef2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v4_2.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "cikd.h"
+
+/**
+ * uvd_v4_2_resume - memory controller programming
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+int uvd_v4_2_resume(struct radeon_device *rdev)
+{
+ uint64_t addr;
+ uint32_t size;
+
+ /* program the VCPU memory controller bits 0-27 */
+ addr = rdev->uvd.gpu_addr >> 3;
+ size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = RADEON_UVD_STACK_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+ /* bits 28-31 */
+ addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+ WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+ /* bits 32-39 */
+ addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+ WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 3751730764a5..1a0bf07fe54b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -29,7 +29,9 @@
#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>
-#define VMW_PPN_SIZE sizeof(unsigned long)
+#define VMW_PPN_SIZE (sizeof(unsigned long))
+/* A future-safe maximum remap size. */
+#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
struct page *pages[],
@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
{
SVGAFifoCmdDefineGMR2 define_cmd;
SVGAFifoCmdRemapGMR2 remap_cmd;
- uint32_t define_size = sizeof(define_cmd) + 4;
- uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
uint32_t *cmd;
uint32_t *cmd_orig;
+ uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
+ uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
+ uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
+ uint32_t remap_pos = 0;
+ uint32_t cmd_size = define_size + remap_size;
uint32_t i;
- cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
+ cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
define_cmd.gmrId = gmr_id;
define_cmd.numPages = num_pages;
+ *cmd++ = SVGA_CMD_DEFINE_GMR2;
+ memcpy(cmd, &define_cmd, sizeof(define_cmd));
+ cmd += sizeof(define_cmd) / sizeof(*cmd);
+
+ /*
+ * Need to split the command if there are too many
+ * pages that go into the GMR.
+ */
+
remap_cmd.gmrId = gmr_id;
remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
- remap_cmd.offsetPages = 0;
- remap_cmd.numPages = num_pages;
- *cmd++ = SVGA_CMD_DEFINE_GMR2;
- memcpy(cmd, &define_cmd, sizeof(define_cmd));
- cmd += sizeof(define_cmd) / sizeof(uint32);
+ while (num_pages > 0) {
+ unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+
+ remap_cmd.offsetPages = remap_pos;
+ remap_cmd.numPages = nr;
- *cmd++ = SVGA_CMD_REMAP_GMR2;
- memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
- cmd += sizeof(remap_cmd) / sizeof(uint32);
+ *cmd++ = SVGA_CMD_REMAP_GMR2;
+ memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+ cmd += sizeof(remap_cmd) / sizeof(*cmd);
- for (i = 0; i < num_pages; ++i) {
- if (VMW_PPN_SIZE <= 4)
- *cmd = page_to_pfn(*pages++);
- else
- *((uint64_t *)cmd) = page_to_pfn(*pages++);
+ for (i = 0; i < nr; ++i) {
+ if (VMW_PPN_SIZE <= 4)
+ *cmd = page_to_pfn(*pages++);
+ else
+ *((uint64_t *)cmd) = page_to_pfn(*pages++);
- cmd += VMW_PPN_SIZE / sizeof(*cmd);
+ cmd += VMW_PPN_SIZE / sizeof(*cmd);
+ }
+
+ num_pages -= nr;
+ remap_pos += nr;
}
- vmw_fifo_commit(dev_priv, define_size + remap_size);
+ BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
+
+ vmw_fifo_commit(dev_priv, cmd_size);
return 0;
}
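
The rewritten vmw_gmr2_bind() above sizes the FIFO reservation from remap_num, an integer ceiling division. The same arithmetic in isolation, with illustrative names:

#include <stddef.h>

/* ceil(num_pages / per) without floating point, exactly as the
 * 'n / per + ((n % per) > 0)' expression above computes it. */
static size_t chunks_needed(size_t num_pages, size_t per)
{
        return num_pages / per + (num_pages % per != 0);
}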
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 5207591a598c..cd33084c7860 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -192,6 +192,7 @@ static struct hid_ll_driver logi_dj_ll_driver;
static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
size_t count,
unsigned char report_type);
+static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
@@ -232,6 +233,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
SPFUNCTION_DEVICE_LIST_EMPTY) {
dbg_hid("%s: device list is empty\n", __func__);
+ djrcv_dev->querying_devices = false;
return;
}
@@ -242,6 +244,12 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
return;
}
+ if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
+ /* The device is already known. No need to reallocate it. */
+ dbg_hid("%s: device is already known\n", __func__);
+ return;
+ }
+
dj_hiddev = hid_allocate_device();
if (IS_ERR(dj_hiddev)) {
dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
@@ -305,6 +313,7 @@ static void delayedwork_callback(struct work_struct *work)
struct dj_report dj_report;
unsigned long flags;
int count;
+ int retval;
dbg_hid("%s\n", __func__);
@@ -337,6 +346,25 @@ static void delayedwork_callback(struct work_struct *work)
logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
break;
default:
+ /* A normal report (i.e. one not belonging to a pair/unpair
+ * notification) arriving here means we have no paired dj_device
+ * associated with the report's device_index: the original "device
+ * paired" notification for this dj_device never reached this driver,
+ * because hid-core discards all packets coming from a device while
+ * probe() is executing. */
+ if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) {
+ /* ok, we don't know the device, just re-ask the
+ * receiver for the list of connected devices. */
+ retval = logi_dj_recv_query_paired_devices(djrcv_dev);
+ if (!retval) {
+ /* everything went fine, so just leave */
+ break;
+ }
+ dev_err(&djrcv_dev->hdev->dev,
+ "%s:logi_dj_recv_query_paired_devices "
+ "error:%d\n", __func__, retval);
+ }
dbg_hid("%s: unexpected report type\n", __func__);
}
}
@@ -367,6 +395,12 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
if (!djdev) {
dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
" is NULL, index %d\n", dj_report->device_index);
+ kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
+
+ if (schedule_work(&djrcv_dev->work) == 0) {
+ dbg_hid("%s: did not schedule the work item, was already "
+ "queued\n", __func__);
+ }
return;
}
@@ -397,6 +431,12 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
if (dj_device == NULL) {
dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
" is NULL, index %d\n", dj_report->device_index);
+ kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
+
+ if (schedule_work(&djrcv_dev->work) == 0) {
+ dbg_hid("%s: did not schedule the work item, was already "
+ "queued\n", __func__);
+ }
return;
}
@@ -444,6 +484,10 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
struct dj_report *dj_report;
int retval;
+ /* no need to protect djrcv_dev->querying_devices */
+ if (djrcv_dev->querying_devices)
+ return 0;
+
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
return -ENOMEM;
@@ -455,6 +499,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
return retval;
}
+
static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
unsigned timeout)
{
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
index fd28a5e0ca3b..4a4000340ce1 100644
--- a/drivers/hid/hid-logitech-dj.h
+++ b/drivers/hid/hid-logitech-dj.h
@@ -101,6 +101,7 @@ struct dj_receiver_dev {
struct work_struct work;
struct kfifo notif_fifo;
spinlock_t lock;
+ bool querying_devices;
};
struct dj_device {
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index ecbc74923d06..87fbe2924cfa 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -369,7 +369,8 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
if (sc->quirks & PS3REMOTE)
return ps3remote_mapping(hdev, hi, field, usage, bit, max);
- return -1;
+ /* Let hid-core decide for the others */
+ return 0;
}
/*
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index a7451632ceb4..6f1feb2c2e97 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -518,7 +518,6 @@ int hidraw_connect(struct hid_device *hid)
goto out;
}
- mutex_unlock(&minors_lock);
init_waitqueue_head(&dev->wait);
INIT_LIST_HEAD(&dev->list);
@@ -528,6 +527,7 @@ int hidraw_connect(struct hid_device *hid)
dev->exist = 1;
hid->hidraw = dev;
+ mutex_unlock(&minors_lock);
out:
return result;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 0f34bca9f5e5..6099f50b28aa 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg,
u16 value)
{
return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
- && i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
+ || i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
}
static void adt7470_init_client(struct i2c_client *client)
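
The adt7470 fix above turns on the second write: i2c_smbus_write_byte_data() returns 0 on success, so with && the expression short-circuits to 0 after a successful first write and the high byte is never sent; with || the second write runs unless the first already failed. A runnable toy version of the hazard (the stubs are illustrative):

#include <stdio.h>

/* 0 = success, negative = errno-style failure (illustrative stubs). */
static int write_lo(void) { puts("write_lo"); return 0; }
static int write_hi(void) { puts("write_hi"); return 0; }

int main(void)
{
        /* With &&, write_hi() is skipped whenever write_lo() succeeds,
         * so only half the word would reach the device. */
        int bug = write_lo() && write_hi();
        /* With ||, write_hi() runs unless write_lo() already failed,
         * and a nonzero result still signals an error. */
        int fix = write_lo() || write_hi();

        return bug | fix;
}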
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 328fb0353c17..a41b5f3fc506 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -605,12 +605,12 @@ static int max6697_init_chip(struct i2c_client *client)
if (ret < 0)
return ret;
ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY,
- pdata->ideality_mask >> 1);
+ pdata->ideality_value);
if (ret < 0)
return ret;
ret = i2c_smbus_write_byte_data(client,
MAX6581_REG_IDEALITY_SELECT,
- pdata->ideality_value);
+ pdata->ideality_mask >> 1);
if (ret < 0)
return ret;
}
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index ccec916bc3eb..af8f65fb1c05 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -246,9 +246,9 @@ static void kempld_i2c_device_init(struct kempld_i2c_data *i2c)
bus_frequency = KEMPLD_I2C_FREQ_MAX;
if (pld->info.spec_major == 1)
- prescale = pld->pld_clock / bus_frequency * 5 - 1000;
+ prescale = pld->pld_clock / (bus_frequency * 5) - 1000;
else
- prescale = pld->pld_clock / bus_frequency * 4 - 3000;
+ prescale = pld->pld_clock / (bus_frequency * 4) - 3000;
if (prescale < 0)
prescale = 0;
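
The kempld change is pure operator precedence: / and * bind equally and associate left to right, so the old expression computed (pld_clock / bus_frequency) * 5 rather than dividing by the intended product. With illustrative numbers:

#include <stdio.h>

int main(void)
{
        long clock = 24000, freq = 100;  /* illustrative values only   */

        printf("%ld\n", clock / freq * 5 - 1000);   /* (clock/freq)*5  */
        printf("%ld\n", clock / (freq * 5) - 1000); /* intended form   */
        return 0;
}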
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index df8ff5aea5b5..e2e9a0dade96 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -493,7 +493,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
* based on this empirical measurement and a lot of previous frobbing.
*/
i2c->cmd_err = 0;
- if (msg->len < 8) {
+ if (0) { /* disable PIO mode until a proper fix is made */
ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
if (ret)
mxs_i2c_reset(i2c);
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 0ad208a69c29..3ceac3e91dde 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -60,7 +60,6 @@ static void tiadc_step_config(struct tiadc_device *adc_dev)
{
unsigned int stepconfig;
int i, steps;
- u32 step_en;
/*
* There are 16 configurable steps and 8 analog input
@@ -86,8 +85,7 @@ static void tiadc_step_config(struct tiadc_device *adc_dev)
adc_dev->channel_step[i] = steps;
steps++;
}
- step_en = get_adc_step_mask(adc_dev);
- am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
+
}
static const char * const chan_name_ain[] = {
@@ -142,10 +140,22 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
- int i;
- unsigned int fifo1count, read;
+ int i, map_val;
+ unsigned int fifo1count, read, stepid;
u32 step = UINT_MAX;
bool found = false;
+ u32 step_en;
+ unsigned long timeout = jiffies + usecs_to_jiffies
+ (IDLE_TIMEOUT * adc_dev->channels);
+ step_en = get_adc_step_mask(adc_dev);
+ am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
+
+ /* Wait for ADC sequencer to complete sampling */
+ while (tiadc_readl(adc_dev, REG_ADCFSM) & SEQ_STATUS) {
+ if (time_after(jiffies, timeout))
+ return -EAGAIN;
+ }
+ map_val = chan->channel + TOTAL_CHANNELS;
/*
* When the sub-system is first enabled,
@@ -170,12 +180,16 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
for (i = 0; i < fifo1count; i++) {
read = tiadc_readl(adc_dev, REG_FIFO1);
- if (read >> 16 == step) {
- *val = read & 0xfff;
+ stepid = read & FIFOREAD_CHNLID_MASK;
+ stepid = stepid >> 0x10;
+
+ if (stepid == map_val) {
+ read = read & FIFOREAD_DATA_MASK;
found = true;
+ *val = read;
}
}
- am335x_tsc_se_update(adc_dev->mfd_tscadc);
+
if (found == false)
return -EBUSY;
return IIO_VAL_INT;
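
The tiadc_read_raw() rework decodes each FIFO word with explicit masks (FIFOREAD_DATA_MASK / FIFOREAD_CHNLID_MASK) instead of a bare shift. A sketch of the mask-and-shift idiom with made-up field positions; the real masks live in the TSC/ADC header:

#include <stdint.h>

/* Illustrative layout: sample in bits 0-11, channel id in bits 16-19. */
#define DATA_MASK   0x00000fffu
#define CHNLID_MASK 0x000f0000u

static void decode(uint32_t word, uint32_t *data, uint32_t *chnl)
{
        *data = word & DATA_MASK;
        *chnl = (word & CHNLID_MASK) >> 16;
}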
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index ea8a4146620d..0dd9bb873130 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -127,12 +127,17 @@ static struct iio_trigger *iio_trigger_find_by_name(const char *name,
void iio_trigger_poll(struct iio_trigger *trig, s64 time)
{
int i;
- if (!trig->use_count)
- for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
- if (trig->subirqs[i].enabled) {
- trig->use_count++;
+
+ if (!atomic_read(&trig->use_count)) {
+ atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+ for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+ if (trig->subirqs[i].enabled)
generic_handle_irq(trig->subirq_base + i);
- }
+ else
+ iio_trigger_notify_done(trig);
+ }
+ }
}
EXPORT_SYMBOL(iio_trigger_poll);
@@ -146,19 +151,24 @@ EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
{
int i;
- if (!trig->use_count)
- for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
- if (trig->subirqs[i].enabled) {
- trig->use_count++;
+
+ if (!atomic_read(&trig->use_count)) {
+ atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+ for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+ if (trig->subirqs[i].enabled)
handle_nested_irq(trig->subirq_base + i);
- }
+ else
+ iio_trigger_notify_done(trig);
+ }
+ }
}
EXPORT_SYMBOL(iio_trigger_poll_chained);
void iio_trigger_notify_done(struct iio_trigger *trig)
{
- trig->use_count--;
- if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
+ if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
+ trig->ops->try_reenable)
if (trig->ops->try_reenable(trig))
/* Missed an interrupt so launch new poll now */
iio_trigger_poll(trig, 0);
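
The trigger rework replaces a plain use_count with an atomic that is preloaded to the consumer count and then decremented exactly once per consumer, enabled or not, so the last one to finish can safely re-enable the trigger. A minimal C11 sketch of that countdown, with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

#define NCONSUMERS 4  /* stand-in for CONFIG_IIO_CONSUMERS_PER_TRIGGER */

static atomic_int use_count;

/* Start a poll only when the previous one has fully drained. */
static bool poll_start(void)
{
        if (atomic_load(&use_count))
                return false;
        atomic_store(&use_count, NCONSUMERS);
        return true;
}

/* Each consumer accounts for exactly one decrement; returns true for
 * the last one, which may then re-arm the trigger. */
static bool notify_done(void)
{
        return atomic_fetch_sub(&use_count, 1) == 1;
}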
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 5f4749e60b04..c1cd5698b8ae 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = adjd_s311_read_data(indio_dev, chan->address, val);
+ ret = adjd_s311_read_data(indio_dev,
+ ADJD_S311_DATA_REG(chan->address), val);
if (ret < 0)
return ret;
return IIO_VAL_INT;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f1c279fabe64..7c0f9535fb7d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -423,7 +423,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
struct sockaddr_ib *addr;
union ib_gid gid, sgid, *dgid;
u16 pkey, index;
- u8 port, p;
+ u8 p;
int i;
cma_dev = NULL;
@@ -443,7 +443,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
if (!memcmp(&gid, dgid, sizeof(gid))) {
cma_dev = cur_dev;
sgid = gid;
- port = p;
+ id_priv->id.port_num = p;
goto found;
}
@@ -451,7 +451,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
dgid->global.subnet_prefix)) {
cma_dev = cur_dev;
sgid = gid;
- port = p;
+ id_priv->id.port_num = p;
}
}
}
@@ -462,7 +462,6 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
found:
cma_attach_to_dev(id_priv, cma_dev);
- id_priv->id.port_num = port;
addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
memcpy(&addr->sib_addr, &sgid, sizeof sgid);
cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
@@ -880,7 +879,8 @@ static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
{
struct cma_hdr *hdr;
- if (listen_id->route.addr.src_addr.ss_family == AF_IB) {
+ if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
+ (ib_event->event == IB_CM_REQ_RECEIVED)) {
cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
return 0;
}
@@ -2677,29 +2677,32 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
{
struct ib_cm_sidr_req_param req;
struct ib_cm_id *id;
+ void *private_data;
int offset, ret;
+ memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
req.private_data_len = offset + conn_param->private_data_len;
if (req.private_data_len < conn_param->private_data_len)
return -EINVAL;
if (req.private_data_len) {
- req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
- if (!req.private_data)
+ private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+ if (!private_data)
return -ENOMEM;
} else {
- req.private_data = NULL;
+ private_data = NULL;
}
if (conn_param->private_data && conn_param->private_data_len)
- memcpy((void *) req.private_data + offset,
- conn_param->private_data, conn_param->private_data_len);
+ memcpy(private_data + offset, conn_param->private_data,
+ conn_param->private_data_len);
- if (req.private_data) {
- ret = cma_format_hdr((void *) req.private_data, id_priv);
+ if (private_data) {
+ ret = cma_format_hdr(private_data, id_priv);
if (ret)
goto out;
+ req.private_data = private_data;
}
id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
@@ -2721,7 +2724,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
id_priv->cm_id.ib = NULL;
}
out:
- kfree(req.private_data);
+ kfree(private_data);
return ret;
}
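
The cma change sidesteps writes through req.private_data, which the old code could only perform by casting away its const qualifier; the data is now built in a writable local and published afterwards. A small sketch of that staging pattern (the struct and names are illustrative):

#include <stdlib.h>
#include <string.h>

struct req { const void *data; size_t len; };  /* illustrative */

static int build(struct req *r, const void *payload, size_t len)
{
        void *buf = calloc(1, len);   /* writable staging buffer        */

        if (!buf)
                return -1;
        memcpy(buf, payload, len);    /* fill while still non-const     */
        r->data = buf;                /* publish via const pointer last */
        return 0;
}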
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index dc3fd1e8af07..4c837e66516b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2663,6 +2663,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
int ret, i;
struct ib_qp_attr *attr;
struct ib_qp *qp;
+ u16 pkey_index;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
if (!attr) {
@@ -2670,6 +2671,11 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
return -ENOMEM;
}
+ ret = ib_find_pkey(port_priv->device, port_priv->port_num,
+ IB_DEFAULT_PKEY_FULL, &pkey_index);
+ if (ret)
+ pkey_index = 0;
+
for (i = 0; i < IB_MAD_QPS_CORE; i++) {
qp = port_priv->qp_info[i].qp;
if (!qp)
@@ -2680,7 +2686,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
* one is needed for the Reset to Init transition
*/
attr->qp_state = IB_QPS_INIT;
- attr->pkey_index = 0;
+ attr->pkey_index = pkey_index;
attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
ret = ib_modify_qp(qp, attr, IB_QP_STATE |
IB_QP_PKEY_INDEX | IB_QP_QKEY);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e87f2201b220..d2283837d451 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -226,6 +226,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
sizeof(struct t3_cqe));
uresp.memsize = mm->len;
+ uresp.reserved = 0;
resplen = sizeof uresp;
}
if (ib_copy_to_udata(udata, &uresp, resplen)) {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 232040447e8a..a4975e1654a6 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1657,6 +1657,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
if (mm5) {
uresp.ma_sync_key = ucontext->key;
ucontext->key += PAGE_SIZE;
+ } else {
+ uresp.ma_sync_key = 0;
}
uresp.sq_key = ucontext->key;
ucontext->key += PAGE_SIZE;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 4d599cedbb0b..f2a3f48107e7 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1511,8 +1511,14 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
memset(&attr, 0, sizeof attr);
attr.qp_state = IB_QPS_INIT;
- attr.pkey_index =
- to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
+ ret = 0;
+ if (create_tun)
+ ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
+ ctx->port, IB_DEFAULT_PKEY_FULL,
+ &attr.pkey_index);
+ if (ret || !create_tun)
+ attr.pkey_index =
+ to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
attr.qkey = IB_QP1_QKEY;
attr.port_num = ctx->port;
ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8000fff4d444..3f831de9a4d8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -619,7 +619,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.tot_uuars = req.total_num_uuars;
resp.num_ports = dev->mdev.caps.num_ports;
- err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ err = ib_copy_to_udata(udata, &resp,
+ sizeof(resp) - sizeof(resp.reserved));
if (err)
goto out_uars;
@@ -1426,7 +1427,8 @@ static int init_one(struct pci_dev *pdev,
if (err)
goto err_eqs;
- if (ib_register_device(&dev->ib_dev, NULL))
+ err = ib_register_device(&dev->ib_dev, NULL);
+ if (err)
goto err_rsrc;
err = create_umr_res(dev);
@@ -1434,8 +1436,9 @@ static int init_one(struct pci_dev *pdev,
goto err_dev;
for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
- if (device_create_file(&dev->ib_dev.dev,
- mlx5_class_attributes[i]))
+ err = device_create_file(&dev->ib_dev.dev,
+ mlx5_class_attributes[i]);
+ if (err)
goto err_umrc;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 16ac54c9819f..045f8cdbd303 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -199,7 +199,7 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
static int sq_overhead(enum ib_qp_type qp_type)
{
- int size;
+ int size = 0;
switch (qp_type) {
case IB_QPT_XRC_INI:
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 418004c93feb..90200245c5eb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3570,10 +3570,10 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p,"
- " Tcp state = %d, iWARP state = %d\n",
+ " Tcp state = %s, iWARP state = %s\n",
async_event_id,
le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
- tcp_state, iwarp_state);
+ nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
if (aeq_info & NES_AEQE_QP) {
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 8f67fe2e91e6..5b53ca5a2284 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1384,6 +1384,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
if (ibpd->uobject) {
uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
+ uresp.mmap_rq_db_index = 0;
uresp.actual_sq_size = sq_size;
uresp.actual_rq_size = rq_size;
uresp.qp_id = nesqp->hwqp.qp_id;
@@ -1767,7 +1768,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
resp.cq_id = nescq->hw_cq.cq_number;
resp.cq_size = nescq->hw_cq.cq_size;
resp.mmap_db_index = 0;
- if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
+ if (ib_copy_to_udata(udata, &resp, sizeof resp - sizeof resp.reserved)) {
nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
kfree(nescq);
return ERR_PTR(-EFAULT);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index a877a8ed7907..f4c587c68f64 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -29,7 +29,6 @@
#include <net/netevent.h>
#include <rdma/ib_addr.h>
-#include <rdma/ib_cache.h>
#include "ocrdma.h"
#include "ocrdma_verbs.h"
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index dcfbab177faa..f36630e4b6be 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -242,6 +242,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
memset(ctx->ah_tbl.va, 0, map_len);
ctx->ah_tbl.len = map_len;
+ memset(&resp, 0, sizeof(resp));
resp.ah_tbl_len = ctx->ah_tbl.len;
resp.ah_tbl_page = ctx->ah_tbl.pa;
@@ -253,7 +254,6 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
resp.wqe_size = dev->attr.wqe_size;
resp.rqe_size = dev->attr.rqe_size;
resp.dpp_wqe_size = dev->attr.wqe_size;
- resp.rsvd = 0;
memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
status = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -338,6 +338,7 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
struct ocrdma_alloc_pd_uresp rsp;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+ memset(&rsp, 0, sizeof(rsp));
rsp.id = pd->id;
rsp.dpp_enabled = pd->dpp_enabled;
db_page_addr = pd->dev->nic_info.unmapped_db +
@@ -692,6 +693,7 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
struct ocrdma_ucontext *uctx;
struct ocrdma_create_cq_uresp uresp;
+ memset(&uresp, 0, sizeof(uresp));
uresp.cq_id = cq->id;
uresp.page_size = cq->len;
uresp.num_pages = 1;
@@ -1460,6 +1462,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
int status;
struct ocrdma_create_srq_uresp uresp;
+ memset(&uresp, 0, sizeof(uresp));
uresp.rq_dbid = srq->rq.dbid;
uresp.num_rq_pages = 1;
uresp.rq_page_addr[0] = srq->rq.pa;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 21e8b09d4bf8..016e7429adf6 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1596,6 +1596,8 @@ static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
struct qib_devdata *dd = ppd->dd;
errs &= QIB_E_P_SDMAERRS;
+ err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
+ errs, qib_7322p_error_msgs);
if (errs & QIB_E_P_SDMAUNEXPDATA)
qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 32162d355370..9b5322d8cd5a 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -717,7 +717,7 @@ void dump_sdma_state(struct qib_pportdata *ppd)
struct qib_sdma_txreq *txp, *txpnext;
__le64 *descqp;
u64 desc[2];
- dma_addr_t addr;
+ u64 addr;
u16 gen, dwlen, dwoffset;
u16 head, tail, cnt;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 2cfa76f5d99e..196b1d13cbcb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -932,12 +932,47 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
return 0;
}
+/*
+ * Takes whatever value is in pkey index 0 and updates priv->pkey;
+ * returns 0 if the pkey value was changed.
+ */
+static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
+{
+ int result;
+ u16 prev_pkey;
+
+ prev_pkey = priv->pkey;
+ result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
+ if (result) {
+ ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
+ priv->port, result);
+ return result;
+ }
+
+ priv->pkey |= 0x8000;
+
+ if (prev_pkey != priv->pkey) {
+ ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
+ prev_pkey, priv->pkey);
+ /*
+ * Update the pkey in the broadcast address, while making sure to set
+ * the full membership bit, so that we join the right broadcast group.
+ */
+ priv->dev->broadcast[8] = priv->pkey >> 8;
+ priv->dev->broadcast[9] = priv->pkey & 0xff;
+ return 0;
+ }
+
+ return 1;
+}
+
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
enum ipoib_flush_level level)
{
struct ipoib_dev_priv *cpriv;
struct net_device *dev = priv->dev;
u16 new_index;
+ int result;
mutex_lock(&priv->vlan_mutex);
@@ -951,6 +986,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
mutex_unlock(&priv->vlan_mutex);
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
+ /* for non-child devices we must check/update the pkey value here */
+ if (level == IPOIB_FLUSH_HEAVY &&
+ !test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+ update_parent_pkey(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
return;
}
@@ -961,21 +1000,32 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
}
if (level == IPOIB_FLUSH_HEAVY) {
- if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
- clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
- ipoib_ib_dev_down(dev, 0);
- ipoib_ib_dev_stop(dev, 0);
- if (ipoib_pkey_dev_delay_open(dev))
+ /* child devices chase their origin pkey value, while non-child
+ * (parent) devices should always take what is present in pkey index 0
+ */
+ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
+ clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+ ipoib_ib_dev_down(dev, 0);
+ ipoib_ib_dev_stop(dev, 0);
+ if (ipoib_pkey_dev_delay_open(dev))
+ return;
+ }
+ /* restart QP only if P_Key index is changed */
+ if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
+ new_index == priv->pkey_index) {
+ ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
return;
+ }
+ priv->pkey_index = new_index;
+ } else {
+ result = update_parent_pkey(priv);
+ /* restart QP only if P_Key value changed */
+ if (result) {
+ ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
+ return;
+ }
}
-
- /* restart QP only if P_Key index is changed */
- if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
- new_index == priv->pkey_index) {
- ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
- return;
- }
- priv->pkey_index = new_index;
}
if (level == IPOIB_FLUSH_LIGHT) {
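
Both IPoIB hunks revolve around the P_Key membership bit: bit 15 set means full membership, which is why update_parent_pkey() ORs in 0x8000 and why the create-child paths reject 0 and bare 0x8000, whose low 15 bits carry no key at all. A sketch, with illustrative helper names:

#include <stdbool.h>
#include <stdint.h>

#define PKEY_FULL_MEMBER 0x8000u

static uint16_t to_full_member(uint16_t pkey)
{
        return pkey | PKEY_FULL_MEMBER;   /* as the flush path does */
}

/* A child pkey must carry real low bits: 0 and bare 0x8000 are invalid. */
static bool child_pkey_valid(uint16_t pkey)
{
        return (pkey & ~PKEY_FULL_MEMBER) != 0;
}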
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b6e049a3c7a8..c6f71a88c55c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1461,7 +1461,7 @@ static ssize_t create_child(struct device *dev,
if (sscanf(buf, "%i", &pkey) != 1)
return -EINVAL;
- if (pkey < 0 || pkey > 0xffff)
+ if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
return -EINVAL;
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 74685936c948..f81abe16cf09 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -119,6 +119,15 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
} else
child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
+ if (child_pkey == 0 || child_pkey == 0x8000)
+ return -EINVAL;
+
+ /*
+ * Set the full membership bit, so that we join the right
+ * broadcast group, etc.
+ */
+ child_pkey |= 0x8000;
+
err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD);
if (!err && data)
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 0b9a79b2f48a..82fc86a90c1a 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -439,15 +439,15 @@ static void backside_setup_pid(void)
/* Slots fan */
static const struct wf_pid_param slots_param = {
- .interval = 5,
- .history_len = 2,
- .gd = 30 << 20,
- .gp = 5 << 20,
- .gr = 0,
- .itarget = 40 << 16,
- .additive = 1,
- .min = 300,
- .max = 4000,
+ .interval = 1,
+ .history_len = 20,
+ .gd = 0,
+ .gp = 0,
+ .gr = 0x00100000,
+ .itarget = 3200000,
+ .additive = 0,
+ .min = 20,
+ .max = 100,
};
static void slots_fan_tick(void)
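
The slots-fan table swap reads naturally as 16.16 fixed point: the old itarget of 40 << 16 encodes 40 as an integer target, and if the new table keeps the same encoding (an assumption on our part, not stated in the patch), 3200000 works out to roughly 48.8. A quick check:

#include <stdio.h>

#define FIX16(x) ((x) << 16)  /* integer -> 16.16 fixed point */

int main(void)
{
        printf("%d\n", FIX16(40));           /* old itarget: 2621440 */
        printf("%.2f\n", 3200000 / 65536.0); /* new itarget: ~48.83  */
        return 0;
}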
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index dc112a7137fe..4296155090b2 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -959,23 +959,21 @@ out:
return r;
}
-static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
- struct entry *e = hash_lookup(mq, oblock);
+ struct mq_policy *mq = to_mq_policy(p);
+ struct entry *e;
+
+ mutex_lock(&mq->lock);
+
+ e = hash_lookup(mq, oblock);
BUG_ON(!e || !e->in_cache);
del(mq, e);
e->in_cache = false;
push(mq, e);
-}
-static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- remove_mapping(mq, oblock);
mutex_unlock(&mq->lock);
}
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index efdc873e58d1..a9857022f71d 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -117,7 +117,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
+ int ret = -EINVAL;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
@@ -157,7 +157,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- return 0;
+ return ret;
}
static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index df4ada880e42..bd9405df1bd6 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -1987,7 +1987,7 @@ MODULE_DEVICE_TABLE(platform, coda_platform_ids);
#ifdef CONFIG_OF
static const struct of_device_id coda_dt_ids[] = {
- { .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] },
+ { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
{ .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
{ /* sentinel */ }
};
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 553d87e5ceab..fd6289d60cde 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -784,6 +784,7 @@ static int g2d_probe(struct platform_device *pdev)
}
*vfd = g2d_videodev;
vfd->lock = &dev->mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 5296385153d5..4f6dd42c9adb 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -344,7 +344,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
pix_mp->num_planes = 2;
/* Set pixelformat to the format in which MFC
outputs the decoded frame */
- pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT;
+ pix_mp->pixelformat = ctx->dst_fmt->fourcc;
pix_mp->plane_fmt[0].bytesperline = ctx->buf_width;
pix_mp->plane_fmt[0].sizeimage = ctx->luma_size;
pix_mp->plane_fmt[1].bytesperline = ctx->buf_width;
@@ -382,10 +382,16 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
mfc_err("Unsupported format for source.\n");
return -EINVAL;
}
- if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) {
- mfc_err("Not supported format.\n");
+ if (fmt->codec_mode == S5P_FIMV_CODEC_NONE) {
+ mfc_err("Unknown codec\n");
return -EINVAL;
}
+ if (!IS_MFCV6(dev)) {
+ if (fmt->fourcc == V4L2_PIX_FMT_VP8) {
+ mfc_err("Not supported format.\n");
+ return -EINVAL;
+ }
+ }
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
fmt = find_format(f, MFC_FMT_RAW);
if (!fmt) {
@@ -411,7 +417,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
int ret = 0;
- struct s5p_mfc_fmt *fmt;
struct v4l2_pix_format_mplane *pix_mp;
mfc_debug_enter();
@@ -425,54 +430,32 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
goto out;
}
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- fmt = find_format(f, MFC_FMT_RAW);
- if (!fmt) {
- mfc_err("Unsupported format for source.\n");
- return -EINVAL;
- }
- if (!IS_MFCV6(dev) && (fmt->fourcc != V4L2_PIX_FMT_NV12MT)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- } else if (IS_MFCV6(dev) &&
- (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- }
- ctx->dst_fmt = fmt;
- mfc_debug_leave();
- return ret;
- } else if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- mfc_err("Wrong type error for S_FMT : %d", f->type);
- return -EINVAL;
- }
- fmt = find_format(f, MFC_FMT_DEC);
- if (!fmt || fmt->codec_mode == S5P_MFC_CODEC_NONE) {
- mfc_err("Unknown codec\n");
- ret = -EINVAL;
+ /* dst_fmt is validated by call to vidioc_try_fmt */
+ ctx->dst_fmt = find_format(f, MFC_FMT_RAW);
+ ret = 0;
goto out;
- }
- if (fmt->type != MFC_FMT_DEC) {
- mfc_err("Wrong format selected, you should choose "
- "format for decoding\n");
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* src_fmt is validated by call to vidioc_try_fmt */
+ ctx->src_fmt = find_format(f, MFC_FMT_DEC);
+ ctx->codec_mode = ctx->src_fmt->codec_mode;
+ mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
+ pix_mp->height = 0;
+ pix_mp->width = 0;
+ if (pix_mp->plane_fmt[0].sizeimage)
+ ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
+ else
+ pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
+ DEF_CPB_SIZE;
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ ctx->state = MFCINST_INIT;
+ ret = 0;
+ goto out;
+ } else {
+ mfc_err("Wrong type error for S_FMT : %d", f->type);
ret = -EINVAL;
goto out;
}
- if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- }
- ctx->src_fmt = fmt;
- ctx->codec_mode = fmt->codec_mode;
- mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
- pix_mp->height = 0;
- pix_mp->width = 0;
- if (pix_mp->plane_fmt[0].sizeimage)
- ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
- else
- pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
- DEF_CPB_SIZE;
- pix_mp->plane_fmt[0].bytesperline = 0;
- ctx->state = MFCINST_INIT;
+
out:
mfc_debug_leave();
return ret;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 2549967b2f85..59e56f4c8ce3 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -906,6 +906,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_fmt *fmt;
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
@@ -930,6 +931,18 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
return -EINVAL;
}
+ if (!IS_MFCV6(dev)) {
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) {
+ mfc_err("Not supported format.\n");
+ return -EINVAL;
+ }
+ } else if (IS_MFCV6(dev)) {
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
+ mfc_err("Not supported format.\n");
+ return -EINVAL;
+ }
+ }
+
if (fmt->num_planes != pix_fmt_mp->num_planes) {
mfc_err("failed to try output format\n");
return -EINVAL;
@@ -947,7 +960,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
- struct s5p_mfc_fmt *fmt;
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
int ret = 0;
@@ -960,13 +972,9 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
goto out;
}
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- fmt = find_format(f, MFC_FMT_ENC);
- if (!fmt) {
- mfc_err("failed to set capture format\n");
- return -EINVAL;
- }
+ /* dst_fmt is validated by call to vidioc_try_fmt */
+ ctx->dst_fmt = find_format(f, MFC_FMT_ENC);
ctx->state = MFCINST_INIT;
- ctx->dst_fmt = fmt;
ctx->codec_mode = ctx->dst_fmt->codec_mode;
ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage;
pix_fmt_mp->plane_fmt[0].bytesperline = 0;
@@ -987,28 +995,8 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
}
mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- fmt = find_format(f, MFC_FMT_RAW);
- if (!fmt) {
- mfc_err("failed to set output format\n");
- return -EINVAL;
- }
-
- if (!IS_MFCV6(dev) &&
- (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- } else if (IS_MFCV6(dev) &&
- (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- }
-
- if (fmt->num_planes != pix_fmt_mp->num_planes) {
- mfc_err("failed to set output format\n");
- ret = -EINVAL;
- goto out;
- }
- ctx->src_fmt = fmt;
+ /* src_fmt is validated by call to vidioc_try_fmt */
+ ctx->src_fmt = find_format(f, MFC_FMT_RAW);
ctx->img_width = pix_fmt_mp->width;
ctx->img_height = pix_fmt_mp->height;
mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode);
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 4851cc2e4a4d..c4ff9739a7ae 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -726,7 +726,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
*eedata = data;
*eedata_len = len;
- dev_config = (void *)eedata;
+ dev_config = (void *)*eedata;
switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) {
case 0:
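The one-character fix above is easy to misread, so here is a self-contained illustration of the difference (buffer names are hypothetical): eedata has type u8 **, so casting eedata itself yields the address of the pointer variable, not of the EEPROM buffer it points to.

#include <stdio.h>

int main(void)
{
	unsigned char eeprom[4] = { 0x12, 0x34, 0x56, 0x78 };
	unsigned char *data = eeprom;
	unsigned char **eedata = &data;

	void *wrong = (void *)eedata;	/* address of the pointer itself */
	void *right = (void *)*eedata;	/* the buffer being pointed at */

	printf("wrong=%p right=%p buffer=%p\n", wrong, right, (void *)eeprom);
	return 0;
}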
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index cb694055ba7d..6e5070774dc2 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -303,6 +303,11 @@ static int hdpvr_probe(struct usb_interface *interface,
dev->workqueue = 0;
+ /* init video transfer queues first of all */
+ /* to prevent oops in hdpvr_delete() on error paths */
+ INIT_LIST_HEAD(&dev->free_buff_list);
+ INIT_LIST_HEAD(&dev->rec_buff_list);
+
/* register v4l2_device early so it can be used for printks */
if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
dev_err(&interface->dev, "v4l2_device_register failed\n");
@@ -325,10 +330,6 @@ static int hdpvr_probe(struct usb_interface *interface,
if (!dev->workqueue)
goto error;
- /* init video transfer queues */
- INIT_LIST_HEAD(&dev->free_buff_list);
- INIT_LIST_HEAD(&dev->rec_buff_list);
-
dev->options = hdpvr_default_options;
if (default_video_input < HDPVR_VIDEO_INPUTS)
@@ -405,7 +406,7 @@ static int hdpvr_probe(struct usb_interface *interface,
video_nr[atomic_inc_return(&dev_nr)]);
if (retval < 0) {
v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
- goto error;
+ goto reg_fail;
}
/* let the user know what node this device is now attached to */
diff --git a/drivers/media/usb/usbtv/Kconfig b/drivers/media/usb/usbtv/Kconfig
index 8864436464bf..7c5b86006ee6 100644
--- a/drivers/media/usb/usbtv/Kconfig
+++ b/drivers/media/usb/usbtv/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_USBTV
tristate "USBTV007 video capture support"
- depends on VIDEO_DEV
+ depends on VIDEO_V4L2
select VIDEOBUF2_VMALLOC
---help---
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c
index bf43f874685e..91650173941a 100644
--- a/drivers/media/usb/usbtv/usbtv.c
+++ b/drivers/media/usb/usbtv/usbtv.c
@@ -57,7 +57,7 @@
#define USBTV_CHUNK_SIZE 256
#define USBTV_CHUNK 240
#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \
- / 2 / USBTV_CHUNK)
+ / 4 / USBTV_CHUNK)
/* Chunk header. */
#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
@@ -89,6 +89,7 @@ struct usbtv {
/* Number of the currently processed frame, useful to find
* out when a new one begins. */
u32 frame_id;
+ int chunks_done;
int iso_size;
unsigned int sequence;
@@ -202,6 +203,26 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
return 0;
}
+/* Copy data from chunk into a frame buffer, deinterlacing the data
+ * into every second line. Unfortunately, they don't align nicely into
+ * 720 pixel lines, as the chunk is 240 words long, which is 480 pixels.
+ * Therefore, we break down the chunk into two halves before copying,
+ * so that we can interleave a line if needed. */
+static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd)
+{
+ int half;
+
+ for (half = 0; half < 2; half++) {
+ int part_no = chunk_no * 2 + half;
+ int line = part_no / 3;
+ int part_index = (line * 2 + !odd) * 3 + (part_no % 3);
+
+ u32 *dst = &frame[part_index * USBTV_CHUNK/2];
+ memcpy(dst, src, USBTV_CHUNK/2 * sizeof(*src));
+ src += USBTV_CHUNK/2;
+ }
+}
+
/* Called for each 256-byte image chunk.
* First word identifies the chunk, followed by 240 words of image
* data and padding. */
@@ -218,17 +239,17 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
frame_id = USBTV_FRAME_ID(chunk);
odd = USBTV_ODD(chunk);
chunk_no = USBTV_CHUNK_NO(chunk);
-
- /* Deinterlace. TODO: Use interlaced frame format. */
- chunk_no = (chunk_no - chunk_no % 3) * 2 + chunk_no % 3;
- chunk_no += !odd * 3;
-
if (chunk_no >= USBTV_CHUNKS)
return;
/* Beginning of a frame. */
- if (chunk_no == 0)
+ if (chunk_no == 0) {
usbtv->frame_id = frame_id;
+ usbtv->chunks_done = 0;
+ }
+
+ if (usbtv->frame_id != frame_id)
+ return;
spin_lock_irqsave(&usbtv->buflock, flags);
if (list_empty(&usbtv->bufs)) {
@@ -241,19 +262,23 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
frame = vb2_plane_vaddr(&buf->vb, 0);
- /* Copy the chunk. */
- memcpy(&frame[chunk_no * USBTV_CHUNK], &chunk[1],
- USBTV_CHUNK * sizeof(chunk[1]));
+ /* Copy the chunk data. */
+ usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
+ usbtv->chunks_done++;
/* Last chunk in a frame, signalling an end */
- if (usbtv->frame_id && chunk_no == USBTV_CHUNKS-1) {
+ if (odd && chunk_no == USBTV_CHUNKS-1) {
int size = vb2_plane_size(&buf->vb, 0);
+ enum vb2_buffer_state state = usbtv->chunks_done ==
+ USBTV_CHUNKS ?
+ VB2_BUF_STATE_DONE :
+ VB2_BUF_STATE_ERROR;
buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
buf->vb.v4l2_buf.sequence = usbtv->sequence++;
v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
vb2_set_plane_payload(&buf->vb, 0, size);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&buf->vb, state);
list_del(&buf->list);
}
@@ -518,7 +543,7 @@ static int usbtv_queue_setup(struct vb2_queue *vq,
if (*nbuffers < 2)
*nbuffers = 2;
*nplanes = 1;
- sizes[0] = USBTV_CHUNK * USBTV_CHUNKS * sizeof(u32);
+ sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32);
return 0;
}
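The deinterlacing arithmetic in usbtv_chunk_to_vbuf() above is compact; this standalone sketch just replays it to show where each half-chunk lands (the loop bounds are illustrative, covering the first three chunks of each field):

#include <stdio.h>

#define USBTV_CHUNK 240	/* words per chunk; 240 words = 480 pixels */

int main(void)
{
	int chunk_no, odd, half;

	for (chunk_no = 0; chunk_no < 3; chunk_no++)
		for (odd = 0; odd <= 1; odd++)
			for (half = 0; half < 2; half++) {
				/* same index math as the driver: three
				 * 240-pixel halves tile one 720-pixel line,
				 * odd/even fields interleave */
				int part_no = chunk_no * 2 + half;
				int line = part_no / 3;
				int part_index =
					(line * 2 + !odd) * 3 + (part_no % 3);

				printf("chunk %d %s half %d -> dst word %d\n",
				       chunk_no, odd ? "odd " : "even",
				       half, part_index * USBTV_CHUNK / 2);
			}
	return 0;
}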
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index a746ba272f04..a956053608f9 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -1007,7 +1007,7 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
soft = &pkt.soft.rfc1201;
- lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE));
+ lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
if (pkt.hard.offset[0]) {
ofs = pkt.hard.offset[0];
length = 256 - ofs;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 07f257d44a1e..e48cb339c0c6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n)
* The bonding ndo_neigh_setup is called at init time before any
* slave exists. So we must declare proxy setup function which will
* be used at run time to resolve the actual slave neigh param setup.
+ *
+ * It's also called by master devices (such as vlans) to set up their
+ * underlying devices. In that case, do nothing; we're already set up from
+ * our init.
*/
static int bond_neigh_setup(struct net_device *dev,
struct neigh_parms *parms)
{
- parms->neigh_setup = bond_neigh_init;
+ /* modify only our neigh_parms */
+ if (parms->dev == dev)
+ parms->neigh_setup = bond_neigh_init;
return 0;
}
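A userspace model of the guard just added (types and names are stand-ins for the kernel structures): the same setup hook is invoked both for the bond's own neigh_parms and, via upper devices such as VLANs, for parms that belong to someone else, so it must only write through parms it owns.

#include <stdio.h>

struct net_device { const char *name; };
struct neigh_parms {
	struct net_device *dev;
	int (*neigh_setup)(void);
};

static int bond_neigh_init(void) { return 0; }

static int bond_neigh_setup(struct net_device *dev,
			    struct neigh_parms *parms)
{
	if (parms->dev == dev)	/* modify only our own neigh_parms */
		parms->neigh_setup = bond_neigh_init;
	return 0;
}

int main(void)
{
	struct net_device bond = { "bond0" }, vlan = { "bond0.100" };
	struct neigh_parms own = { &bond, NULL }, upper = { &vlan, NULL };

	bond_neigh_setup(&bond, &own);
	bond_neigh_setup(&bond, &upper);	/* reached via the VLAN */
	printf("own hook set: %d, vlan hook set: %d\n",
	       own.neigh_setup != NULL, upper.neigh_setup != NULL);
	return 0;
}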
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 6aa7b3266c80..ac6177d3befc 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -412,10 +412,20 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
switch (msg->msg.hdr.cmd) {
case CMD_CAN_RX:
+ if (msg->msg.rx.net >= dev->net_count) {
+ dev_err(dev->udev->dev.parent, "format error\n");
+ break;
+ }
+
esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
break;
case CMD_CAN_TX:
+ if (msg->msg.txdone.net >= dev->net_count) {
+ dev_err(dev->udev->dev.parent, "format error\n");
+ break;
+ }
+
esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net],
msg);
break;
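The shape of the check added above, in miniature: an index carried in a message from the device is untrusted input and must be bounds-checked before it indexes nets[] (names and sizes here are illustrative).

#include <stdio.h>

#define NET_COUNT 2

static const char *nets[NET_COUNT] = { "can0", "can1" };

static void handle_rx(unsigned int net)
{
	if (net >= NET_COUNT) {
		fprintf(stderr, "format error\n");
		return;
	}
	printf("deliver to %s\n", nets[net]);
}

int main(void)
{
	handle_rx(1);
	handle_rx(7);	/* malformed message: rejected, not an OOB read */
	return 0;
}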
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 25723d8ee201..925ab8ec9329 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
if ((mc->ptr + rec_len) > mc->end)
goto decode_failed;
- memcpy(cf->data, mc->ptr, rec_len);
+ memcpy(cf->data, mc->ptr, cf->can_dlc);
mc->ptr += rec_len;
}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index cbd388eea682..8becd3d838b5 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -779,6 +779,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
usb_unanchor_urb(urb);
usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf,
urb->transfer_dma);
+ usb_free_urb(urb);
break;
}
diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig
index 53ad213e865b..d8d95d4cd45a 100644
--- a/drivers/net/ethernet/allwinner/Kconfig
+++ b/drivers/net/ethernet/allwinner/Kconfig
@@ -3,19 +3,20 @@
#
config NET_VENDOR_ALLWINNER
- bool "Allwinner devices"
- default y
- depends on ARCH_SUNXI
- ---help---
- If you have a network (Ethernet) card belonging to this
- class, say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ bool "Allwinner devices"
+ default y
- Note that the answer to this question doesn't directly
- affect the kernel: saying N will just cause the configurator
- to skip all the questions about Allwinner cards. If you say Y,
- you will be asked for your specific card in the following
- questions.
+ depends on ARCH_SUNXI
+ ---help---
+ If you have a network (Ethernet) card belonging to this
+ class, say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly
+ affect the kernel: saying N will just cause the configurator
+ to skip all the questions about Allwinner cards. If you say Y,
+ you will be asked for your specific card in the following
+ questions.
if NET_VENDOR_ALLWINNER
@@ -26,6 +27,7 @@ config SUN4I_EMAC
select CRC32
select MII
select PHYLIB
+ select MDIO_SUN4I
---help---
Support for Allwinner A10 EMAC ethernet driver.
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index f1b121ee5525..55d79cb53a79 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
struct arc_emac_priv *priv = netdev_priv(ndev);
unsigned int work_done;
- for (work_done = 0; work_done <= budget; work_done++) {
+ for (work_done = 0; work_done < budget; work_done++) {
unsigned int *last_rx_bd = &priv->last_rx_bd;
struct net_device_stats *stats = &priv->stats;
struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
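The off-by-one above matters because a NAPI poll routine may process at most `budget` packets per call; iterating with `<=` consumes budget + 1 and skews the caller's accounting. A trivial model:

#include <stdio.h>

int main(void)
{
	int budget = 4, work_done;

	for (work_done = 0; work_done < budget; work_done++)
		;	/* process one rx descriptor per iteration */

	printf("processed %d of at most %d\n", work_done, budget);
	return 0;
}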
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index b2bf324631dc..0f0556526ba9 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -520,6 +520,9 @@ struct atl1c_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct napi_struct napi;
+ struct page *rx_page;
+ unsigned int rx_page_offset;
+ unsigned int rx_frag_size;
struct atl1c_hw hw;
struct atl1c_hw_stats hw_stats;
struct mii_if_info mii; /* MII interface info */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 786a87483298..a36a760ada28 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -481,10 +481,15 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
struct net_device *dev)
{
+ unsigned int head_size;
int mtu = dev->mtu;
adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
+
+ head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ adapter->rx_frag_size = roundup_pow_of_two(head_size);
}
static netdev_features_t atl1c_fix_features(struct net_device *netdev,
@@ -952,6 +957,10 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
kfree(adapter->tpd_ring[0].buffer_info);
adapter->tpd_ring[0].buffer_info = NULL;
}
+ if (adapter->rx_page) {
+ put_page(adapter->rx_page);
+ adapter->rx_page = NULL;
+ }
}
/**
@@ -1639,6 +1648,35 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
skb_checksum_none_assert(skb);
}
+static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
+{
+ struct sk_buff *skb;
+ struct page *page;
+
+ if (adapter->rx_frag_size > PAGE_SIZE)
+ return netdev_alloc_skb(adapter->netdev,
+ adapter->rx_buffer_len);
+
+ page = adapter->rx_page;
+ if (!page) {
+ adapter->rx_page = page = alloc_page(GFP_ATOMIC);
+ if (unlikely(!page))
+ return NULL;
+ adapter->rx_page_offset = 0;
+ }
+
+ skb = build_skb(page_address(page) + adapter->rx_page_offset,
+ adapter->rx_frag_size);
+ if (likely(skb)) {
+ adapter->rx_page_offset += adapter->rx_frag_size;
+ if (adapter->rx_page_offset >= PAGE_SIZE)
+ adapter->rx_page = NULL;
+ else
+ get_page(page);
+ }
+ return skb;
+}
+
static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
{
struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
@@ -1660,7 +1698,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
while (next_info->flags & ATL1C_BUFFER_FREE) {
rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
- skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len);
+ skb = atl1c_alloc_skb(adapter);
if (unlikely(!skb)) {
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev, "alloc rx buffer failed\n");
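A rough userspace model of the page-fragment rx allocator added above (malloc stands in for alloc_page, the explicit refcount for get_page/put_page; frees are omitted for brevity): power-of-two fragments are carved from one page, and the page stays referenced until every outstanding fragment is released.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

struct page { int refcount; char data[PAGE_SIZE]; };

static struct page *rx_page;
static unsigned int rx_page_offset;
static unsigned int rx_frag_size = 1024;	/* roundup_pow_of_two(head_size) */

static char *alloc_rx_frag(void)
{
	char *buf;

	if (!rx_page) {
		rx_page = calloc(1, sizeof(*rx_page));
		if (!rx_page)
			return NULL;
		rx_page->refcount = 1;
		rx_page_offset = 0;
	}
	buf = rx_page->data + rx_page_offset;
	rx_page_offset += rx_frag_size;
	if (rx_page_offset >= PAGE_SIZE)
		rx_page = NULL;		/* last frag takes the original ref */
	else
		rx_page->refcount++;	/* one reference per outstanding frag */
	return buf;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)		/* 5th call rolls over to a new page */
		printf("frag %d -> %p\n", i, (void *)alloc_rx_frag());
	return 0;
}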
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index dedbd76c033e..00b88cbfde25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -486,7 +486,7 @@ struct bnx2x_fastpath {
struct napi_struct napi;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define BNX2X_FP_STATE_IDLE 0
#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
@@ -498,7 +498,7 @@ struct bnx2x_fastpath {
#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
/* protect state */
spinlock_t lock;
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
union host_hc_status_block status_blk;
/* chip independent shortcuts into sb structure */
@@ -572,7 +572,7 @@ struct bnx2x_fastpath {
#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
{
spin_lock_init(&fp->lock);
@@ -680,7 +680,7 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
{
return false;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
@@ -1333,6 +1333,8 @@ enum {
BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_SP_RTNL_TX_STOP,
+ BNX2X_SP_RTNL_TX_RESUME,
};
struct bnx2x_prev_path_list {
@@ -1502,6 +1504,7 @@ struct bnx2x {
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
#define IS_VF_FLAG (1 << 22)
#define INTERRUPTS_ENABLED_FLAG (1 << 23)
+#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -1830,6 +1833,8 @@ struct bnx2x {
int fp_array_size;
u32 dump_preset_idx;
+ bool stats_started;
+ struct semaphore stats_sema;
};
/* Tx queues may be less or equal to Rx queues */
@@ -2451,4 +2456,6 @@ enum bnx2x_pci_bus_speed {
BNX2X_PCI_LINK_SPEED_5000 = 5000,
BNX2X_PCI_LINK_SPEED_8000 = 8000
};
+
+void bnx2x_set_local_cmng(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ee350bde1818..f2d1ff10054b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3117,7 +3117,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
return work_done;
}
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
int bnx2x_low_latency_recv(struct napi_struct *napi)
{
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 0c94df47e0e8..fcf2761d8828 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -30,10 +30,8 @@
#include "bnx2x_dcb.h"
/* forward declarations of dcbx related functions */
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
u32 *set_configuration_ets_pg,
u32 *pri_pg_tbl);
@@ -425,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
bnx2x_pfc_clear(bp);
}
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
{
struct bnx2x_func_state_params func_params = {NULL};
+ int rc;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_STOP;
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
- return bnx2x_func_state_change(bp, &func_params);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to hold traffic for HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
}
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
{
struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_tx_start_params *tx_params =
&func_params.params.tx_start;
+ int rc;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_START;
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
bnx2x_dcbx_fw_struct(bp, tx_params);
DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
- return bnx2x_func_state_change(bp, &func_params);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to resume traffic after HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
}
static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
@@ -744,7 +764,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
- bnx2x_dcbx_stop_hw_tx(bp);
+ set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
+
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
return;
}
@@ -753,7 +775,13 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bnx2x_pfc_set_pfc(bp);
bnx2x_dcbx_update_ets_params(bp);
- bnx2x_dcbx_resume_hw_tx(bp);
+
+ /* ets may affect cmng configuration: reinit it in hw */
+ bnx2x_set_local_cmng(bp);
+
+ set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state);
+
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
@@ -2363,21 +2391,24 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
case DCB_FEATCFG_ATTR_PG:
if (bp->dcbx_local_feat.ets.enabled)
*flags |= DCB_FEATCFG_ENABLE;
- if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+ if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
case DCB_FEATCFG_ATTR_PFC:
if (bp->dcbx_local_feat.pfc.enabled)
*flags |= DCB_FEATCFG_ENABLE;
if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
- DCBX_LOCAL_PFC_MISMATCH))
+ DCBX_LOCAL_PFC_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
case DCB_FEATCFG_ATTR_APP:
if (bp->dcbx_local_feat.app.enabled)
*flags |= DCB_FEATCFG_ENABLE;
if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
- DCBX_LOCAL_APP_MISMATCH))
+ DCBX_LOCAL_APP_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
default:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 125bd1b6586f..804b8f64463e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -199,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
#endif /* BCM_DCBNL */
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+
#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5018e52ae2ad..32767f6aa33f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1300,6 +1300,9 @@ struct drv_func_mb {
#define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+ #define DRV_MSG_CODE_RMMOD 0xdb000000
+ #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f
+
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1372,6 +1375,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+ #define FW_MSG_CODE_RMMOD_ACK 0xdb100000
+
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e5da07858a2f..8bdc8b973007 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2261,6 +2261,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp)
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
}
+static void bnx2x_init_dropless_fc(struct bnx2x *bp)
+{
+ u32 pause_enabled = 0;
+
+ if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_enabled = 1;
+
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
+ pause_enabled);
+ }
+
+ DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
+ pause_enabled ? "enabled" : "disabled");
+}
+
int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
@@ -2294,6 +2311,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
bnx2x_release_phy_lock(bp);
+ bnx2x_init_dropless_fc(bp);
+
bnx2x_calc_fc_adv(bp);
if (bp->link_vars.link_up) {
@@ -2315,6 +2334,8 @@ void bnx2x_link_set(struct bnx2x *bp)
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
+ bnx2x_init_dropless_fc(bp);
+
bnx2x_calc_fc_adv(bp);
} else
BNX2X_ERR("Bootcode is missing - can not set link\n");
@@ -2476,7 +2497,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
input.port_rate = bp->link_vars.line_speed;
- if (cmng_type == CMNG_FNS_MINMAX) {
+ if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
int vn;
/* read mf conf from shmem */
@@ -2533,6 +2554,21 @@ static void storm_memset_cmng(struct bnx2x *bp,
}
}
+/* init cmng mode in HW according to local configuration */
+void bnx2x_set_local_cmng(struct bnx2x *bp)
+{
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ } else {
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "single function mode without fairness\n");
+ }
+}
+
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
@@ -2541,20 +2577,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_link_update(&bp->link_params, &bp->link_vars);
- if (bp->link_vars.link_up) {
+ bnx2x_init_dropless_fc(bp);
- /* dropless flow control */
- if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
- int port = BP_PORT(bp);
- u32 pause_enabled = 0;
-
- if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
- pause_enabled = 1;
-
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
- pause_enabled);
- }
+ if (bp->link_vars.link_up) {
if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
struct host_port_stats *pstats;
@@ -2568,17 +2593,8 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
- if (bp->link_vars.link_up && bp->link_vars.line_speed) {
- int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
-
- if (cmng_fns != CMNG_FNS_NONE) {
- bnx2x_cmng_fns_init(bp, false, cmng_fns);
- storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
- } else
- /* rate shaping and fairness are disabled */
- DP(NETIF_MSG_IFUP,
- "single function mode without fairness\n");
- }
+ if (bp->link_vars.link_up && bp->link_vars.line_speed)
+ bnx2x_set_local_cmng(bp);
__bnx2x_link_report(bp);
@@ -9639,6 +9655,12 @@ sp_rtnl_not_reset:
&bp->sp_rtnl_state))
bnx2x_pf_set_vfs_vlan(bp);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state))
+ bnx2x_dcbx_stop_hw_tx(bp);
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
+ bnx2x_dcbx_resume_hw_tx(bp);
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -10362,6 +10384,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+ BC_SUPPORTS_RMMOD_CMD : 0;
+
boot_mode = SHMEM_RD(bp,
dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -11137,6 +11163,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
int tmp;
u32 cfg;
+ if (IS_VF(bp))
+ return 0;
+
if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
/* Take function: tmp = func */
tmp = BP_ABS_FUNC(bp);
@@ -11524,6 +11553,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
spin_lock_init(&bp->stats_lock);
+ sema_init(&bp->stats_sema, 1);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12026,7 +12056,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnx2x_low_latency_recv,
#endif
};
@@ -12817,13 +12847,17 @@ static void __bnx2x_remove(struct pci_dev *pdev,
bnx2x_dcbnl_update_applist(bp, true);
#endif
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp) &&
+ (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
/* Close the interface - either directly or implicitly */
if (remove_netdev) {
unregister_netdev(dev);
} else {
rtnl_lock();
- if (netif_running(dev))
- bnx2x_close(dev);
+ dev_close(dev);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 95861efb5051..ad83f4b48777 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1747,11 +1747,8 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
- DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
- if (!IS_SRIOV(bp))
- return;
-
- REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+ REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
@@ -3084,8 +3081,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
pci_disable_sriov(bp->pdev);
}
-static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
- struct bnx2x_virtf *vf)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin)
{
if (bp->state != BNX2X_STATE_OPEN) {
BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3103,12 +3101,22 @@ static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
return -EINVAL;
}
- if (!vf) {
+ /* init members */
+ *vf = BP_VF(bp, vfidx);
+ *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ if (!*vf) {
BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
vfidx);
return -EINVAL;
}
+ if (!*bulletin) {
+ BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -3116,17 +3124,19 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
struct ifla_vf_info *ivi)
{
struct bnx2x *bp = netdev_priv(dev);
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
- struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_vlan_mac_obj *mac_obj;
+ struct bnx2x_vlan_mac_obj *vlan_obj;
int rc;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
- if (!mac_obj || !vlan_obj || !bulletin) {
+ mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+ if (!mac_obj || !vlan_obj) {
BNX2X_ERR("VF partially initialized\n");
return -EINVAL;
}
@@ -3183,11 +3193,11 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
struct bnx2x *bp = netdev_priv(dev);
int rc, q_logical_state;
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
if (!is_valid_ether_addr(mac)) {
@@ -3249,11 +3259,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
struct bnx2x *bp = netdev_priv(dev);
int rc, q_logical_state;
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
@@ -3463,7 +3473,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
alloc_mem_err:
BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
sizeof(struct bnx2x_vf_mbx_msg));
- BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
sizeof(union pf_vf_bulletin));
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 98366abd02bd..d63d1327b051 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
* Statistics service functions
*/
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
{
struct dmae_command *dmae;
u32 opcode;
@@ -518,7 +519,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
*stats_comp = 0;
}
-static void bnx2x_stats_start(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_start(struct bnx2x *bp)
{
/* vfs travel through here as part of the statistics FSM, but no action
* is required
@@ -534,13 +536,34 @@ static void bnx2x_stats_start(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
+
+ bp->stats_started = true;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
- bnx2x_stats_pmf_update(bp);
- bnx2x_stats_start(bp);
+ __bnx2x_stats_pmf_update(bp);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
+}
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ __bnx2x_stats_pmf_update(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -550,8 +573,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
*/
if (IS_VF(bp))
return;
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
- bnx2x_stats_start(bp);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -888,9 +914,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
/* Make sure we use the value of the counter
* used for sending the last stats ramrod.
*/
- spin_lock_bh(&bp->stats_lock);
cur_stats_counter = bp->stats_counter - 1;
- spin_unlock_bh(&bp->stats_lock);
/* are storm stats valid? */
if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
@@ -1227,12 +1251,18 @@ static void bnx2x_stats_update(struct bnx2x *bp)
{
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
- if (bnx2x_edebug_stats_stopped(bp))
+ /* we run update from timer context, so give up
+ * if somebody is in the middle of transition
+ */
+ if (down_trylock(&bp->stats_sema))
return;
+ if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
+ goto out;
+
if (IS_PF(bp)) {
if (*stats_comp != DMAE_COMP_VAL)
- return;
+ goto out;
if (bp->port.pmf)
bnx2x_hw_stats_update(bp);
@@ -1242,7 +1272,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
BNX2X_ERR("storm stats were not updated for 3 times\n");
bnx2x_panic();
}
- return;
+ goto out;
}
} else {
/* vf doesn't collect HW statistics, and doesn't get completions
@@ -1256,7 +1286,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
/* vf is done */
if (IS_VF(bp))
- return;
+ goto out;
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1267,6 +1297,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
+
+out:
+ up(&bp->stats_sema);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1332,6 +1365,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
{
int update = 0;
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+
+ bp->stats_started = false;
+
bnx2x_stats_comp(bp);
if (bp->port.pmf)
@@ -1348,6 +1386,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_stats_comp(bp);
}
+
+ up(&bp->stats_sema);
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1376,15 +1416,17 @@ static const struct {
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
enum bnx2x_stats_state state;
+ void (*action)(struct bnx2x *bp);
if (unlikely(bp->panic))
return;
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+ action = bnx2x_stats_stm[state][event].action;
spin_unlock_bh(&bp->stats_lock);
- bnx2x_stats_stm[state][event].action(bp);
+ action(bp);
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
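The last hunk above is the classic way to let FSM actions sleep: sample the action pointer while holding the spinlock, then call it with the lock dropped. A compact pthread model of that pattern (all names illustrative):

#include <pthread.h>
#include <stdio.h>

typedef void (*stats_action_t)(void);

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static stats_action_t current_action;

static void stats_update(void) { puts("update"); }

static void stats_handle(void)
{
	stats_action_t action;

	pthread_mutex_lock(&stats_lock);
	action = current_action;	/* state transition happens here */
	pthread_mutex_unlock(&stats_lock);

	if (action)
		action();	/* may sleep; the lock is no longer held */
}

int main(void)
{
	current_action = stats_update;
	stats_handle();
	return 0;
}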
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d964f302ac94..0da2214ef1b9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17625,7 +17625,8 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
- pci_disable_device(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
return err;
}
@@ -17773,7 +17774,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
- if (!netif_running(netdev))
+ /* We probably don't have netdev yet */
+ if (!netdev || !netif_running(netdev))
goto done;
tg3_phy_stop(tp);
@@ -17794,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
done:
if (state == pci_channel_io_perm_failure) {
- tg3_napi_enable(tp);
- dev_close(netdev);
+ if (netdev) {
+ tg3_napi_enable(tp);
+ dev_close(netdev);
+ }
err = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_disable_device(pdev);
@@ -17825,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
rtnl_lock();
if (pci_enable_device(pdev)) {
- netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
goto done;
}
@@ -17833,7 +17838,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
pci_save_state(pdev);
- if (!netif_running(netdev)) {
+ if (!netdev || !netif_running(netdev)) {
rc = PCI_ERS_RESULT_RECOVERED;
goto done;
}
@@ -17845,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
rc = PCI_ERS_RESULT_RECOVERED;
done:
- if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
+ if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
tg3_napi_enable(tp);
dev_close(netdev);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 687ec4a8bb48..9c89dc8fe105 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
q->pg_chunk.offset = 0;
mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
0, q->alloc_size, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
- __free_pages(q->pg_chunk.page, order);
- q->pg_chunk.page = NULL;
- return -EIO;
- }
q->pg_chunk.mapping = mapping;
}
sd->pg_chunk = q->pg_chunk;
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
return flits_to_desc(flits);
}
-
-/* map_skb - map a packet main body and its page fragments
- * @pdev: the PCI device
- * @skb: the packet
- * @addr: placeholder to save the mapped addresses
- *
- * map the main body of an sk_buff and its page fragments, if any.
- */
-static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
- dma_addr_t *addr)
-{
- const skb_frag_t *fp, *end;
- const struct skb_shared_info *si;
-
- *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, *addr))
- goto out_err;
-
- si = skb_shinfo(skb);
- end = &si->frags[si->nr_frags];
-
- for (fp = si->frags; fp < end; fp++) {
- *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
- DMA_TO_DEVICE);
- if (pci_dma_mapping_error(pdev, *addr))
- goto unwind;
- }
- return 0;
-
-unwind:
- while (fp-- > si->frags)
- dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
- DMA_TO_DEVICE);
-
- pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
-out_err:
- return -ENOMEM;
-}
-
/**
- * write_sgl - populate a scatter/gather list for a packet
+ * make_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @sgp: the SGL to populate
* @start: start address of skb main body data to include in the SGL
* @len: length of skb main body data to include in the SGL
- * @addr: the list of the mapped addresses
+ * @pdev: the PCI device
*
- * Copies the scatter/gather list for the buffers that make up a packet
+ * Generates a scatter/gather list for the buffers that make up a packet
* and returns the SGL size in 8-byte words. The caller must size the SGL
* appropriately.
*/
-static inline unsigned int write_sgl(const struct sk_buff *skb,
+static inline unsigned int make_sgl(const struct sk_buff *skb,
struct sg_ent *sgp, unsigned char *start,
- unsigned int len, const dma_addr_t *addr)
+ unsigned int len, struct pci_dev *pdev)
{
- unsigned int i, j = 0, k = 0, nfrags;
+ dma_addr_t mapping;
+ unsigned int i, j = 0, nfrags;
if (len) {
+ mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
sgp->len[0] = cpu_to_be32(len);
- sgp->addr[j++] = cpu_to_be64(addr[k++]);
+ sgp->addr[0] = cpu_to_be64(mapping);
+ j = 1;
}
nfrags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
- sgp->addr[j] = cpu_to_be64(addr[k++]);
+ sgp->addr[j] = cpu_to_be64(mapping);
j ^= 1;
if (j == 0)
++sgp;
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
const struct port_info *pi,
unsigned int pidx, unsigned int gen,
struct sge_txq *q, unsigned int ndesc,
- unsigned int compl, const dma_addr_t *addr)
+ unsigned int compl)
{
unsigned int flits, sgl_flits, cntrl, tso_info;
struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
}
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
+ sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct netdev_queue *txq;
struct sge_qset *qs;
struct sge_txq *q;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
/*
* The chip min packet length is 9 octets but play safe and reject
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
q->in_use += ndesc;
if (unlikely(credits - ndesc < q->stop_thres)) {
t3_stop_tx_queue(txq, qs, q);
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!skb_shared(skb)))
skb_orphan(skb);
- write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
+ write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
check_ring_tx_db(adap, q);
return NETDEV_TX_OK;
}
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
*/
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
struct sge_txq *q, unsigned int pidx,
- unsigned int gen, unsigned int ndesc,
- const dma_addr_t *addr)
+ unsigned int gen, unsigned int ndesc)
{
unsigned int sgl_flits, flits;
struct work_request_hdr *from;
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
flits = skb_transport_offset(skb) / 8;
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
- skb_tail_pointer(skb) -
- skb_transport_header(skb), addr);
+ sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+ skb->tail - skb->transport_header,
+ adap->pdev);
if (need_skb_unmap()) {
setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
skb->destructor = deferred_unmap_destructor;
@@ -1705,11 +1658,6 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
goto again;
}
- if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
- spin_unlock(&q->lock);
- return NET_XMIT_SUCCESS;
- }
-
gen = q->gen;
q->in_use += ndesc;
pidx = q->pidx;
@@ -1720,7 +1668,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
}
spin_unlock(&q->lock);
- write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
check_ring_tx_db(adap, q);
return NET_XMIT_SUCCESS;
}
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data)
struct sge_txq *q = &qs->txq[TXQ_OFLD];
const struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
- unsigned int written = 0;
spin_lock(&q->lock);
again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1758,14 +1705,10 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
break;
}
- if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
- break;
-
gen = q->gen;
q->in_use += ndesc;
pidx = q->pidx;
q->pidx += ndesc;
- written += ndesc;
if (q->pidx >= q->size) {
q->pidx -= q->size;
q->gen ^= 1;
@@ -1773,8 +1716,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
__skb_unlink(skb, &q->sendq);
spin_unlock(&q->lock);
- write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
- (dma_addr_t *)skb->head);
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
spin_lock(&q->lock);
}
spin_unlock(&q->lock);
@@ -1784,9 +1726,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
wmb();
- if (likely(written))
- t3_write_reg(adap, A_SG_KDOORBELL,
- F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
/**
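The sg_ent packing that make_sgl() inherits from write_sgl() is worth spelling out: each descriptor carries two length/address pairs, so j toggles between slot 0 and slot 1 and the descriptor pointer advances only when slot 1 wraps. A standalone replay with plain integers standing in for DMA addresses:

#include <stdio.h>
#include <string.h>

struct sg_ent { unsigned int len[2]; unsigned long long addr[2]; };

int main(void)
{
	struct sg_ent sgl[4], *sgp = sgl;
	unsigned int j = 0;
	int frag;

	memset(sgl, 0, sizeof(sgl));
	for (frag = 0; frag < 5; frag++) {
		sgp->len[j] = 100 + frag;
		sgp->addr[j] = 0x1000ull * (frag + 1);
		j ^= 1;
		if (j == 0)
			++sgp;	/* both slots used, move to next entry */
	}
	printf("used %ld entries (last one half-full: %s)\n",
	       (long)(sgp - sgl) + (j != 0), j ? "yes" : "no");
	return 0;
}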
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 6e6e0a117ee2..8ec5d74ad44d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
adapter->max_event_queues = le16_to_cpu(desc->eq_count);
adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
+
+ /* Clear flags that driver is not interested in */
+ adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT;
}
err:
mutex_unlock(&adapter->mbox_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 5228d88c5a02..1b3b9e886412 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -563,6 +563,12 @@ enum be_if_flags {
BE_IF_FLAGS_MULTICAST = 0x1000
};
+#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
+ BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+ BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
+ BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
+ BE_IF_FLAGS_UNTAGGED)
+
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 181edb522450..4559c35eea13 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2563,8 +2563,8 @@ static int be_close(struct net_device *netdev)
/* Wait for all pending tx completions to arrive so that
* all tx skbs are freed.
*/
- be_tx_compl_clean(adapter);
netif_tx_disable(netdev);
+ be_tx_compl_clean(adapter);
be_rx_qs_destroy(adapter);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 2b0a0ea4f8e7..ae236009f1a8 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -259,6 +259,7 @@ struct bufdesc_ex {
struct fec_enet_delayed_work {
struct delayed_work delay_work;
bool timeout;
+ bool trig_tx;
};
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d3ad5ea711d3..77ea0db0bbfc 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -93,6 +93,20 @@ static void set_multicast_list(struct net_device *ndev);
#define FEC_QUIRK_HAS_CSUM (1 << 5)
/* Controller has hardware vlan support */
#define FEC_QUIRK_HAS_VLAN (1 << 6)
+/* ENET IP errata ERR006358
+ *
+ * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
+ * detected as not set during a prior frame transmission, then the
+ * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
+ * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
+ * frames not being transmitted until there is a 0-to-1 transition on
+ * ENET_TDAR[TDAR].
+ */
+#define FEC_QUIRK_ERR006358 (1 << 7)
static struct platform_device_id fec_devtype[] = {
{
@@ -112,7 +126,7 @@ static struct platform_device_id fec_devtype[] = {
.name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN,
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
}, {
.name = "mvf600-fec",
.driver_data = FEC_QUIRK_ENET_MAC,
@@ -275,16 +289,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
- struct bufdesc *bdp;
+ struct bufdesc *bdp, *bdp_pre;
void *bufaddr;
unsigned short status;
unsigned int index;
- if (!fep->link) {
- /* Link is down or auto-negotiation is in progress. */
- return NETDEV_TX_BUSY;
- }
-
/* Fill in a Tx ring entry */
bdp = fep->cur_tx;
@@ -370,6 +379,15 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ebdp->cbd_esc |= BD_ENET_TX_PINS;
}
}
+
+ bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
+ !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
+ fep->delay_work.trig_tx = true;
+ schedule_delayed_work(&(fep->delay_work.delay_work),
+ msecs_to_jiffies(1));
+ }
+
/* If this was the last BD in the ring, start at the beginning again. */
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
@@ -689,6 +707,11 @@ static void fec_enet_work(struct work_struct *work)
fec_restart(fep->netdev, fep->full_duplex);
netif_wake_queue(fep->netdev);
}
+
+ if (fep->delay_work.trig_tx) {
+ fep->delay_work.trig_tx = false;
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+ }
}
static void
@@ -2279,4 +2302,5 @@ static struct platform_driver fec_driver = {
module_platform_driver(fec_driver);
+MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");
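
[Editor's note: for readers tracing the ERR006358 quirk end to end, the two hunks above combine into one flow — detect the hazard at enqueue time, then force the doorbell from the delayed worker. All names come from the diff; the surrounding driver context is assumed:

    /* in fec_enet_start_xmit(): the previous TxBD was already consumed,
     * so per ERR006358 the TDAR write may be silently dropped */
    bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
    if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
        !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
            fep->delay_work.trig_tx = true;
            schedule_delayed_work(&fep->delay_work.delay_work,
                                  msecs_to_jiffies(1));
    }

    /* in fec_enet_work(): the write to FEC_X_DES_ACTIVE kicks the
     * transmitter again, giving the hardware the 0-to-1 transition
     * on TDAR that the errata text calls for */
    if (fep->delay_work.trig_tx) {
            fep->delay_work.trig_tx = false;
            writel(0, fep->hwp + FEC_X_DES_ACTIVE);
    }
]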
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 6a0c1b66ce54..c1d72c03cb59 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3739,9 +3739,8 @@ static void igb_set_rx_mode(struct net_device *netdev)
rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
if (netdev->flags & IFF_PROMISC) {
- u32 mrqc = rd32(E1000_MRQC);
/* retain VLAN HW filtering if in VT mode */
- if (mrqc & E1000_MRQC_ENABLE_VMDQ)
+ if (adapter->vfs_allocated_count)
rctl |= E1000_RCTL_VFE;
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7be725cdfea8..a6494e5daffe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -54,7 +54,7 @@
#include <net/busy_poll.h>
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
#define LL_EXTENDED_STATS
#endif
/* common prefix used by pr_<> macros */
@@ -366,7 +366,7 @@ struct ixgbe_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define IXGBE_QV_STATE_IDLE 0
#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
@@ -377,12 +377,12 @@ struct ixgbe_q_vector {
#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
spinlock_t lock;
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/* for dynamic allocation of rings associated with this q_vector */
struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
@@ -462,7 +462,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
return q_vector->state & IXGBE_QV_USER_PEND;
}
-#else /* CONFIG_NET_LL_RX_POLL */
+#else /* CONFIG_NET_RX_BUSY_POLL */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
}
@@ -491,7 +491,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
{
return false;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_IXGBE_HWMON
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index ac780770863d..7a77f37a7cbc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -108,9 +108,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/* Enable arbiter */
reg &= ~IXGBE_DPMCS_ARBDIS;
- /* Enable DFP and Recycle mode */
- reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
reg |= IXGBE_DPMCS_TSOEF;
+
/* Configure Max TSO packet size 34KB including payload and headers */
reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bad8f14b1941..be4b1fb3d0d2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1998,7 +1998,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
return total_rx_packets;
}
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbe_low_latency_recv(struct napi_struct *napi)
{
@@ -2030,7 +2030,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
return found;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/**
* ixgbe_configure_msix - Configure MSI-X hardware
@@ -7227,7 +7227,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 712779fb12b7..b017818bccae 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -88,6 +88,8 @@
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
+#define MVNETA_SGMII_SERDES_CFG 0x24A0
+#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@@ -655,6 +657,8 @@ static void mvneta_port_sgmii_config(struct mvneta_port *pp)
val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
val |= MVNETA_GMAC2_PSC_ENABLE;
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+
+ mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
}
/* Start the Ethernet port RX and TX activity */
@@ -2728,28 +2732,24 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
- pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
- init_timer(&pp->tx_done_timer);
- clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
pp->weight = MVNETA_RX_POLL_WEIGHT;
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
- pp->base = of_iomap(dn, 0);
- if (pp->base == NULL) {
- err = -ENOMEM;
- goto err_free_irq;
- }
-
pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) {
err = PTR_ERR(pp->clk);
- goto err_unmap;
+ goto err_free_irq;
}
clk_prepare_enable(pp->clk);
+ pp->base = of_iomap(dn, 0);
+ if (pp->base == NULL) {
+ err = -ENOMEM;
+ goto err_clk;
+ }
+
dt_mac_addr = of_get_mac_address(dn);
if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
mac_from = "device tree";
@@ -2766,6 +2766,9 @@ static int mvneta_probe(struct platform_device *pdev)
}
pp->tx_done_timer.data = (unsigned long)dev;
+ pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
+ init_timer(&pp->tx_done_timer);
+ clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
pp->tx_ring_size = MVNETA_MAX_TXD;
pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -2776,7 +2779,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_init(pp, phy_addr);
if (err < 0) {
dev_err(&pdev->dev, "can't init eth hal\n");
- goto err_clk;
+ goto err_unmap;
}
mvneta_port_power_up(pp, phy_mode);
@@ -2806,10 +2809,10 @@ static int mvneta_probe(struct platform_device *pdev)
err_deinit:
mvneta_deinit(pp);
-err_clk:
- clk_disable_unprepare(pp->clk);
err_unmap:
iounmap(pp->base);
+err_clk:
+ clk_disable_unprepare(pp->clk);
err_free_irq:
irq_dispose_mapping(dev->irq);
err_free_netdev:
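
[Editor's note: the probe reorder and the label reshuffle above are two halves of the same rule — error labels must release resources in exact reverse order of acquisition. Consolidating the patched hunks into one view (all names from the diff, surrounding probe context assumed):

    pp->clk = devm_clk_get(&pdev->dev, NULL);
    if (IS_ERR(pp->clk)) {
            err = PTR_ERR(pp->clk);
            goto err_free_irq;              /* nothing else held yet */
    }
    clk_prepare_enable(pp->clk);

    pp->base = of_iomap(dn, 0);
    if (pp->base == NULL) {
            err = -ENOMEM;
            goto err_clk;                   /* clock held, no mapping yet */
    }

    err = mvneta_init(pp, phy_addr);
    if (err < 0)
            goto err_unmap;                 /* mapping and clock both held */

    /* ... success path ... */

    err_unmap:
            iounmap(pp->base);
    err_clk:
            clk_disable_unprepare(pp->clk);
    err_free_irq:
            irq_dispose_mapping(dev->irq);
]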
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c896079728e1..ef94a591f9e5 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
}
/* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
- struct sk_buff *skb, unsigned int bufsize)
+static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+ struct sk_buff *skb, unsigned int bufsize)
{
struct skge_rx_desc *rd = e->desc;
- u64 map;
+ dma_addr_t map;
map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
PCI_DMA_FROMDEVICE);
- rd->dma_lo = map;
- rd->dma_hi = map >> 32;
+ if (pci_dma_mapping_error(skge->hw->pdev, map))
+ return -1;
+
+ rd->dma_lo = lower_32_bits(map);
+ rd->dma_hi = upper_32_bits(map);
e->skb = skb;
rd->csum1_start = ETH_HLEN;
rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, bufsize);
+ return 0;
}
/* Resume receiving using existing skb,
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev)
return -ENOMEM;
skb_reserve(skb, NET_IP_ALIGN);
- skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+ if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
} while ((e = e->next) != ring->start);
ring->to_clean = ring->start;
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev)
BUG_ON(skge->dma & 7);
- if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+ if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
err = -EINVAL;
goto free_pci_mem;
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
struct skge_tx_desc *td;
int i;
u32 control, len;
- u64 map;
+ dma_addr_t map;
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
e->skb = skb;
len = skb_headlen(skb);
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(hw->pdev, map))
+ goto mapping_error;
+
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, len);
- td->dma_lo = map;
- td->dma_hi = map >> 32;
+ td->dma_lo = lower_32_bits(map);
+ td->dma_hi = upper_32_bits(map);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
const int offset = skb_checksum_start_offset(skb);
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
+ if (dma_mapping_error(&hw->pdev->dev, map))
+ goto mapping_unwind;
e = e->next;
e->skb = skb;
tf = e->desc;
BUG_ON(tf->control & BMU_OWN);
- tf->dma_lo = map;
- tf->dma_hi = (u64) map >> 32;
+ tf->dma_lo = lower_32_bits(map);
+ tf->dma_hi = upper_32_bits(map);
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, skb_frag_size(frag));
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
}
return NETDEV_TX_OK;
+
+mapping_unwind:
+ e = skge->tx_ring.to_use;
+ pci_unmap_single(hw->pdev,
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
+ while (i-- > 0) {
+ e = e->next;
+ pci_unmap_page(hw->pdev,
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
+ }
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
pci_dma_sync_single_for_cpu(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
- len, PCI_DMA_FROMDEVICE);
+ dma_unmap_len(e, maplen),
+ PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(e->skb, skb->data, len);
pci_dma_sync_single_for_device(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
- len, PCI_DMA_FROMDEVICE);
+ dma_unmap_len(e, maplen),
+ PCI_DMA_FROMDEVICE);
skge_rx_reuse(e, skge->rx_buf_size);
} else {
struct sk_buff *nskb;
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
if (!nskb)
goto resubmit;
+ if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
+ dev_kfree_skb(nskb);
+ goto resubmit;
+ }
+
pci_unmap_single(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
prefetch(skb->data);
- skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
}
skb_put(skb, len);
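
[Editor's note: the skge changes are three coupled fixes — keep the address in dma_addr_t instead of u64, check pci_dma_mapping_error()/dma_mapping_error() before touching a descriptor (unwinding earlier frag mappings on a mid-frame failure), and split the address with the lower_32_bits()/upper_32_bits() helpers. The kernel defines upper_32_bits() as two 16-bit shifts precisely so it stays well-defined when dma_addr_t is only 32 bits wide, where a bare ">> 32" would not be. A standalone userspace demo of the split, with a made-up address value:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-ins for the kernel helpers the patch switches to */
    static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)((v >> 16) >> 16); }

    int main(void)
    {
            uint64_t map = 0x00000001deadbeefULL;  /* stand-in for a mapped dma_addr_t */

            /* what lands in the descriptor's dma_lo/dma_hi pair */
            printf("dma_lo=%08x dma_hi=%08x\n",
                   lower_32_bits(map), upper_32_bits(map));
            return 0;
    }
]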
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 727874f575ce..a28cd801a236 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -223,7 +223,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_STATS:
return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
(priv->tx_ring_num * 2) +
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
(priv->rx_ring_num * 5);
#else
(priv->rx_ring_num * 2);
@@ -276,7 +276,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i].packets;
data[index++] = priv->rx_ring[i].bytes;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
data[index++] = priv->rx_ring[i].yields;
data[index++] = priv->rx_ring[i].misses;
data[index++] = priv->rx_ring[i].cleaned;
@@ -344,7 +344,7 @@ static void mlx4_en_get_strings(struct net_device *dev,
"rx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_napi_yield", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5eac871399d8..fa37b7a61213 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -68,7 +68,7 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
return 0;
}
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
@@ -94,7 +94,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
return done;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_RFS_ACCEL
@@ -2140,7 +2140,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 8873d6802c80..6fc6dabc78d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -845,16 +845,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
MLX4_CMD_NATIVE);
if (!err && dev->caps.function != slave) {
- /* if config MAC in DB use it */
- if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
- def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
- else {
- /* set slave default_mac address */
- MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
- def_mac += slave << 8;
- priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac;
- }
-
+ def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
/* get port type - currently only eth is enabled */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index e85af922dcdc..36be3208786a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -371,7 +371,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
- if (!enable_64b_cqe_eqe) {
+ if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
if (dev_cap->flags &
(MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 35fb60e2320c..5e0aa569306a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -292,7 +292,7 @@ struct mlx4_en_rx_ring {
void *rx_info;
unsigned long bytes;
unsigned long packets;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned long yields;
unsigned long misses;
unsigned long cleaned;
@@ -318,7 +318,7 @@ struct mlx4_en_cq {
struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR 0x1e
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define MLX4_EN_CQ_STATE_IDLE 0
#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */
@@ -329,7 +329,7 @@ struct mlx4_en_cq {
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
spinlock_t poll_lock; /* protects from LLS/napi conflicts */
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
};
struct mlx4_en_port_profile {
@@ -580,7 +580,7 @@ struct mlx4_mac_entry {
struct rcu_head rcu;
};
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
spin_lock_init(&cq->poll_lock);
@@ -687,7 +687,7 @@ static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
{
return false;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 205753a04cfc..5472cbd34028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -46,7 +46,7 @@
#include "mlx5_core.h"
enum {
- CMD_IF_REV = 3,
+ CMD_IF_REV = 5,
};
enum {
@@ -282,6 +282,12 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_TEARDOWN_HCA:
return "TEARDOWN_HCA";
+ case MLX5_CMD_OP_ENABLE_HCA:
+ return "MLX5_CMD_OP_ENABLE_HCA";
+
+ case MLX5_CMD_OP_DISABLE_HCA:
+ return "MLX5_CMD_OP_DISABLE_HCA";
+
case MLX5_CMD_OP_QUERY_PAGES:
return "QUERY_PAGES";
@@ -1113,7 +1119,13 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
for (i = 0; i < (1 << cmd->log_sz); i++) {
if (test_bit(i, &vector)) {
+ struct semaphore *sem;
+
ent = cmd->ent_arr[i];
+ if (ent->page_queue)
+ sem = &cmd->pages_sem;
+ else
+ sem = &cmd->sem;
ktime_get_ts(&ent->ts2);
memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
dump_command(dev, ent, 0);
@@ -1136,10 +1148,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
} else {
complete(&ent->done);
}
- if (ent->page_queue)
- up(&cmd->pages_sem);
- else
- up(&cmd->sem);
+ up(sem);
}
}
}
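
[Editor's note: capturing the semaphore pointer before the completion handling is the likely point of this hunk — once complete(&ent->done) runs, the waiting submitter can resume and free the entry, so ent must not be dereferenced afterwards. The shape, with names from the diff:

    struct semaphore *sem;

    ent = cmd->ent_arr[i];
    sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;  /* capture while 'ent' is safe */

    /* ... copy out results, then complete(&ent->done); from here on the
     * waiter may run and release 'ent' ... */

    up(sem);        /* uses the captured pointer, never touches 'ent' again */
]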
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c02cbcfd0fb8..443cc4d7b024 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
case MLX5_EVENT_TYPE_PAGE_REQUEST:
{
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
- s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
+ s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
mlx5_core_req_pages_handler(dev, func_id, npages);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 72a5222447f5..f012658b6a92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
caps->log_max_mcg = out->hca_cap.log_max_mcg;
- caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
+ caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
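
[Editor's note: the max_qp_mcg fix is a field-width bug — the value occupies the low 24 bits of a big-endian 32-bit word, and reading it as a __be16 silently truncated it. A standalone demo of the widened read, with ntohl() standing in for be32_to_cpu() and a made-up wire value:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>      /* htonl()/ntohl() */

    int main(void)
    {
            /* wire format: 8 reserved bits, then a 24-bit count (big endian) */
            uint32_t wire = htonl(0x00ABCDEF);

            /* widen first, then mask off the reserved byte */
            uint32_t max_qp_mcg = ntohl(wire) & 0xffffff;

            printf("max_qp_mcg = 0x%06x\n", max_qp_mcg);    /* prints 0xabcdef */
            return 0;
    }
]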
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 748f10a155c4..3e6670c4a7cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -55,33 +55,9 @@ enum {
};
static DEFINE_SPINLOCK(health_lock);
-
static LIST_HEAD(health_list);
static struct work_struct health_work;
-static health_handler_t reg_handler;
-int mlx5_register_health_report_handler(health_handler_t handler)
-{
- spin_lock_irq(&health_lock);
- if (reg_handler) {
- spin_unlock_irq(&health_lock);
- return -EEXIST;
- }
- reg_handler = handler;
- spin_unlock_irq(&health_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(mlx5_register_health_report_handler);
-
-void mlx5_unregister_health_report_handler(void)
-{
- spin_lock_irq(&health_lock);
- reg_handler = NULL;
- spin_unlock_irq(&health_lock);
-}
-EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
-
static void health_care(struct work_struct *work)
{
struct mlx5_core_health *health, *n;
@@ -98,11 +74,8 @@ static void health_care(struct work_struct *work)
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_core_warn(dev, "handling bad device here\n");
+ /* nothing yet */
spin_lock_irq(&health_lock);
- if (reg_handler)
- reg_handler(dev->pdev, health->health,
- sizeof(health->health));
-
list_del_init(&health->list);
spin_unlock_irq(&health_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 12242de2b0e3..b47739b0b5f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -249,6 +249,44 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
return err;
}
+static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
+{
+ int err;
+ struct mlx5_enable_hca_mbox_in in;
+ struct mlx5_enable_hca_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return 0;
+}
+
+static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
+{
+ int err;
+ struct mlx5_disable_hca_mbox_in in;
+ struct mlx5_disable_hca_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return 0;
+}
+
int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -304,28 +342,41 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
}
mlx5_pagealloc_init(dev);
+
+ err = mlx5_core_enable_hca(dev);
+ if (err) {
+ dev_err(&pdev->dev, "enable hca failed\n");
+ goto err_pagealloc_cleanup;
+ }
+
+ err = mlx5_satisfy_startup_pages(dev, 1);
+ if (err) {
+ dev_err(&pdev->dev, "failed to allocate boot pages\n");
+ goto err_disable_hca;
+ }
+
err = set_hca_ctrl(dev);
if (err) {
dev_err(&pdev->dev, "set_hca_ctrl failed\n");
- goto err_pagealloc_cleanup;
+ goto reclaim_boot_pages;
}
err = handle_hca_cap(dev);
if (err) {
dev_err(&pdev->dev, "handle_hca_cap failed\n");
- goto err_pagealloc_cleanup;
+ goto reclaim_boot_pages;
}
- err = mlx5_satisfy_startup_pages(dev);
+ err = mlx5_satisfy_startup_pages(dev, 0);
if (err) {
- dev_err(&pdev->dev, "failed to allocate startup pages\n");
- goto err_pagealloc_cleanup;
+ dev_err(&pdev->dev, "failed to allocate init pages\n");
+ goto reclaim_boot_pages;
}
err = mlx5_pagealloc_start(dev);
if (err) {
dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
- goto err_reclaim_pages;
+ goto reclaim_boot_pages;
}
err = mlx5_cmd_init_hca(dev);
@@ -396,9 +447,12 @@ err_stop_poll:
err_pagealloc_stop:
mlx5_pagealloc_stop(dev);
-err_reclaim_pages:
+reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
+err_disable_hca:
+ mlx5_core_disable_hca(dev);
+
err_pagealloc_cleanup:
mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
@@ -434,6 +488,7 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_cmd_teardown_hca(dev);
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
+ mlx5_core_disable_hca(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
iounmap(dev->iseg);
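
[Editor's note: both helpers added above follow the driver's fixed mailbox idiom, shown here once with inline notes — taken from the enable path; the disable path differs only in the opcode and mailbox types:

    memset(&in, 0, sizeof(in));
    memset(&out, 0, sizeof(out));
    in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);

    err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
    if (err)                    /* command transport failed outright */
            return err;
    if (out.hdr.status)         /* firmware executed it but reported an error */
            return mlx5_cmd_status_to_err(&out.hdr);
    return 0;

Note the teardown symmetry: mlx5_core_disable_hca() is called both from mlx5_dev_cleanup() and from the new err_disable_hca error label, mirroring the enable at init.]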
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index f0bf46339b28..3a2408d44820 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -43,10 +43,16 @@ enum {
MLX5_PAGES_TAKE = 2
};
+enum {
+ MLX5_BOOT_PAGES = 1,
+ MLX5_INIT_PAGES = 2,
+ MLX5_POST_INIT_PAGES = 3
+};
+
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u32 func_id;
- s16 npages;
+ s32 npages;
struct work_struct work;
};
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox {
struct mlx5_query_pages_outbox {
struct mlx5_outbox_hdr hdr;
- u8 reserved[2];
+ __be16 rsvd;
__be16 func_id;
- __be16 init_pages;
- __be16 num_pages;
+ __be32 num_pages;
};
struct mlx5_manage_pages_inbox {
struct mlx5_inbox_hdr hdr;
- __be16 rsvd0;
+ __be16 rsvd;
__be16 func_id;
- __be16 rsvd1;
- __be16 num_entries;
- u8 rsvd2[16];
+ __be32 num_entries;
__be64 pas[0];
};
struct mlx5_manage_pages_outbox {
struct mlx5_outbox_hdr hdr;
- u8 rsvd0[2];
- __be16 num_entries;
- u8 rsvd1[20];
+ __be32 num_entries;
+ u8 rsvd[4];
__be64 pas[0];
};
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
}
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
- s16 *pages, s16 *init_pages)
+ s32 *npages, int boot)
{
struct mlx5_query_pages_inbox in;
struct mlx5_query_pages_outbox out;
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
+ in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
@@ -162,10 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
- if (pages)
- *pages = be16_to_cpu(out.num_pages);
- if (init_pages)
- *init_pages = be16_to_cpu(out.init_pages);
+ *npages = be32_to_cpu(out.num_pages);
*func_id = be16_to_cpu(out.func_id);
return err;
@@ -219,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
in->func_id = cpu_to_be16(func_id);
- in->num_entries = cpu_to_be16(npages);
+ in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
mlx5_core_dbg(dev, "err %d\n", err);
if (err) {
@@ -287,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
in.func_id = cpu_to_be16(func_id);
- in.num_entries = cpu_to_be16(npages);
+ in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err) {
@@ -301,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
goto out_free;
}
- num_claimed = be16_to_cpu(out->num_entries);
+ num_claimed = be32_to_cpu(out->num_entries);
if (nclaimed)
*nclaimed = num_claimed;
@@ -340,7 +341,7 @@ static void pages_work_handler(struct work_struct *work)
}
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s16 npages)
+ s32 npages)
{
struct mlx5_pages_req *req;
@@ -357,19 +358,20 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
queue_work(dev->priv.pg_wq, &req->work);
}
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev)
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
- s16 uninitialized_var(init_pages);
u16 uninitialized_var(func_id);
+ s32 uninitialized_var(npages);
int err;
- err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages);
+ err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
if (err)
return err;
- mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id);
+ mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+ npages, boot ? "boot" : "init", func_id);
- return give_pages(dev, func_id, init_pages, 0);
+ return give_pages(dev, func_id, npages, 0);
}
static int optimal_reclaimed_pages(void)
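
[Editor's note: firmware now reports boot-stage and init-stage page requirements separately; the opmod on QUERY_PAGES selects the stage, and the page counts are 32-bit in the new mailbox layout. The selection, as the patch writes it:

    in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
    in.hdr.opmod  = boot ? cpu_to_be16(MLX5_BOOT_PAGES)
                         : cpu_to_be16(MLX5_INIT_PAGES);
    /* out.num_pages is __be32 now; the old __be16 init_pages/num_pages
     * pair is gone from the outbox */
    *npages = be32_to_cpu(out.num_pages);

Boot pages are donated before set_hca_ctrl()/handle_hca_cap() run and init pages after, which is why mlx5_satisfy_startup_pages() grew the boot flag in main.c above.]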
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 71d4a3937200..68f5d9c77c7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -164,6 +164,7 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
if (!uuari->uars[i].map) {
mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+ err = -ENOMEM;
goto out_count;
}
mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index cb22341a14a8..a588ffde9700 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -4,7 +4,7 @@
config PCH_GBE
tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
- depends on PCI
+ depends on PCI && (X86 || COMPILE_TEST)
select MII
select PTP_1588_CLOCK_PCH
---help---
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index b00cf5665eab..221645e9f182 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1400,8 +1400,8 @@ void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
#define ADDR_IN_RANGE(addr, low, high) \
(((addr) < (high)) && ((addr) >= (low)))
-#define QLCRD32(adapter, off) \
- (adapter->ahw->hw_ops->read_reg)(adapter, off)
+#define QLCRD32(adapter, off, err) \
+ (adapter->ahw->hw_ops->read_reg)(adapter, off, err)
#define QLCWR32(adapter, off, val) \
adapter->ahw->hw_ops->write_reg(adapter, off, val)
@@ -1604,7 +1604,7 @@ struct qlcnic_nic_template {
struct qlcnic_hardware_ops {
void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
- int (*read_reg) (struct qlcnic_adapter *, ulong);
+ int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
void (*get_ocm_win) (struct qlcnic_hardware_context *);
int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
@@ -1662,12 +1662,6 @@ static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
}
-static inline int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter,
- ulong off)
-{
- return adapter->ahw->hw_ops->read_reg(adapter, off);
-}
-
static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
ulong off, u32 data)
{
@@ -1869,7 +1863,8 @@ static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
{
- adapter->ahw->hw_ops->set_mac_filter_count(adapter);
+ if (adapter->ahw->hw_ops->set_mac_filter_count)
+ adapter->ahw->hw_ops->set_mac_filter_count(adapter);
}
static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 0913c623a67e..9d4bb7f83904 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -228,17 +228,17 @@ static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr)
return 0;
}
-int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr)
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+ int *err)
{
- int ret;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- ret = __qlcnic_set_win_base(adapter, (u32) addr);
- if (!ret) {
+ *err = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!*err) {
return QLCRDX(ahw, QLCNIC_WILDCARD);
} else {
dev_err(&adapter->pdev->dev,
- "%s failed, addr = 0x%x\n", __func__, (int)addr);
+ "%s failed, addr = 0x%lx\n", __func__, addr);
return -EIO;
}
}
@@ -561,7 +561,7 @@ void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
loff_t offset, size_t size)
{
- int ret;
+ int ret = 0;
u32 data;
if (qlcnic_api_lock(adapter)) {
@@ -571,7 +571,7 @@ void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
return;
}
- ret = qlcnic_83xx_rd_reg_indirect(adapter, (u32) offset);
+ data = QLCRD32(adapter, (u32) offset, &ret);
qlcnic_api_unlock(adapter);
if (ret == -EIO) {
@@ -580,7 +580,6 @@ void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
__func__, (u32)offset);
return;
}
- data = ret;
memcpy(buf, &data, size);
}
@@ -2075,18 +2074,25 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
u32 data[])
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
u8 link_status, duplex;
/* link speed */
link_status = LSB(data[3]) & 1;
- adapter->ahw->link_speed = MSW(data[2]);
- adapter->ahw->link_autoneg = MSB(MSW(data[3]));
- adapter->ahw->module_type = MSB(LSW(data[3]));
- duplex = LSB(MSW(data[3]));
- if (duplex)
- adapter->ahw->link_duplex = DUPLEX_FULL;
- else
- adapter->ahw->link_duplex = DUPLEX_HALF;
- adapter->ahw->has_link_events = 1;
+ if (link_status) {
+ ahw->link_speed = MSW(data[2]);
+ duplex = LSB(MSW(data[3]));
+ if (duplex)
+ ahw->link_duplex = DUPLEX_FULL;
+ else
+ ahw->link_duplex = DUPLEX_HALF;
+ } else {
+ ahw->link_speed = SPEED_UNKNOWN;
+ ahw->link_duplex = DUPLEX_UNKNOWN;
+ }
+
+ ahw->link_autoneg = MSB(MSW(data[3]));
+ ahw->module_type = MSB(LSW(data[3]));
+ ahw->has_link_events = 1;
qlcnic_advert_link_change(adapter, link_status);
}
@@ -2384,9 +2390,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
u32 flash_addr, u8 *p_data,
int count)
{
- int i, ret;
- u32 word, range, flash_offset, addr = flash_addr;
+ u32 word, range, flash_offset, addr = flash_addr, ret;
ulong indirect_add, direct_window;
+ int i, err = 0;
flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
if (addr & 0x3) {
@@ -2404,10 +2410,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
/* Multi sector read */
for (i = 0; i < count; i++) {
indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
- ret = qlcnic_83xx_rd_reg_indirect(adapter,
- indirect_add);
- if (ret == -EIO)
- return -EIO;
+ ret = QLCRD32(adapter, indirect_add, &err);
+ if (err == -EIO)
+ return err;
word = ret;
*(u32 *)p_data = word;
@@ -2428,10 +2433,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
/* Single sector read */
for (i = 0; i < count; i++) {
indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
- ret = qlcnic_83xx_rd_reg_indirect(adapter,
- indirect_add);
- if (ret == -EIO)
- return -EIO;
+ ret = QLCRD32(adapter, indirect_add, &err);
+ if (err == -EIO)
+ return err;
word = ret;
*(u32 *)p_data = word;
@@ -2447,10 +2451,13 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
{
u32 status;
int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
+ int err = 0;
do {
- status = qlcnic_83xx_rd_reg_indirect(adapter,
- QLC_83XX_FLASH_STATUS);
+ status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err);
+ if (err == -EIO)
+ return err;
+
if ((status & QLC_83XX_FLASH_STATUS_READY) ==
QLC_83XX_FLASH_STATUS_READY)
break;
@@ -2502,7 +2509,8 @@ int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
{
- int ret, mfg_id;
+ int ret, err = 0;
+ u32 mfg_id;
if (qlcnic_83xx_lock_flash(adapter))
return -EIO;
@@ -2517,9 +2525,11 @@ int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
return -EIO;
}
- mfg_id = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
- if (mfg_id == -EIO)
- return -EIO;
+ mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+ if (err == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return err;
+ }
adapter->flash_mfg_id = (mfg_id & 0xFF);
qlcnic_83xx_unlock_flash(adapter);
@@ -2636,7 +2646,7 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
u32 *p_data, int count)
{
u32 temp;
- int ret = -EIO;
+ int ret = -EIO, err = 0;
if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
(count > QLC_83XX_FLASH_WRITE_MAX)) {
@@ -2645,8 +2655,10 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
return -EIO;
}
- temp = qlcnic_83xx_rd_reg_indirect(adapter,
- QLC_83XX_FLASH_SPI_CONTROL);
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+ if (err == -EIO)
+ return err;
+
qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
(temp | QLC_83XX_FLASH_SPI_CTRL));
qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
@@ -2695,13 +2707,18 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
return -EIO;
}
- ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_SPI_STATUS);
+ ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err);
+ if (err == -EIO)
+ return err;
+
if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
__func__, __LINE__);
/* Operation failed, clear error bit */
- temp = qlcnic_83xx_rd_reg_indirect(adapter,
- QLC_83XX_FLASH_SPI_CONTROL);
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+ if (err == -EIO)
+ return err;
+
qlcnic_83xx_wrt_reg_indirect(adapter,
QLC_83XX_FLASH_SPI_CONTROL,
(temp | QLC_83XX_FLASH_SPI_CTRL));
@@ -2823,6 +2840,7 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
{
int i, j, ret = 0;
u32 temp;
+ int err = 0;
/* Check alignment */
if (addr & 0xF)
@@ -2855,8 +2873,12 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
QLCNIC_TA_WRITE_START);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = qlcnic_83xx_rd_reg_indirect(adapter,
- QLCNIC_MS_CTRL);
+ temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
+ if (err == -EIO) {
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return err;
+ }
+
if ((temp & TA_CTL_BUSY) == 0)
break;
}
@@ -2878,9 +2900,9 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
u8 *p_data, int count)
{
- int i, ret;
- u32 word, addr = flash_addr;
+ u32 word, addr = flash_addr, ret;
ulong indirect_addr;
+ int i, err = 0;
if (qlcnic_83xx_lock_flash(adapter) != 0)
return -EIO;
@@ -2900,10 +2922,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
}
indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
- ret = qlcnic_83xx_rd_reg_indirect(adapter,
- indirect_addr);
- if (ret == -EIO)
- return -EIO;
+ ret = QLCRD32(adapter, indirect_addr, &err);
+ if (err == -EIO)
+ return err;
+
word = ret;
*(u32 *)p_data = word;
p_data = p_data + 4;
@@ -3014,8 +3036,8 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
}
if (ahw->port_type == QLCNIC_XGBE) {
- ecmd->supported = SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_1000baseT_Full;
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
} else {
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
@@ -3244,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
u8 val;
int ret, max_sds_rings = adapter->max_sds_rings;
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev, "Device is resetting\n");
+ return -EBUSY;
+ }
+
if (qlcnic_get_diag_lock(adapter)) {
netdev_info(netdev, "Device in diagnostics mode\n");
return -EBUSY;
@@ -3369,7 +3396,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
{
- int ret;
+ int ret, err = 0;
+ u32 temp;
qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
QLC_83XX_FLASH_OEM_READ_SIG);
@@ -3379,8 +3407,11 @@ static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
if (ret)
return -EIO;
- ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
- return ret & 0xFF;
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+ if (err == -EIO)
+ return err;
+
+ return temp & 0xFF;
}
int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
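
[Editor's note: the signature change threaded through all the qlcnic hunks above fixes an in-band error channel — read_reg() used to return both the register value and -EIO through the same int, so a register that legitimately reads as the bit pattern of -EIO was indistinguishable from a failure. A standalone toy model of the new QLCRD32() contract (hypothetical reg_read() and addresses, for illustration only):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* value returns in-band, status out-of-band: every 32-bit register
     * value is now representable */
    static uint32_t reg_read(unsigned long addr, int *err)
    {
            if (addr == 0) {            /* pretend window programming failed */
                    *err = -EIO;
                    return 0xffffffff;  /* caller must ignore this */
            }
            *err = 0;
            return 0xfffffffb;          /* == (uint32_t)-EIO: legal data, not an error */
    }

    int main(void)
    {
            int err = 0;
            uint32_t val = reg_read(4, &err);

            if (err == -EIO)
                    return 1;
            printf("val = 0x%08x\n", val);  /* usable even when it looks like -EIO */
            return 0;
    }
]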
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 2548d1403d75..272f56a2e14b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -508,7 +508,7 @@ void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
-int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong);
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index f41dfab1e9a3..345d987aede4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
return -EIO;
}
- qlcnic_set_drv_version(adapter);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
qlcnic_83xx_idc_attach_driver(adapter);
return 0;
@@ -1303,8 +1304,11 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
{
int i, j;
u32 val = 0, val1 = 0, reg = 0;
+ int err = 0;
- val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG);
+ val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err);
+ if (err == -EIO)
+ return;
dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
for (j = 0; j < 2; j++) {
@@ -1318,7 +1322,9 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
reg = QLC_83XX_PORT1_THRESHOLD;
}
for (i = 0; i < 8; i++) {
- val = QLCRD32(adapter, reg + (i * 0x4));
+ val = QLCRD32(adapter, reg + (i * 0x4), &err);
+ if (err == -EIO)
+ return;
dev_info(&adapter->pdev->dev, "0x%x ", val);
}
dev_info(&adapter->pdev->dev, "\n");
@@ -1335,8 +1341,10 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
reg = QLC_83XX_PORT1_TC_MC_REG;
}
for (i = 0; i < 4; i++) {
- val = QLCRD32(adapter, reg + (i * 0x4));
- dev_info(&adapter->pdev->dev, "0x%x ", val);
+ val = QLCRD32(adapter, reg + (i * 0x4), &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
}
dev_info(&adapter->pdev->dev, "\n");
}
@@ -1352,17 +1360,25 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
reg = QLC_83XX_PORT1_TC_STATS;
}
for (i = 7; i >= 0; i--) {
- val = QLCRD32(adapter, reg);
+ val = QLCRD32(adapter, reg, &err);
+ if (err == -EIO)
+ return;
val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
QLCWR32(adapter, reg, (val | (i << 29)));
- val = QLCRD32(adapter, reg);
+ val = QLCRD32(adapter, reg, &err);
+ if (err == -EIO)
+ return;
dev_info(&adapter->pdev->dev, "0x%x ", val);
}
dev_info(&adapter->pdev->dev, "\n");
}
- val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD);
- val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD);
+ val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err);
+ if (err == -EIO)
+ return;
+ val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err);
+ if (err == -EIO)
+ return;
dev_info(&adapter->pdev->dev,
"IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
val, val1);
@@ -1425,7 +1441,7 @@ static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter)
static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
{
u32 heartbeat, peg_status;
- int retries, ret = -EIO;
+ int retries, ret = -EIO, err = 0;
retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
@@ -1453,11 +1469,11 @@ static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
"PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
"PEG_NET_4_PC: 0x%x\n", peg_status,
QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
- QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0),
- QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1),
- QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2),
- QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3),
- QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4));
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err));
if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
dev_err(&p_dev->pdev->dev,
@@ -1501,18 +1517,22 @@ int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
int duration, u32 mask, u32 status)
{
+ int timeout_error, err = 0;
u32 value;
- int timeout_error;
u8 retries;
- value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ value = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return err;
retries = duration / 10;
do {
if ((value & mask) != status) {
timeout_error = 1;
msleep(duration / 10);
- value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ value = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return err;
} else {
timeout_error = 0;
break;
@@ -1606,9 +1626,12 @@ int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
u32 raddr, u32 waddr)
{
- int value;
+ int err = 0;
+ u32 value;
- value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+ value = QLCRD32(p_dev, raddr, &err);
+ if (err == -EIO)
+ return;
qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
}
@@ -1617,12 +1640,16 @@ static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev,
u32 raddr, u32 waddr,
struct qlc_83xx_rmw *p_rmw_hdr)
{
- int value;
+ int err = 0;
+ u32 value;
- if (p_rmw_hdr->index_a)
+ if (p_rmw_hdr->index_a) {
value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
- else
- value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+ } else {
+ value = QLCRD32(p_dev, raddr, &err);
+ if (err == -EIO)
+ return;
+ }
value &= p_rmw_hdr->mask;
value <<= p_rmw_hdr->shl;
@@ -1675,7 +1702,7 @@ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
long delay;
struct qlc_83xx_entry *entry;
struct qlc_83xx_poll *poll;
- int i;
+ int i, err = 0;
unsigned long arg1, arg2;
poll = (struct qlc_83xx_poll *)((char *)p_hdr +
@@ -1699,10 +1726,12 @@ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
arg1, delay,
poll->mask,
poll->status)){
- qlcnic_83xx_rd_reg_indirect(p_dev,
- arg1);
- qlcnic_83xx_rd_reg_indirect(p_dev,
- arg2);
+ QLCRD32(p_dev, arg1, &err);
+ if (err == -EIO)
+ return;
+ QLCRD32(p_dev, arg2, &err);
+ if (err == -EIO)
+ return;
}
}
}
@@ -1768,7 +1797,7 @@ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
struct qlc_83xx_entry_hdr *p_hdr)
{
long delay;
- int index, i, j;
+ int index, i, j, err;
struct qlc_83xx_quad_entry *entry;
struct qlc_83xx_poll *poll;
unsigned long addr;
@@ -1788,7 +1817,10 @@ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
poll->mask, poll->status)){
index = p_dev->ahw->reset.array_index;
addr = entry->dr_addr;
- j = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ j = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return;
+
p_dev->ahw->reset.array[index++] = j;
if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
@@ -2123,6 +2155,8 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
qlcnic_83xx_clear_function_resources(adapter);
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
/* register for NIC IDC AEN Events */
qlcnic_83xx_register_nic_idc_func(adapter, 1);
@@ -2140,8 +2174,6 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
if (adapter->nic_ops->init_driver(adapter))
return -EIO;
- INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
-
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 0581a484ceb5..d09389b33474 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -104,7 +104,7 @@ static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
u32 rsp;
- int timeout = 0;
+ int timeout = 0, err = 0;
do {
/* give atleast 1ms for firmware to respond */
@@ -113,7 +113,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
return QLCNIC_CDRP_RSP_TIMEOUT;
- rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
+ rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err);
} while (!QLCNIC_CDRP_IS_RSP(rsp));
return rsp;
@@ -122,7 +122,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
- int i;
+ int i, err = 0;
u32 rsp;
u32 signature;
struct pci_dev *pdev = adapter->pdev;
@@ -148,7 +148,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
dev_err(&pdev->dev, "card response timeout.\n");
cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
- cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1));
+ cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
switch (cmd->rsp.arg[0]) {
case QLCNIC_RCODE_INVALID_ARGS:
fmt = "CDRP invalid args: [%d]\n";
@@ -175,7 +175,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
for (i = 1; i < cmd->rsp.num; i++)
- cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i));
+ cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err);
/* Release semaphore */
qlcnic_api_unlock(adapter);
@@ -210,10 +210,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
if (err) {
dev_info(&adapter->pdev->dev,
"Failed to set driver version in firmware\n");
- return -EIO;
+ err = -EIO;
}
-
- return 0;
+ qlcnic_free_mbx_args(&cmd);
+ return err;
}
int
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 700a46324d09..7aac23ab31d1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -150,6 +150,7 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
"Link_Test_on_offline",
"Interrupt_Test_offline",
"Internal_Loopback_offline",
+ "External_Loopback_offline",
"EEPROM_Test_offline"
};
@@ -266,7 +267,7 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 speed, reg;
- int check_sfp_module = 0;
+ int check_sfp_module = 0, err = 0;
u16 pcifn = ahw->pci_func;
/* read which mode */
@@ -289,7 +290,7 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
u32 val = 0;
- val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err);
if (val == QLCNIC_PORT_MODE_802_3_AP) {
ecmd->supported = SUPPORTED_1000baseT_Full;
@@ -300,9 +301,13 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
}
if (netif_running(adapter->netdev) && ahw->has_link_events) {
- reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
- speed = P3P_LINK_SPEED_VAL(pcifn, reg);
- ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+ if (ahw->linkup) {
+ reg = QLCRD32(adapter,
+ P3P_LINK_SPEED_REG(pcifn), &err);
+ speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+ ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+ }
+
ethtool_cmd_speed_set(ecmd, ahw->link_speed);
ecmd->autoneg = ahw->link_autoneg;
ecmd->duplex = ahw->link_duplex;
@@ -463,13 +468,14 @@ static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
u32 *regs_buff)
{
- int i, j = 0;
+ int i, j = 0, err = 0;
for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
j = 0;
while (ext_diag_registers[j] != -1)
- regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++]);
+ regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++],
+ &err);
return i;
}
@@ -519,13 +525,16 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
static u32 qlcnic_test_link(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int err = 0;
u32 val;
if (qlcnic_83xx_check(adapter)) {
val = qlcnic_83xx_test_link(adapter);
return (val & 1) ? 0 : 1;
}
- val = QLCRD32(adapter, CRB_XG_STATE_P3P);
+ val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err);
+ if (err == -EIO)
+ return err;
val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
return (val == XG_LINK_UP_P3P) ? 0 : 1;
}
@@ -658,6 +667,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int port = adapter->ahw->physical_port;
+ int err = 0;
__u32 val;
if (qlcnic_83xx_check(adapter)) {
@@ -668,9 +678,13 @@ qlcnic_get_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return;
/* get flow control settings */
- val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+ if (err == -EIO)
+ return;
pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
- val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return;
switch (port) {
case 0:
pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
@@ -690,7 +704,9 @@ qlcnic_get_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
return;
pause->rx_pause = 1;
- val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return;
if (port == 0)
pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
else
@@ -707,6 +723,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int port = adapter->ahw->physical_port;
+ int err = 0;
__u32 val;
if (qlcnic_83xx_check(adapter))
@@ -717,7 +734,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return -EIO;
/* set flow control */
- val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+ if (err == -EIO)
+ return err;
if (pause->rx_pause)
qlcnic_gb_rx_flowctl(val);
@@ -728,7 +747,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
val);
QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
/* set autoneg */
- val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return err;
switch (port) {
case 0:
if (pause->tx_pause)
@@ -764,7 +785,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
return -EIO;
- val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return err;
if (port == 0) {
if (pause->tx_pause)
qlcnic_xg_unset_xg0_mask(val);
@@ -788,11 +811,14 @@ static int qlcnic_reg_test(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 data_read;
+ int err = 0;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_reg_test(adapter);
- data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
+ data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err);
+ if (err == -EIO)
+ return err;
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
@@ -1026,8 +1052,15 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
if (data[3])
eth_test->flags |= ETH_TEST_FL_FAILED;
- data[4] = qlcnic_eeprom_test(dev);
- if (data[4])
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
+ if (data[4])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
+
+ data[5] = qlcnic_eeprom_test(dev);
+ if (data[5])
eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
@@ -1257,17 +1290,20 @@ qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
+ int err = 0;
if (qlcnic_83xx_check(adapter))
return;
wol->supported = 0;
wol->wolopts = 0;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+ if (err == -EIO)
+ return;
if (wol_cfg & (1UL << adapter->portnum))
wol->supported |= WAKE_MAGIC;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
if (wol_cfg & (1UL << adapter->portnum))
wol->wolopts |= WAKE_MAGIC;
}
@@ -1277,17 +1313,22 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
+ int err = 0;
if (qlcnic_83xx_check(adapter))
return -EOPNOTSUPP;
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+ if (err == -EIO)
+ return err;
if (!(wol_cfg & (1 << adapter->portnum)))
return -EOPNOTSUPP;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+ if (err == -EIO)
+ return err;
if (wol->wolopts & WAKE_MAGIC)
wol_cfg |= 1UL << adapter->portnum;
else
@@ -1540,7 +1581,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
return 0;
case QLCNIC_SET_QUIESCENT:
case QLCNIC_RESET_QUIESCENT:
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
netdev_info(netdev, "Device in FAILED state\n");
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 5b5d2edf125d..4d5f59b2d153 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -317,16 +317,20 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
- int done = 0, timeout = 0;
+ int timeout = 0;
+ int err = 0;
+ u32 done = 0;
while (!done) {
- done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
+ done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
+ &err);
if (done == 1)
break;
if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
dev_err(&adapter->pdev->dev,
"Failed to acquire sem=%d lock; holdby=%d\n",
- sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
+ sem,
+ id_reg ? QLCRD32(adapter, id_reg, &err) : -1);
return -EIO;
}
msleep(1);
@@ -341,19 +345,22 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
void
qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
{
- QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+ int err = 0;
+
+ QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)), &err);
}
int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
{
+ int err = 0;
u32 data;
if (qlcnic_82xx_check(adapter))
qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
else {
- data = qlcnic_83xx_rd_reg_indirect(adapter, addr);
- if (data == -EIO)
- return -EIO;
+ data = QLCRD32(adapter, addr, &err);
+ if (err == -EIO)
+ return err;
}
return data;
}
@@ -516,20 +523,18 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
if (netdev->flags & IFF_PROMISC) {
if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
mode = VPORT_MISS_MODE_ACCEPT_ALL;
- } else if (netdev->flags & IFF_ALLMULTI) {
- if (netdev_mc_count(netdev) > ahw->max_mc_count) {
- mode = VPORT_MISS_MODE_ACCEPT_MULTI;
- } else if (!netdev_mc_empty(netdev) &&
- !qlcnic_sriov_vf_check(adapter)) {
- netdev_for_each_mc_addr(ha, netdev)
- qlcnic_nic_add_mac(adapter, ha->addr,
- vlan);
- }
- if (mode != VPORT_MISS_MODE_ACCEPT_MULTI &&
- qlcnic_sriov_vf_check(adapter))
- qlcnic_vf_add_mc_list(netdev, vlan);
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ } else if (!netdev_mc_empty(netdev) &&
+ !qlcnic_sriov_vf_check(adapter)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_vf_add_mc_list(netdev, vlan);
+
/* configure unicast MAC address, if there is not sufficient space
* to store all the unicast addresses then enable promiscuous mode
*/
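The __qlcnic_set_multi() hunk above flattens the nested multicast logic into one decision ladder and hoists the VF list update out of it, so the VF path runs regardless of which branch chose the mode. Condensed pseudo-C of the new control flow; the helper names are stand-ins, not driver functions:

	if (flags & IFF_PROMISC)
		mode = VPORT_MISS_MODE_ACCEPT_ALL;
	else if ((flags & IFF_ALLMULTI) || mc_count > max_mc_count)
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
	else if (mc_count && !is_vf)
		add_each_multicast_addr();	/* stand-in helper */

	if (is_vf)
		update_vf_mc_list();		/* stand-in helper */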
@@ -1161,7 +1166,8 @@ int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off,
return -EIO;
}
-int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+ int *err)
{
unsigned long flags;
int rv;
@@ -1417,7 +1423,7 @@ int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
{
- int offset, board_type, magic;
+ int offset, board_type, magic, err = 0;
struct pci_dev *pdev = adapter->pdev;
offset = QLCNIC_FW_MAGIC_OFFSET;
@@ -1437,7 +1443,9 @@ int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
adapter->ahw->board_type = board_type;
if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
- u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
+ u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I, &err);
+ if (err == -EIO)
+ return err;
if ((gpio & 0x8000) == 0)
board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
}
@@ -1477,10 +1485,13 @@ int
qlcnic_wol_supported(struct qlcnic_adapter *adapter)
{
u32 wol_cfg;
+ int err = 0;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
if (wol_cfg & (1UL << adapter->portnum)) {
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+ if (err == -EIO)
+ return err;
if (wol_cfg & (1 << adapter->portnum))
return 1;
}
@@ -1541,6 +1552,7 @@ void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
loff_t offset, size_t size)
{
+ int err = 0;
u32 data;
u64 qmdata;
@@ -1548,7 +1560,7 @@ void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
memcpy(buf, &qmdata, size);
} else {
- data = QLCRD32(adapter, offset);
+ data = QLCRD32(adapter, offset, &err);
memcpy(buf, &data, size);
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 2c22504f57aa..4a71b28effcb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -154,7 +154,7 @@ struct qlcnic_hardware_context;
struct qlcnic_adapter;
int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
-int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong);
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index d28336fc65ab..974d62607e13 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -142,7 +142,7 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
buffrag->length, PCI_DMA_TODEVICE);
buffrag->dma = 0ULL;
}
- for (j = 0; j < cmd_buf->frag_count; j++) {
+ for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
pci_unmap_page(adapter->pdev, buffrag->dma,
@@ -286,10 +286,11 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
{
long timeout = 0;
long done = 0;
+ int err = 0;
cond_resched();
while (done == 0) {
- done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
+ done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err);
done &= 2;
if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
dev_err(&adapter->pdev->dev,
@@ -304,6 +305,8 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
static int do_rom_fast_read(struct qlcnic_adapter *adapter,
u32 addr, u32 *valp)
{
+ int err = 0;
+
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
@@ -317,7 +320,9 @@ static int do_rom_fast_read(struct qlcnic_adapter *adapter,
udelay(10);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
- *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
+ *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA, &err);
+ if (err == -EIO)
+ return err;
return 0;
}
@@ -369,11 +374,11 @@ int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
{
- int addr, val;
+ int addr, err = 0;
int i, n, init_delay;
struct crb_addr_pair *buf;
unsigned offset;
- u32 off;
+ u32 off, val;
struct pci_dev *pdev = adapter->pdev;
QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0);
@@ -402,7 +407,9 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00);
/* halt sre */
- val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000);
+ val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000, &err);
+ if (err == -EIO)
+ return err;
QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1)));
/* halt epg */
@@ -719,10 +726,12 @@ qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
static int
qlcnic_has_mn(struct qlcnic_adapter *adapter)
{
- u32 capability;
- capability = 0;
+ u32 capability = 0;
+ int err = 0;
- capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err);
+ if (err == -EIO)
+ return err;
if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
return 1;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index d3f8797efcc3..6946d354f44f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -161,36 +161,68 @@ static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}
+static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
+ struct qlcnic_filter *fil,
+ void *addr, u16 vlan_id)
+{
+ int ret;
+ u8 op;
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+ if (ret)
+ return;
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+ ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+ if (!ret) {
+ hlist_del(&fil->fnode);
+ adapter->rx_fhash.fnum--;
+ }
+}
+
+static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
+ void *addr, u16 vlan_id)
+{
+ struct qlcnic_filter *tmp_fil = NULL;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
+ tmp_fil->vlan_id == vlan_id)
+ return tmp_fil;
+ }
+
+ return NULL;
+}
+
void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
int loopback_pkt, u16 vlan_id)
{
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
struct qlcnic_filter *fil, *tmp_fil;
- struct hlist_node *n;
struct hlist_head *head;
unsigned long time;
u64 src_addr = 0;
- u8 hindex, found = 0, op;
+ u8 hindex, op;
int ret;
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+ hindex = qlcnic_mac_hash(src_addr) &
+ (adapter->fhash.fbucket_size - 1);
if (loopback_pkt) {
if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
return;
- hindex = qlcnic_mac_hash(src_addr) &
- (adapter->fhash.fbucket_size - 1);
head = &(adapter->rx_fhash.fhead[hindex]);
- hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
- tmp_fil->vlan_id == vlan_id) {
- time = tmp_fil->ftime;
- if (jiffies > (QLCNIC_READD_AGE * HZ + time))
- tmp_fil->ftime = jiffies;
- return;
- }
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil) {
+ time = tmp_fil->ftime;
+ if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
+ tmp_fil->ftime = jiffies;
+ return;
}
fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
@@ -205,36 +237,37 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
adapter->rx_fhash.fnum++;
spin_unlock(&adapter->rx_mac_learn_lock);
} else {
- hindex = qlcnic_mac_hash(src_addr) &
- (adapter->fhash.fbucket_size - 1);
- head = &(adapter->rx_fhash.fhead[hindex]);
- spin_lock(&adapter->rx_mac_learn_lock);
- hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
- tmp_fil->vlan_id == vlan_id) {
- found = 1;
- break;
- }
- }
+ head = &adapter->fhash.fhead[hindex];
- if (!found) {
- spin_unlock(&adapter->rx_mac_learn_lock);
- return;
- }
+ spin_lock(&adapter->mac_learn_lock);
- op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
- ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
- vlan_id, op);
- if (!ret) {
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil) {
op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
ret = qlcnic_sre_macaddr_change(adapter,
(u8 *)&src_addr,
vlan_id, op);
if (!ret) {
- hlist_del(&(tmp_fil->fnode));
- adapter->rx_fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ adapter->fhash.fnum--;
}
+
+ spin_unlock(&adapter->mac_learn_lock);
+
+ return;
}
+
+ spin_unlock(&adapter->mac_learn_lock);
+
+ head = &adapter->rx_fhash.fhead[hindex];
+
+ spin_lock(&adapter->rx_mac_learn_lock);
+
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil)
+ qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
+ vlan_id);
+
spin_unlock(&adapter->rx_mac_learn_lock);
}
}
@@ -262,7 +295,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
- memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
+ memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
vlan_req->vlan_id = cpu_to_le16(vlan_id);
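The one-line memcpy change above fixes a classic pointer-of-a-pointer bug: uaddr already points at the MAC bytes, so &uaddr copied the pointer variable's own storage instead of the address. A self-contained illustration of the bug class:

	#include <string.h>

	static void copy_mac(unsigned char dst[6], const unsigned char *src)
	{
		memcpy(dst, src, 6);	/* correct: copies the 6 address bytes */
		/* memcpy(dst, &src, 6) would copy the pointer itself */
	}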
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 4528f8ec333b..bc05d016c859 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -977,8 +977,8 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
static int
qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
{
- int err;
struct qlcnic_info nic_info;
+ int err = 0;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
@@ -993,7 +993,9 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
u32 temp;
- temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+ temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err);
+ if (err == -EIO)
+ return err;
adapter->ahw->extra_capability[0] = temp;
}
adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
@@ -1383,6 +1385,8 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
if (qlcnic_82xx_check(adapter))
handler = qlcnic_tmp_intr;
+ else
+ handler = qlcnic_83xx_tmp_intr;
if (!QLCNIC_IS_MSI_FAMILY(adapter))
flags |= IRQF_SHARED;
@@ -1531,12 +1535,12 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (netdev->features & NETIF_F_LRO)
qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
qlcnic_napi_enable(adapter);
qlcnic_linkevent_request(adapter, 1);
adapter->ahw->reset_context = 0;
- set_bit(__QLCNIC_DEV_UP, &adapter->state);
return 0;
}
@@ -2139,7 +2143,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
!!qlcnic_use_msi)
dev_warn(&pdev->dev,
- "83xx adapter do not support MSI interrupts\n");
+ "Device does not support MSI interrupts\n");
err = qlcnic_setup_intr(adapter, 0);
if (err) {
@@ -2161,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_out_disable_mbx_intr;
- qlcnic_set_drv_version(adapter);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
pci_set_drvdata(pdev, adapter);
@@ -3081,7 +3086,8 @@ done:
adapter->fw_fail_cnt = 0;
adapter->flags &= ~QLCNIC_FW_HANG;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- qlcnic_set_drv_version(adapter);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
if (!qlcnic_clr_drv_state(adapter))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
@@ -3093,6 +3099,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
{
u32 state = 0, heartbeat;
u32 peg_status;
+ int err = 0;
if (qlcnic_check_temp(adapter))
goto detach;
@@ -3139,11 +3146,11 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
"PEG_NET_4_PC: 0x%x\n",
peg_status,
QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err));
if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
dev_err(&adapter->pdev->dev,
"Firmware aborted with error code 0x00006700. "
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index ab8a6744d402..79e54efe07b9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1084,7 +1084,7 @@ flash_temp:
tmpl_hdr = ahw->fw_dump.tmpl_hdr;
tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
- if ((tmpl_hdr->version & 0xffffff) >= 0x20001)
+ if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
ahw->fw_dump.use_pex_dma = true;
else
ahw->fw_dump.use_pex_dma = false;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 62380ce89905..5d40045b3cea 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -562,7 +562,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
INIT_LIST_HEAD(&adapter->vf_mc_list);
if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
dev_warn(&adapter->pdev->dev,
- "83xx adapter do not support MSI interrupts\n");
+ "Device does not support MSI interrupts\n");
err = qlcnic_setup_intr(adapter, 1);
if (err) {
@@ -762,6 +762,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
mbx->req.arg[0] = (type | (mbx->req.num << 16) |
(3 << 29));
+ mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
return 0;
}
}
@@ -813,6 +814,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
cmd->req.num = trans->req_pay_size / 4;
cmd->rsp.num = trans->rsp_pay_size / 4;
hdr = trans->rsp_hdr;
+ cmd->op_type = trans->req_hdr->op_type;
}
trans->trans_id = seq;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index ee0c1d307966..eb49cd65378c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -635,12 +635,12 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
- struct qlcnic_adapter *adapter = vf->adapter;
- int err;
+ struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_adapter *adapter;
u16 func = vf->pci_func;
+ int err;
- cmd->rsp.arg[0] = trans->req_hdr->cmd_op;
- cmd->rsp.arg[0] |= (1 << 16);
+ adapter = vf->adapter;
if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
@@ -650,6 +650,8 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
} else {
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
+ vp->vlan = 0;
err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
@@ -1183,7 +1185,7 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
u8 cmd_op, mode = vp->vlan_mode;
cmd_op = trans->req_hdr->cmd_op;
- cmd->rsp.arg[0] = (cmd_op & 0xffff) | 14 << 16 | 1 << 25;
+ cmd->rsp.arg[0] |= 1 << 25;
switch (mode) {
case QLC_GUEST_VLAN_MODE:
@@ -1561,6 +1563,7 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
struct qlcnic_vf_info *vf)
{
struct net_device *dev = vf->adapter->netdev;
+ struct qlcnic_vport *vp = vf->vp;
if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) {
clear_bit(QLC_BC_VF_FLR, &vf->state);
@@ -1573,6 +1576,9 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
return;
}
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
+ vp->vlan = 0;
+
qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
}
@@ -1621,13 +1627,15 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
- int i, num_vfs = sriov->num_vfs;
+ int i, num_vfs;
struct qlcnic_vf_info *vf_info;
u8 *curr_mac;
if (!qlcnic_sriov_pf_check(adapter))
return -EOPNOTSUPP;
+ num_vfs = sriov->num_vfs;
+
if (!is_valid_ether_addr(mac) || vf >= num_vfs)
return -EINVAL;
@@ -1741,6 +1749,7 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
switch (vlan) {
case 4095:
+ vp->vlan = 0;
vp->vlan_mode = QLC_GUEST_VLAN_MODE;
break;
case 0:
@@ -1759,6 +1768,29 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
return 0;
}
+static inline __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vport *vp, int vf)
+{
+ __u32 vlan = 0;
+
+ switch (vp->vlan_mode) {
+ case QLC_PVID_MODE:
+ vlan = vp->vlan;
+ break;
+ case QLC_GUEST_VLAN_MODE:
+ vlan = MAX_VLAN_ID;
+ break;
+ case QLC_NO_VLAN_MODE:
+ vlan = 0;
+ break;
+ default:
+ netdev_info(adapter->netdev, "Invalid VLAN mode = %d for VF %d\n",
+ vp->vlan_mode, vf);
+ }
+
+ return vlan;
+}
+
int qlcnic_sriov_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
@@ -1774,7 +1806,7 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
vp = sriov->vf_info[vf].vp;
memcpy(&ivi->mac, vp->mac, ETH_ALEN);
- ivi->vlan = vp->vlan;
+ ivi->vlan = qlcnic_sriov_get_vf_vlan(adapter, vp, vf);
ivi->qos = vp->qos;
ivi->spoofchk = vp->spoofchk;
if (vp->max_tx_bw == MAX_BW)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 10ed82b3baca..660c3f5b2237 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
- if (!err) {
- dev_info(&adapter->pdev->dev,
- "Failed to get current beacon state\n");
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to get current beacon state\n");
} else {
if (h_beacon_state == QLCNIC_BEACON_DISABLE)
ahw->beacon_state = 0;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e6acb9fa5767..d2e591955bdd 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -478,7 +478,7 @@ rx_status_loop:
while (1) {
u32 status, len;
- dma_addr_t mapping;
+ dma_addr_t mapping, new_mapping;
struct sk_buff *skb, *new_skb;
struct cp_desc *desc;
const unsigned buflen = cp->rx_buf_sz;
@@ -520,6 +520,14 @@ rx_status_loop:
goto rx_next;
}
+ new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
+ dev->stats.rx_dropped++;
+ kfree_skb(new_skb);
+ goto rx_next;
+ }
+
dma_unmap_single(&cp->pdev->dev, mapping,
buflen, PCI_DMA_FROMDEVICE);
@@ -531,12 +539,11 @@ rx_status_loop:
skb_put(skb, len);
- mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
- PCI_DMA_FROMDEVICE);
cp->rx_skb[rx_tail] = new_skb;
cp_rx_skb(cp, skb, desc);
rx++;
+ mapping = new_mapping;
rx_next:
cp->rx_ring[rx_tail].opts2 = 0;
@@ -716,6 +723,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}
+static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
+ int first, int entry_last)
+{
+ int frag, index;
+ struct cp_desc *txd;
+ skb_frag_t *this_frag;
+ for (frag = 0; frag+first < entry_last; frag++) {
+ index = first+frag;
+ cp->tx_skb[index] = NULL;
+ txd = &cp->tx_ring[index];
+ this_frag = &skb_shinfo(skb)->frags[frag];
+ dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
+ skb_frag_size(this_frag), PCI_DMA_TODEVICE);
+ }
+}
+
static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
struct net_device *dev)
{
@@ -749,6 +772,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
len = skb->len;
mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, mapping))
+ goto out_dma_error;
+
txd->opts2 = opts2;
txd->addr = cpu_to_le64(mapping);
wmb();
@@ -786,6 +812,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
first_len = skb_headlen(skb);
first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
first_len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, first_mapping))
+ goto out_dma_error;
+
cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
@@ -799,6 +828,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
mapping = dma_map_single(&cp->pdev->dev,
skb_frag_address(this_frag),
len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+ unwind_tx_frag_mapping(cp, skb, first_entry, entry);
+ goto out_dma_error;
+ }
+
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
ctrl = eor | len | DescOwn;
@@ -859,11 +893,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
+out_unlock:
spin_unlock_irqrestore(&cp->lock, intr_flags);
cpw8(TxPoll, NormalTxPoll);
return NETDEV_TX_OK;
+out_dma_error:
+ kfree_skb(skb);
+ cp->dev->stats.tx_dropped++;
+ goto out_unlock;
}
/* Set or clear the multicast filter for this adaptor.
@@ -1054,6 +1093,10 @@ static int cp_refill_rx(struct cp_private *cp)
mapping = dma_map_single(&cp->pdev->dev, skb->data,
cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+ kfree_skb(skb);
+ goto err_out;
+ }
cp->rx_skb[i] = skb;
cp->rx_ring[i].opts2 = 0;
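Every dma_map_single() the 8139cp hunks touch is now paired with a dma_mapping_error() check before the buffer reaches a descriptor. The generic shape of the fix, with cp/skb/len standing in for the driver's locals:

	mapping = dma_map_single(&cp->pdev->dev, skb->data, len,
				 PCI_DMA_TODEVICE);
	if (dma_mapping_error(&cp->pdev->dev, mapping)) {
		/* never hand the NIC an invalid bus address:
		 * drop the packet and account for it instead */
		kfree_skb(skb);
		cp->dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}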
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 4106a743ca74..85e5c97191dd 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3689,7 +3689,7 @@ static void rtl_phy_work(struct rtl8169_private *tp)
if (tp->link_ok(ioaddr))
return;
- netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
+ netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
tp->phy_reset_enable(tp);
@@ -6468,6 +6468,8 @@ static int rtl8169_close(struct net_device *dev)
rtl8169_down(dev);
rtl_unlock_work(tp);
+ cancel_work_sync(&tp->wk.work);
+
free_irq(pdev->irq, dev);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
@@ -6793,8 +6795,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
rtl8168_driver_stop(tp);
}
- cancel_work_sync(&tp->wk.work);
-
netif_napi_del(&tp->napi);
unregister_netdev(dev);
@@ -7088,7 +7088,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
RTL_W8(Cfg9346, Cfg9346_Unlock);
RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
- RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+ RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
tp->features |= RTL_FEATURE_WOL;
if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index b74a60ab9ac7..30d744235d27 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -675,7 +675,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
- rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF;
+ rep_index = spec->type - EFX_FILTER_UC_DEF;
ins_index = rep_index;
spin_lock_bh(&state->lock);
@@ -1209,7 +1209,9 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
+ efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
+ rxq_index);
rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
ip->daddr, ports[1], ip->saddr, ports[0]);
if (rc)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index eb4aea3fe793..f5d7ad75e479 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1318,7 +1318,7 @@ static void sis900_timer(unsigned long data)
if (duplex){
sis900_set_mode(sis_priv, speed, duplex);
sis630_set_eq(net_dev, sis_priv->chipset_rev);
- netif_start_queue(net_dev);
+ netif_carrier_on(net_dev);
}
sis_priv->timer.expires = jiffies + HZ;
@@ -1336,10 +1336,8 @@ static void sis900_timer(unsigned long data)
status = sis900_default_phy(net_dev);
mii_phy = sis_priv->mii;
- if (status & MII_STAT_LINK){
+ if (status & MII_STAT_LINK)
sis900_check_mode(net_dev, mii_phy);
- netif_carrier_on(net_dev);
- }
} else {
/* Link ON -> OFF */
if (!(status & MII_STAT_LINK)){
@@ -1612,12 +1610,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
unsigned int index_cur_tx, index_dirty_tx;
unsigned int count_dirty_tx;
- /* Don't transmit data before the complete of auto-negotiation */
- if(!sis_priv->autong_complete){
- netif_stop_queue(net_dev);
- return NETDEV_TX_BUSY;
- }
-
spin_lock_irqsave(&sis_priv->lock, flags);
/* Calculate the next Tx descriptor entry. */
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index c9d942a5c335..1ef9d8a555aa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
unsigned int entry = priv->cur_tx % txsize;
- struct dma_desc *desc = priv->dma_tx + entry;
+ struct dma_desc *desc;
unsigned int nopaged_len = skb_headlen(skb);
unsigned int bmax, len;
+ if (priv->extend_desc)
+ desc = (struct dma_desc *)(priv->dma_etx + entry);
+ else
+ desc = priv->dma_tx + entry;
+
if (priv->plat->enh_desc)
bmax = BUF_SIZE_8KiB;
else
@@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
STMMAC_RING_MODE);
wmb();
entry = (++priv->cur_tx) % txsize;
- desc = priv->dma_tx + entry;
+
+ if (priv->extend_desc)
+ desc = (struct dma_desc *)(priv->dma_etx + entry);
+ else
+ desc = priv->dma_tx + entry;
desc->des2 = dma_map_single(priv->device, skb->data + bmax,
len, DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f2ccb36e8685..0a9bb9d30c3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
GFP_KERNEL);
- if (unlikely(skb == NULL)) {
+ if (!skb) {
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
- return 1;
+ return -ENOMEM;
}
skb_reserve(skb, NET_IP_ALIGN);
priv->rx_skbuff[i] = skb;
priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
priv->dma_buf_sz,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+ pr_err("%s: DMA mapping error\n", __func__);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
p->des2 = priv->rx_skbuff_dma[i];
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
return 0;
}
+static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+{
+ if (priv->rx_skbuff[i]) {
+ dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ }
+ priv->rx_skbuff[i] = NULL;
+}
+
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 * and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static void init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev)
{
int i;
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int txsize = priv->dma_tx_size;
unsigned int rxsize = priv->dma_rx_size;
unsigned int bfsize = 0;
+ int ret = -ENOMEM;
/* Set the max buffer size according to the DESC mode
* and the MTU. Note that RING mode allows 16KiB bsize.
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev)
dma_extended_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
+ if (!priv->dma_erx)
+ goto err_dma;
+
priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
sizeof(struct
dma_extended_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
- if ((!priv->dma_erx) || (!priv->dma_etx))
- return;
+ if (!priv->dma_etx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ goto err_dma;
+ }
} else {
priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
sizeof(struct dma_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
+ if (!priv->dma_rx)
+ goto err_dma;
+
priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
sizeof(struct dma_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
- if ((!priv->dma_rx) || (!priv->dma_tx))
- return;
+ if (!priv->dma_tx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ goto err_dma;
+ }
}
priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
GFP_KERNEL);
+ if (!priv->rx_skbuff_dma)
+ goto err_rx_skbuff_dma;
+
priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
GFP_KERNEL);
+ if (!priv->rx_skbuff)
+ goto err_rx_skbuff;
+
priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
GFP_KERNEL);
+ if (!priv->tx_skbuff_dma)
+ goto err_tx_skbuff_dma;
+
priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
GFP_KERNEL);
+ if (!priv->tx_skbuff)
+ goto err_tx_skbuff;
+
if (netif_msg_probe(priv)) {
pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
(u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev)
else
p = priv->dma_rx + i;
- if (stmmac_init_rx_buffers(priv, p, i))
- break;
+ ret = stmmac_init_rx_buffers(priv, p, i);
+ if (ret)
+ goto err_init_rx_buffers;
if (netif_msg_probe(priv))
pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
@@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev)
if (netif_msg_hw(priv))
stmmac_display_rings(priv);
+
+ return 0;
+err_init_rx_buffers:
+ while (--i >= 0)
+ stmmac_free_rx_buffers(priv, i);
+ kfree(priv->tx_skbuff);
+err_tx_skbuff:
+ kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+ kfree(priv->rx_skbuff);
+err_rx_skbuff:
+ kfree(priv->rx_skbuff_dma);
+err_rx_skbuff_dma:
+ if (priv->extend_desc) {
+ dma_free_coherent(priv->device, priv->dma_tx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_etx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ } else {
+ dma_free_coherent(priv->device,
+ priv->dma_tx_size * sizeof(struct dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device,
+ priv->dma_rx_size * sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ }
+err_dma:
+ return ret;
}
static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
- if (priv->rx_skbuff[i]) {
- dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
- priv->dma_buf_sz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(priv->rx_skbuff[i]);
- }
- priv->rx_skbuff[i] = NULL;
- }
+ for (i = 0; i < priv->dma_rx_size; i++)
+ stmmac_free_rx_buffers(priv, i);
}
static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev)
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
- init_dma_desc_rings(dev);
+
+ ret = init_dma_desc_rings(dev);
+ if (ret < 0) {
+ pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ goto dma_desc_error;
+ }
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
- pr_err("%s: DMA initialization failed\n", __func__);
+ pr_err("%s: DMA engine initialization failed\n", __func__);
goto init_error;
}
@@ -1672,6 +1744,7 @@ wolirq_error:
init_error:
free_dma_desc_resources(priv);
+dma_desc_error:
if (priv->phydev)
phy_disconnect(priv->phydev);
phy_error:
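init_dma_desc_rings() above turns a void function that could leak on partial failure into the standard kernel unwind ladder: each allocation gets a label, and the error path frees in reverse order. A minimal sketch under hypothetical names (struct ctx, SZ_*):

	static int example_setup(struct ctx *c)
	{
		c->a = kmalloc(SZ_A, GFP_KERNEL);
		if (!c->a)
			return -ENOMEM;

		c->b = kmalloc(SZ_B, GFP_KERNEL);
		if (!c->b)
			goto err_free_a;

		c->c = kmalloc(SZ_C, GFP_KERNEL);
		if (!c->c)
			goto err_free_b;

		return 0;

	err_free_b:
		kfree(c->b);	/* unwind in reverse allocation order */
	err_free_a:
		kfree(c->a);
		return -ENOMEM;
	}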
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 05a1674e204f..22a7a4336211 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1867,7 +1867,7 @@ static int cpsw_probe(struct platform_device *pdev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
- if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
+ if (request_irq(i, cpsw_interrupt, 0,
dev_name(&pdev->dev), priv)) {
dev_err(priv->dev, "error attaching irq\n");
goto clean_ale_ret;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 07b176bcf929..1a222bce4bd7 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1568,8 +1568,7 @@ static int emac_dev_open(struct net_device *ndev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
- IRQF_DISABLED,
- ndev->name, ndev))
+ 0, ndev->name, ndev))
goto rollback;
}
k++;
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 1d6dc41f755d..d01cacf8a7c2 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
- netif_rx(skb);
+ netif_receive_skb(skb);
stats->rx_bytes += pkt_len;
stats->rx_packets++;
@@ -2884,6 +2884,7 @@ out:
return ret;
err_iounmap:
+ netif_napi_del(&vptr->napi);
iounmap(regs);
err_free_dev:
free_netdev(netdev);
@@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev)
struct velocity_info *vptr = netdev_priv(netdev);
unregister_netdev(netdev);
+ netif_napi_del(&vptr->napi);
iounmap(vptr->mac_regs);
free_netdev(netdev);
velocity_nics--;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 51f2bc376101..2dcc60fb37f1 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -210,8 +210,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
pci_write_config_byte(pcidev,0x5a,0xc0);
WriteLPCReg(0x28, 0x70 );
- if (via_ircc_open(pcidev, &info, 0x3076) == 0)
- rc=0;
+ rc = via_ircc_open(pcidev, &info, 0x3076);
} else
rc = -ENODEV; //IR not turn on
} else { //Not VT1211
@@ -249,8 +248,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
info.irq=FirIRQ;
info.dma=FirDRQ1;
info.dma2=FirDRQ0;
- if (via_ircc_open(pcidev, &info, 0x3096) == 0)
- rc=0;
+ rc = via_ircc_open(pcidev, &info, 0x3096);
} else
rc = -ENODEV; //IR not turn on !!!!!
}//Not VT1211
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 18373b6ae37d..16b43bf544b7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -337,8 +337,11 @@ static int macvlan_open(struct net_device *dev)
int err;
if (vlan->port->passthru) {
- if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
- dev_set_promiscuity(lowerdev, 1);
+ if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
+ err = dev_set_promiscuity(lowerdev, 1);
+ if (err < 0)
+ goto out;
+ }
goto hash_add;
}
@@ -736,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
return -EADDRNOTAVAIL;
}
+ if (data && data[IFLA_MACVLAN_FLAGS] &&
+ nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
+ return -EINVAL;
+
if (data && data[IFLA_MACVLAN_MODE]) {
switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
case MACVLAN_MODE_PRIVATE:
@@ -863,6 +870,18 @@ static int macvlan_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ enum macvlan_mode mode;
+ bool set_mode = false;
+
+ /* Validate mode, but don't set yet: setting flags may fail. */
+ if (data && data[IFLA_MACVLAN_MODE]) {
+ set_mode = true;
+ mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+ /* Passthrough mode can't be set or cleared dynamically */
+ if ((mode == MACVLAN_MODE_PASSTHRU) !=
+ (vlan->mode == MACVLAN_MODE_PASSTHRU))
+ return -EINVAL;
+ }
if (data && data[IFLA_MACVLAN_FLAGS]) {
__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
@@ -879,8 +898,8 @@ static int macvlan_changelink(struct net_device *dev,
}
vlan->flags = flags;
}
- if (data && data[IFLA_MACVLAN_MODE])
- vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+ if (set_mode)
+ vlan->mode = mode;
return 0;
}
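macvlan_changelink() now validates the requested mode before mutating any state, so a later flag failure cannot leave a half-applied configuration. The ordering in sketch form; IFLA_EXAMPLE_MODE, mode_is_valid() and cfg are illustrative, not macvlan symbols:

	bool set_mode = false;
	u32 mode;

	if (data && data[IFLA_EXAMPLE_MODE]) {
		mode = nla_get_u32(data[IFLA_EXAMPLE_MODE]);
		if (!mode_is_valid(mode))	/* hypothetical check */
			return -EINVAL;		/* reject before touching state */
		set_mode = true;
	}
	/* ... validate and apply the remaining attributes ... */
	if (set_mode)
		cfg->mode = mode;		/* commit last; cannot fail */
	return 0;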
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a98fb0ed6aef..ea53abb20988 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -68,6 +68,8 @@ static const struct proto_ops macvtap_socket_ops;
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+
/*
* RCU usage:
* The macvtap_queue and the macvlan_dev are loosely coupled, the
@@ -278,7 +280,8 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvtap_queue *q = macvtap_get_queue(dev, skb);
- netdev_features_t features;
+ netdev_features_t features = TAP_FEATURES;
+
if (!q)
goto drop;
@@ -287,9 +290,11 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
skb->dev = dev;
/* Apply the forward feature mask so that we perform segmentation
- * according to users wishes.
+ * according to users wishes. This only works if VNET_HDR is
+ * enabled.
*/
- features = netif_skb_features(skb) & vlan->tap_features;
+ if (q->flags & IFF_VNET_HDR)
+ features |= vlan->tap_features;
if (netif_needs_gso(skb, features)) {
struct sk_buff *segs = __skb_gso_segment(skb, features, false);
@@ -818,10 +823,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
- if (vlan)
+ if (vlan) {
+ local_bh_disable();
macvlan_start_xmit(skb, vlan->dev);
- else
+ local_bh_enable();
+ } else {
kfree_skb(skb);
+ }
rcu_read_unlock();
return total_len;
@@ -912,8 +920,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
done:
rcu_read_lock();
vlan = rcu_dereference(q->vlan);
- if (vlan)
+ if (vlan) {
+ preempt_disable();
macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
+ preempt_enable();
+ }
rcu_read_unlock();
return ret ? ret : copied;
@@ -1058,8 +1069,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
/* tap_features are the same as features on tun/tap and
* reflect user expectations.
*/
- vlan->tap_features = vlan->dev->features &
- (feature_mask | ~TUN_OFFLOADS);
+ vlan->tap_features = feature_mask;
vlan->set_features = features;
netdev_update_features(vlan->dev);
@@ -1155,10 +1165,6 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
- /* TODO: only accept frames with the features that
- got enabled for forwarded frames */
- if (!(q->flags & IFF_VNET_HDR))
- return -EINVAL;
rtnl_lock();
ret = set_offload(q, arg);
rtnl_unlock();
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 61d3f4ebf52e..7f25e49ae37f 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -40,7 +40,7 @@ struct sun4i_mdio_data {
static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct sun4i_mdio_data *data = bus->priv;
- unsigned long start_jiffies;
+ unsigned long timeout_jiffies;
int value;
/* issue the phy address and reg */
@@ -49,10 +49,9 @@ static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
writel(0x1, data->membase + EMAC_MAC_MCMD_REG);
/* Wait read complete */
- start_jiffies = jiffies;
+ timeout_jiffies = jiffies + MDIO_TIMEOUT;
while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) {
- if (time_after(start_jiffies,
- start_jiffies + MDIO_TIMEOUT))
+ if (time_is_before_jiffies(timeout_jiffies))
return -ETIMEDOUT;
msleep(1);
}
@@ -69,7 +68,7 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct sun4i_mdio_data *data = bus->priv;
- unsigned long start_jiffies;
+ unsigned long timeout_jiffies;
/* issue the phy address and reg */
writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG);
@@ -77,10 +76,9 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
writel(0x1, data->membase + EMAC_MAC_MCMD_REG);
/* Wait read complete */
- start_jiffies = jiffies;
+ timeout_jiffies = jiffies + MDIO_TIMEOUT;
while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) {
- if (time_after(start_jiffies,
- start_jiffies + MDIO_TIMEOUT))
+ if (time_is_before_jiffies(timeout_jiffies))
return -ETIMEDOUT;
msleep(1);
}
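The mdio-sun4i bug was that time_after(start_jiffies, start_jiffies + MDIO_TIMEOUT) compares a fixed timestamp against itself plus a constant, so it is never true and the poll loop could spin forever. The corrected idiom computes the deadline once and tests the moving jiffies counter against it; example_done() and the 100 ms budget below are illustrative:

	unsigned long deadline = jiffies + msecs_to_jiffies(100);

	while (!example_done(base)) {		/* hypothetical ready test */
		if (time_is_before_jiffies(deadline))
			return -ETIMEDOUT;
		msleep(1);
	}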
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 8e7af8354342..138de837977f 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -23,7 +23,7 @@
#define RTL821x_INER_INIT 0x6400
#define RTL821x_INSR 0x13
-#define RTL8211E_INER_LINK_STAT 0x10
+#define RTL8211E_INER_LINK_STATUS 0x400
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
@@ -57,7 +57,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, RTL821x_INER,
- RTL8211E_INER_LINK_STAT);
+ RTL8211E_INER_LINK_STATUS);
else
err = phy_write(phydev, RTL821x_INER, 0);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index db690a372260..71af122edf2d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
u32 rxhash;
if (!(tun->flags & TUN_NO_PI)) {
- if ((len -= sizeof(pi)) > total_len)
+ if (len < sizeof(pi))
return -EINVAL;
+ len -= sizeof(pi);
if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
return -EFAULT;
@@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
if (tun->flags & TUN_VNET_HDR) {
- if ((len -= tun->vnet_hdr_sz) > total_len)
+ if (len < tun->vnet_hdr_sz)
return -EINVAL;
+ len -= tun->vnet_hdr_sz;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
return -EFAULT;
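Both tun hunks replace a subtract-then-compare with a bounds check before the subtraction: with the old form a short len slipped past the header-size check instead of being rejected. The safe ordering, with hdr_len standing in for sizeof(pi) or vnet_hdr_sz:

	if (len < hdr_len)	/* reject short input first */
		return -EINVAL;
	len -= hdr_len;		/* the subtraction can no longer go negative */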
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 1e3c302d94fe..2bc87e3a8141 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1029,10 +1029,10 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.supports_gmii = 1;
dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_RXCSUM;
dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_RXCSUM;
/* Enable checksum offload */
*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
@@ -1173,7 +1173,6 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
if (((skb->len + 8) % frame_size) == 0)
tx_hdr2 |= 0x80008000; /* Enable padding */
- skb_linearize(skb);
headroom = skb_headroom(skb);
tailroom = skb_tailroom(skb);
@@ -1317,10 +1316,10 @@ static int ax88179_reset(struct usbnet *dev)
1, 1, tmp);
dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_RXCSUM;
dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_RXCSUM;
/* Enable checksum offload */
*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index cba1d46e672e..86292e6aaf49 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2816,13 +2816,16 @@ exit:
static int hso_get_config_data(struct usb_interface *interface)
{
struct usb_device *usbdev = interface_to_usbdev(interface);
- u8 config_data[17];
+ u8 *config_data = kmalloc(17, GFP_KERNEL);
u32 if_num = interface->altsetting->desc.bInterfaceNumber;
s32 result;
+ if (!config_data)
+ return -ENOMEM;
if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
0x86, 0xC0, 0, 0, config_data, 17,
USB_CTRL_SET_TIMEOUT) != 0x11) {
+ kfree(config_data);
return -EIO;
}
@@ -2873,6 +2876,7 @@ static int hso_get_config_data(struct usb_interface *interface)
if (config_data[16] & 0x1)
result |= HSO_INFO_CRC_BUG;
+ kfree(config_data);
return result;
}
@@ -2886,6 +2890,11 @@ static int hso_probe(struct usb_interface *interface,
struct hso_shared_int *shared_int;
struct hso_device *tmp_dev = NULL;
+ if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
+ dev_err(&interface->dev, "Not our interface\n");
+ return -ENODEV;
+ }
+
if_num = interface->altsetting->desc.bInterfaceNumber;
/* Get the interface/port specification from either driver_info or from
@@ -2895,10 +2904,6 @@ static int hso_probe(struct usb_interface *interface,
else
port_spec = hso_get_config_data(interface);
- if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
- dev_err(&interface->dev, "Not our interface\n");
- return -ENODEV;
- }
/* Check if we need to switch to alt interfaces prior to port
* configuration */
if (interface->num_altsetting > 1)
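hso_get_config_data() moves its 17-byte buffer from the stack to kmalloc() because usb_control_msg() buffers may be DMA-mapped, and DMA to on-stack memory is not allowed; the r8152 get_registers()/set_registers() changes below apply the same rule. A minimal sketch of the pattern, reusing the request values from the hso hunk:

	u8 *buf = kmalloc(17, GFP_KERNEL);	/* heap memory is DMA-safe */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x86, 0xC0, 0, 0, buf, 17,
			      USB_CTRL_SET_TIMEOUT);
	/* ... consume buf, then release it on every exit path ... */
	kfree(buf);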
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ee13f9eb740c..11c51f275366 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -344,17 +344,41 @@ static const int multicast_filter_limit = 32;
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
- return usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
+ int ret;
+ void *tmp;
+
+ tmp = kmalloc(size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
- value, index, data, size, 500);
+ value, index, tmp, size, 500);
+
+ memcpy(data, tmp, size);
+ kfree(tmp);
+
+ return ret;
}
static
int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
- return usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
+ int ret;
+ void *tmp;
+
+ tmp = kmalloc(size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ memcpy(tmp, data, size);
+
+ ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
- value, index, data, size, 500);
+ value, index, tmp, size, 500);
+
+ kfree(tmp);
+ return ret;
}
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
@@ -490,37 +514,31 @@ int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index)
{
- u32 data;
+ __le32 data;
- if (type == MCU_TYPE_PLA)
- pla_ocp_read(tp, index, sizeof(data), &data);
- else
- usb_ocp_read(tp, index, sizeof(data), &data);
+ generic_ocp_read(tp, index, sizeof(data), &data, type);
return __le32_to_cpu(data);
}
static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data)
{
- if (type == MCU_TYPE_PLA)
- pla_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
- else
- usb_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
+ __le32 tmp = __cpu_to_le32(data);
+
+ generic_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(tmp), &tmp, type);
}
static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index)
{
u32 data;
+ __le32 tmp;
u8 shift = index & 2;
index &= ~3;
- if (type == MCU_TYPE_PLA)
- pla_ocp_read(tp, index, sizeof(data), &data);
- else
- usb_ocp_read(tp, index, sizeof(data), &data);
+ generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
- data = __le32_to_cpu(data);
+ data = __le32_to_cpu(tmp);
data >>= (shift * 8);
data &= 0xffff;
@@ -529,7 +547,8 @@ static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index)
static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
{
- u32 tmp, mask = 0xffff;
+ u32 mask = 0xffff;
+ __le32 tmp;
u16 byen = BYTE_EN_WORD;
u8 shift = index & 2;
@@ -542,34 +561,25 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
- if (type == MCU_TYPE_PLA)
- pla_ocp_read(tp, index, sizeof(tmp), &tmp);
- else
- usb_ocp_read(tp, index, sizeof(tmp), &tmp);
+ generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
- tmp = __le32_to_cpu(tmp) & ~mask;
- tmp |= data;
- tmp = __cpu_to_le32(tmp);
+ data |= __le32_to_cpu(tmp) & ~mask;
+ tmp = __cpu_to_le32(data);
- if (type == MCU_TYPE_PLA)
- pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
- else
- usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
+ generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
}
static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index)
{
u32 data;
+ __le32 tmp;
u8 shift = index & 3;
index &= ~3;
- if (type == MCU_TYPE_PLA)
- pla_ocp_read(tp, index, sizeof(data), &data);
- else
- usb_ocp_read(tp, index, sizeof(data), &data);
+ generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
- data = __le32_to_cpu(data);
+ data = __le32_to_cpu(tmp);
data >>= (shift * 8);
data &= 0xff;
@@ -578,7 +588,8 @@ static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index)
static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
{
- u32 tmp, mask = 0xff;
+ u32 mask = 0xff;
+ __le32 tmp;
u16 byen = BYTE_EN_BYTE;
u8 shift = index & 3;
@@ -591,19 +602,12 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
- if (type == MCU_TYPE_PLA)
- pla_ocp_read(tp, index, sizeof(tmp), &tmp);
- else
- usb_ocp_read(tp, index, sizeof(tmp), &tmp);
+ generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
- tmp = __le32_to_cpu(tmp) & ~mask;
- tmp |= data;
- tmp = __cpu_to_le32(tmp);
+ data |= __le32_to_cpu(tmp) & ~mask;
+ tmp = __cpu_to_le32(data);
- if (type == MCU_TYPE_PLA)
- pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
- else
- usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
+ generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
}
static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
@@ -685,21 +689,14 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
static inline void set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
- u8 *node_id;
-
- node_id = kmalloc(sizeof(u8) * 8, GFP_KERNEL);
- if (!node_id) {
- netif_err(tp, probe, dev, "out of memory");
- return;
- }
+ u8 node_id[8] = {0};
- if (pla_ocp_read(tp, PLA_IDR, sizeof(u8) * 8, node_id) < 0)
+ if (pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id) < 0)
netif_notice(tp, probe, dev, "inet addr fail\n");
else {
memcpy(dev->dev_addr, node_id, dev->addr_len);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
}
- kfree(node_id);
}
static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
@@ -882,15 +879,10 @@ static void rtl8152_set_rx_mode(struct net_device *netdev)
static void _rtl8152_set_rx_mode(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- u32 tmp, *mc_filter; /* Multicast hash filter */
+ u32 mc_filter[2]; /* Multicast hash filter */
+ __le32 tmp[2];
u32 ocp_data;
- mc_filter = kmalloc(sizeof(u32) * 2, GFP_KERNEL);
- if (!mc_filter) {
- netif_err(tp, link, netdev, "out of memory");
- return;
- }
-
clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_stop_queue(netdev);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -918,14 +910,12 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
}
}
- tmp = mc_filter[0];
- mc_filter[0] = __cpu_to_le32(swab32(mc_filter[1]));
- mc_filter[1] = __cpu_to_le32(swab32(tmp));
+ tmp[0] = __cpu_to_le32(swab32(mc_filter[1]));
+ tmp[1] = __cpu_to_le32(swab32(mc_filter[0]));
- pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(u32) * 2, mc_filter);
+ pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(tmp), tmp);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
netif_wake_queue(netdev);
- kfree(mc_filter);
}
static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
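[Editorial note] The r8152.c hunks above replace direct use of caller-supplied buffers with kmalloc'd bounce buffers, since usb_control_msg() may DMA into the buffer and stack memory is not DMA-safe. A minimal sketch of the read side of that pattern; the function name and request constants here are illustrative, not the driver's actual API:

```c
#include <linux/slab.h>
#include <linux/usb.h>

/* Sketch: stage a control-transfer read through a kmalloc'd buffer,
 * which is guaranteed DMA-able, then copy the result back to the
 * caller's (possibly on-stack) buffer. */
static int usb_read_dma_safe(struct usb_device *udev, u8 request,
			     u16 value, u16 index, void *data, u16 size)
{
	void *tmp;
	int ret;

	tmp = kmalloc(size, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, index, tmp, size, 500);

	memcpy(data, tmp, size);	/* copy out to the caller */
	kfree(tmp);
	return ret;
}
```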
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c
index 852392269718..2df2f4fb42a7 100644
--- a/drivers/net/usb/r815x.c
+++ b/drivers/net/usb/r815x.c
@@ -24,34 +24,43 @@
static int pla_read_word(struct usb_device *udev, u16 index)
{
- int data, ret;
+ int ret;
u8 shift = index & 2;
- __le32 ocp_data;
+ __le32 *tmp;
+
+ tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
index &= ~3;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
- index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data),
- 500);
+ index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
if (ret < 0)
- return ret;
+ goto out2;
- data = __le32_to_cpu(ocp_data);
- data >>= (shift * 8);
- data &= 0xffff;
+ ret = __le32_to_cpu(*tmp);
+ ret >>= (shift * 8);
+ ret &= 0xffff;
- return data;
+out2:
+ kfree(tmp);
+ return ret;
}
static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
{
- __le32 ocp_data;
+ __le32 *tmp;
u32 mask = 0xffff;
u16 byen = BYTE_EN_WORD;
u8 shift = index & 2;
int ret;
+ tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
data &= mask;
if (shift) {
@@ -63,19 +72,20 @@ static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
- index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data),
- 500);
+ index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
if (ret < 0)
- return ret;
+ goto out3;
- data |= __le32_to_cpu(ocp_data) & ~mask;
- ocp_data = __cpu_to_le32(data);
+ data |= __le32_to_cpu(*tmp) & ~mask;
+ *tmp = __cpu_to_le32(data);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
- index, MCU_TYPE_PLA | byen, &ocp_data,
- sizeof(ocp_data), 500);
+ index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp),
+ 500);
+out3:
+ kfree(tmp);
return ret;
}
@@ -116,11 +126,18 @@ out1:
static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg)
{
struct usbnet *dev = netdev_priv(netdev);
+ int ret;
if (phy_id != R815x_PHY_ID)
return -EINVAL;
- return ocp_reg_read(dev, BASE_MII + reg * 2);
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return -ENODEV;
+
+ ret = ocp_reg_read(dev, BASE_MII + reg * 2);
+
+ usb_autopm_put_interface(dev->intf);
+ return ret;
}
static
@@ -131,7 +148,12 @@ void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
if (phy_id != R815x_PHY_ID)
return;
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return;
+
ocp_reg_write(dev, BASE_MII + reg * 2, val);
+
+ usb_autopm_put_interface(dev->intf);
}
static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -150,7 +172,7 @@ static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id = R815x_PHY_ID;
dev->mii.supports_gmii = 1;
- return 0;
+ return status;
}
static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -169,7 +191,7 @@ static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id = R815x_PHY_ID;
dev->mii.supports_gmii = 0;
- return 0;
+ return status;
}
static const struct driver_info r8152_info = {
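[Editorial note] The r815x.c MDIO hunks above bracket each register access with usb_autopm_get_interface()/usb_autopm_put_interface(), so an auto-suspended device is resumed before it is touched. A minimal sketch of the pattern, with reg_read() as a hypothetical stand-in for the driver's accessor:

```c
#include <linux/usb.h>

static int reg_read(struct usb_interface *intf, int reg);	/* hypothetical accessor */

/* Sketch: hold a runtime-PM reference for the duration of the access;
 * if the device cannot be resumed, fail instead of touching it. */
static int mdio_read_autopm(struct usb_interface *intf, int reg)
{
	int ret;

	if (usb_autopm_get_interface(intf) < 0)
		return -ENODEV;

	ret = reg_read(intf, reg);

	usb_autopm_put_interface(intf);	/* allow auto-suspend again */
	return ret;
}
```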
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 75409748c774..66ebbacf066f 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -45,7 +45,6 @@
#define EEPROM_MAC_OFFSET (0x01)
#define DEFAULT_TX_CSUM_ENABLE (true)
#define DEFAULT_RX_CSUM_ENABLE (true)
-#define DEFAULT_TSO_ENABLE (true)
#define SMSC75XX_INTERNAL_PHY_ID (1)
#define SMSC75XX_TX_OVERHEAD (8)
#define MAX_RX_FIFO_SIZE (20 * 1024)
@@ -1410,17 +1409,14 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
- if (DEFAULT_TX_CSUM_ENABLE) {
+ if (DEFAULT_TX_CSUM_ENABLE)
dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- if (DEFAULT_TSO_ENABLE)
- dev->net->features |= NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_TSO6;
- }
+
if (DEFAULT_RX_CSUM_ENABLE)
dev->net->features |= NETIF_F_RXCSUM;
dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM;
ret = smsc75xx_wait_ready(dev, 0);
if (ret < 0) {
@@ -2200,8 +2196,6 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
{
u32 tx_cmd_a, tx_cmd_b;
- skb_linearize(skb);
-
if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
struct sk_buff *skb2 =
skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index da866523cf20..eee1f19ef1e9 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -269,6 +269,7 @@ static void veth_setup(struct net_device *dev)
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
dev->features |= VETH_FEATURES;
+ dev->vlan_features = dev->features;
dev->destructor = veth_dev_free;
dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a5ba8dd7e6be..767f7af3bd40 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -136,7 +136,8 @@ struct vxlan_dev {
u32 flags; /* VXLAN_F_* below */
struct work_struct sock_work;
- struct work_struct igmp_work;
+ struct work_struct igmp_join;
+ struct work_struct igmp_leave;
unsigned long age_interval;
struct timer_list age_timer;
@@ -736,7 +737,6 @@ static bool vxlan_snoop(struct net_device *dev,
return false;
}
-
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
{
@@ -770,12 +770,13 @@ static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
queue_work(vxlan_wq, &vs->del_work);
}
-/* Callback to update multicast group membership.
- * Scheduled when vxlan goes up/down.
+/* Callback to update multicast group membership when first VNI on
+ * multicast address is brought up.
+ * Done as workqueue because ip_mc_join_group acquires RTNL.
*/
-static void vxlan_igmp_work(struct work_struct *work)
+static void vxlan_igmp_join(struct work_struct *work)
{
- struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_work);
+ struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
struct vxlan_sock *vs = vxlan->vn_sock;
struct sock *sk = vs->sock->sk;
@@ -785,10 +786,27 @@ static void vxlan_igmp_work(struct work_struct *work)
};
lock_sock(sk);
- if (vxlan_group_used(vn, vxlan->default_dst.remote_ip))
- ip_mc_join_group(sk, &mreq);
- else
- ip_mc_leave_group(sk, &mreq);
+ ip_mc_join_group(sk, &mreq);
+ release_sock(sk);
+
+ vxlan_sock_release(vn, vs);
+ dev_put(vxlan->dev);
+}
+
+/* Inverse of vxlan_igmp_join when last VNI is brought down */
+static void vxlan_igmp_leave(struct work_struct *work)
+{
+ struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
+ struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
+ struct vxlan_sock *vs = vxlan->vn_sock;
+ struct sock *sk = vs->sock->sk;
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
+ .imr_ifindex = vxlan->default_dst.remote_ifindex,
+ };
+
+ lock_sock(sk);
+ ip_mc_leave_group(sk, &mreq);
release_sock(sk);
vxlan_sock_release(vn, vs);
@@ -1359,6 +1377,7 @@ static void vxlan_uninit(struct net_device *dev)
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
+ struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_sock *vs = vxlan->vn_sock;
@@ -1366,10 +1385,11 @@ static int vxlan_open(struct net_device *dev)
if (!vs)
return -ENOTCONN;
- if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
+ if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
+ vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
- queue_work(vxlan_wq, &vxlan->igmp_work);
+ queue_work(vxlan_wq, &vxlan->igmp_join);
}
if (vxlan->age_interval)
@@ -1400,13 +1420,15 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
+ struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_sock *vs = vxlan->vn_sock;
- if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
+ if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
!vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
- queue_work(vxlan_wq, &vxlan->igmp_work);
+ queue_work(vxlan_wq, &vxlan->igmp_leave);
}
del_timer_sync(&vxlan->age_timer);
@@ -1471,7 +1493,8 @@ static void vxlan_setup(struct net_device *dev)
INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
- INIT_WORK(&vxlan->igmp_work, vxlan_igmp_work);
+ INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
+ INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
init_timer_deferrable(&vxlan->age_timer);
@@ -1770,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
- flush_workqueue(vxlan_wq);
-
spin_lock(&vn->sock_lock);
hlist_del_rcu(&vxlan->hlist);
spin_unlock(&vn->sock_lock);
@@ -1878,10 +1899,12 @@ static __net_exit void vxlan_exit_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan;
+ LIST_HEAD(list);
rtnl_lock();
list_for_each_entry(vxlan, &vn->vxlan_list, next)
- dev_close(vxlan->dev);
+ unregister_netdevice_queue(vxlan->dev, &list);
+ unregister_netdevice_many(&list);
rtnl_unlock();
}
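[Editorial note] The vxlan hunks above split the single igmp_work into dedicated join and leave work items: ip_mc_join_group() and ip_mc_leave_group() acquire RTNL, so they cannot run directly from the open/stop paths, and each queued work holds socket and device references that it drops on completion. A minimal sketch of the two-work-item shape, with stand-in names:

```c
#include <linux/printk.h>
#include <linux/workqueue.h>

struct mcast_dev {
	struct work_struct join_work;
	struct work_struct leave_work;
};

static void mcast_join(struct work_struct *work)
{
	struct mcast_dev *md = container_of(work, struct mcast_dev, join_work);

	/* ... lock_sock(), ip_mc_join_group(), release_sock(), drop refs ... */
	pr_debug("join running for %p\n", md);
}

static void mcast_leave(struct work_struct *work)
{
	struct mcast_dev *md = container_of(work, struct mcast_dev, leave_work);

	/* ... lock_sock(), ip_mc_leave_group(), release_sock(), drop refs ... */
	pr_debug("leave running for %p\n", md);
}

static void mcast_dev_setup(struct mcast_dev *md)
{
	INIT_WORK(&md->join_work, mcast_join);
	INIT_WORK(&md->leave_work, mcast_leave);
}
```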
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index cde58fe96254..82e8088ca9b4 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -1,6 +1,6 @@
config ATH10K
tristate "Atheros 802.11ac wireless cards support"
- depends on MAC80211
+ depends on MAC80211 && HAS_DMA
select ATH_COMMON
---help---
This module adds support for wireless adapters based on
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 81b686c6a376..40825d43322e 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -325,7 +325,7 @@ ath5k_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr *ha;
mfilt[0] = 0;
- mfilt[1] = 1;
+ mfilt[1] = 0;
netdev_hw_addr_list_for_each(ha, mc_list) {
/* calculate XOR of eight 6-bit values */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index d1acfe98918a..1576d58291d4 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -610,7 +610,15 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
if (AR_SREV_9280_20_OR_LATER(ah)) {
- val = REG_READ(ah, AR_PCU_MISC_MODE2);
+ /*
+ * For AR9280 and above, there is a new feature that allows
+ * Multicast search based on both MAC Address and Key ID.
+ * By default, this feature is enabled. But since the driver
+ * is not using this feature, we switch it off; otherwise
+ * multicast search based on MAC addr only will fail.
+ */
+ val = REG_READ(ah, AR_PCU_MISC_MODE2) &
+ (~AR_ADHOC_MCAST_KEYID_ENABLE);
if (!AR_SREV_9271(ah))
val &= ~AR_PCU_MISC_MODE2_HWWAR1;
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 9e582e14da74..5205a3625e84 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1082,7 +1082,7 @@ static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
struct device *dev = &hif_dev->udev->dev;
struct device *parent = dev->parent;
- complete(&hif_dev->fw_done);
+ complete_all(&hif_dev->fw_done);
if (parent)
device_lock(parent);
@@ -1131,7 +1131,7 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
release_firmware(fw);
hif_dev->flags |= HIF_USB_READY;
- complete(&hif_dev->fw_done);
+ complete_all(&hif_dev->fw_done);
return;
@@ -1295,7 +1295,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
usb_set_intfdata(interface, NULL);
- if (!unplugged && (hif_dev->flags & HIF_USB_START))
+ /* If firmware was loaded we should drop it and
+ * go back to the first stage bootloader. */
+ if (!unplugged && (hif_dev->flags & HIF_USB_READY))
ath9k_hif_usb_reboot(udev);
kfree(hif_dev);
@@ -1316,7 +1318,10 @@ static int ath9k_hif_usb_suspend(struct usb_interface *interface,
if (!(hif_dev->flags & HIF_USB_START))
ath9k_htc_suspend(hif_dev->htc_handle);
- ath9k_hif_usb_dealloc_urbs(hif_dev);
+ wait_for_completion(&hif_dev->fw_done);
+
+ if (hif_dev->flags & HIF_USB_READY)
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
return 0;
}
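[Editorial note] The hif_usb.c hunks above switch fw_done from complete() to complete_all() and make the suspend path wait on it. complete_all() marks the completion done for every current and future waiter, so a late wait_for_completion() (here, in suspend) cannot block forever the way a single complete() consumed by an earlier waiter could. A minimal sketch, assuming a ready flag set by the firmware callback and fw_done initialized elsewhere with init_completion():

```c
#include <linux/completion.h>

struct fw_state {
	struct completion fw_done;	/* init_completion() before use */
	bool ready;
};

static void fw_load_finished(struct fw_state *st, bool ok)
{
	st->ready = ok;
	complete_all(&st->fw_done);	/* releases all waiters, now and later */
}

static void fw_suspend(struct fw_state *st)
{
	wait_for_completion(&st->fw_done);	/* returns immediately once done */
	if (st->ready) {
		/* ... safe to tear down URBs ... */
	}
}
```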
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 71a183ffc77f..c3676bf1d6c4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -861,6 +861,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
if (error != 0)
goto err_rx;
+ ath9k_hw_disable(priv->ah);
#ifdef CONFIG_MAC80211_LEDS
/* must be initialized before ieee80211_register_hw */
priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index c59ae43b9b35..927992732620 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -146,6 +146,28 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
ARRAY_SIZE(bf->rates));
}
+static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
+ struct sk_buff *skb)
+{
+ int q;
+
+ q = skb_get_queue_mapping(skb);
+ if (txq == sc->tx.uapsdq)
+ txq = sc->tx.txq_map[q];
+
+ if (txq != sc->tx.txq_map[q])
+ return;
+
+ if (WARN_ON(--txq->pending_frames < 0))
+ txq->pending_frames = 0;
+
+ if (txq->stopped &&
+ txq->pending_frames < sc->tx.txq_max_pending[q]) {
+ ieee80211_wake_queue(sc->hw, q);
+ txq->stopped = false;
+ }
+}
+
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
@@ -167,6 +189,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
if (!bf) {
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
+ ath_txq_skb_done(sc, txq, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
}
@@ -811,6 +834,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
if (!bf) {
__skb_unlink(skb, &tid->buf_q);
+ ath_txq_skb_done(sc, txq, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
}
@@ -1824,6 +1848,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
+ ath_txq_skb_done(sc, txq, skb);
ieee80211_free_txskb(sc->hw, skb);
return;
}
@@ -2090,6 +2115,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
+ ath_txq_skb_done(sc, txq, skb);
if (txctl->paprd)
dev_kfree_skb_any(skb);
else
@@ -2189,7 +2215,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
- int q, padpos, padsize;
+ int padpos, padsize;
unsigned long flags;
ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
@@ -2225,21 +2251,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
__skb_queue_tail(&txq->complete_q, skb);
-
- q = skb_get_queue_mapping(skb);
- if (txq == sc->tx.uapsdq)
- txq = sc->tx.txq_map[q];
-
- if (txq == sc->tx.txq_map[q]) {
- if (WARN_ON(--txq->pending_frames < 0))
- txq->pending_frames = 0;
-
- if (txq->stopped &&
- txq->pending_frames < sc->tx.txq_max_pending[q]) {
- ieee80211_wake_queue(sc->hw, q);
- txq->stopped = false;
- }
- }
+ ath_txq_skb_done(sc, txq, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
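[Editorial note] The xmit.c hunks above extract the duplicated pending-frame accounting into ath_txq_skb_done(), so every path that frees an skb (buffer-setup failure or normal completion) decrements the counter and wakes the queue through one helper. A minimal sketch of the shape, with stand-in types:

```c
#include <linux/types.h>

struct txq {
	int pending;		/* frames still owned by the driver */
	int max_pending;
	bool stopped;
};

/* Sketch: single place for the bookkeeping all drop/complete paths share. */
static void txq_skb_done(struct txq *q)
{
	if (--q->pending < 0)	/* defensive clamp, mirrors the WARN_ON above */
		q->pending = 0;

	if (q->stopped && q->pending < q->max_pending) {
		/* ... wake the corresponding mac80211 queue ... */
		q->stopped = false;
	}
}
```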
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index e8308ec30970..ab636767fbde 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -145,7 +145,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
le16_to_cpu(hdr.type), hdr.flags);
if (len <= MAX_MBOXITEM_SIZE) {
int n = 0;
- unsigned char printbuf[16 * 3 + 2];
+ char printbuf[16 * 3 + 2];
unsigned char databuf[MAX_MBOXITEM_SIZE];
void __iomem *src = wmi_buffer(wil, d.addr) +
sizeof(struct wil6210_mbox_hdr);
@@ -416,7 +416,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, " SKB = %p\n", skb);
if (skb) {
- unsigned char printbuf[16 * 3 + 2];
+ char printbuf[16 * 3 + 2];
int i = 0;
int len = le16_to_cpu(d->dma.length);
void *p = skb->data;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 8e8975562ec3..80099016d21f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -242,7 +242,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
{
unsigned long flags;
- if (!ifp)
+ if (!ifp || !ifp->ndev)
return;
brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index f0d9f7f6c83d..29b1f24c2d0f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -1744,13 +1744,14 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
ulong flags;
int fifo = BRCMF_FWS_FIFO_BCMC;
bool multicast = is_multicast_ether_addr(eh->h_dest);
+ bool pae = eh->h_proto == htons(ETH_P_PAE);
/* determine the priority */
if (!skb->priority)
skb->priority = cfg80211_classify8021d(skb);
drvr->tx_multicast += !!multicast;
- if (ntohs(eh->h_proto) == ETH_P_PAE)
+ if (pae)
atomic_inc(&ifp->pend_8021x_cnt);
if (!brcmf_fws_fc_active(fws)) {
@@ -1781,6 +1782,11 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
brcmf_fws_schedule_deq(fws);
} else {
brcmf_err("drop skb: no hanger slot\n");
+ if (pae) {
+ atomic_dec(&ifp->pend_8021x_cnt);
+ if (waitqueue_active(&ifp->pend_8021x_wait))
+ wake_up(&ifp->pend_8021x_wait);
+ }
brcmu_pkt_buf_free_skb(skb);
}
brcmf_fws_unlock(drvr, flags);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 277b37ae7126..7fa71f73cfe8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1093,8 +1093,11 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n ");
err = brcmf_fil_cmd_data_set(vif->ifp,
BRCMF_C_DISASSOC, NULL, 0);
- if (err)
+ if (err) {
brcmf_err("WLC_DISASSOC failed (%d)\n", err);
+ cfg80211_disconnected(vif->wdev.netdev, 0,
+ NULL, 0, GFP_KERNEL);
+ }
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 7365674366f4..010b252be584 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv)
if (!priv->join_status)
goto done;
- if (priv->join_status > CW1200_JOIN_STATUS_IBSS) {
- wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n",
- priv->join_status);
- BUG_ON(1);
- }
+ if (priv->join_status == CW1200_JOIN_STATUS_AP)
+ goto done;
cancel_work_sync(&priv->update_filtering_work);
cancel_work_sync(&priv->set_beacon_wakeup_period_work);
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c
index 5862c373d714..e824d4d4a18d 100644
--- a/drivers/net/wireless/cw1200/txrx.c
+++ b/drivers/net/wireless/cw1200/txrx.c
@@ -1165,7 +1165,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
if (cw1200_handle_action_rx(priv, skb))
return;
} else if (ieee80211_is_beacon(frame->frame_control) &&
- !arg->status &&
+ !arg->status && priv->vif &&
!memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid,
ETH_ALEN)) {
const u8 *tim_ie;
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index ac074731335a..e5090309824e 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
- memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
+ memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
data->flags = 1; /* has quality information */
- memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
+ memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
sizeof(struct iw_quality) * data->length);
kfree(addr);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index b9b2bb51e605..f2ed62e37340 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -4460,12 +4460,12 @@ il4965_irq_tasklet(struct il_priv *il)
* is killed. Hence update the killswitch state here. The
* rfkill handler will care about restarting if needed.
*/
- if (!test_bit(S_ALIVE, &il->status)) {
- if (hw_rf_kill)
- set_bit(S_RFKILL, &il->status);
- else
- clear_bit(S_RFKILL, &il->status);
+ if (hw_rf_kill) {
+ set_bit(S_RFKILL, &il->status);
+ } else {
+ clear_bit(S_RFKILL, &il->status);
wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+ il_force_reset(il, true);
}
handled |= CSR_INT_BIT_RF_KILL;
@@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il)
il->active_rate = RATES_MASK;
+ il_power_update_mode(il, true);
+ D_INFO("Updated power mode\n");
+
if (il_is_associated(il)) {
struct il_rxon_cmd *active_rxon =
(struct il_rxon_cmd *)&il->active;
@@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il)
D_INFO("ALIVE processing complete.\n");
wake_up(&il->wait_command_queue);
- il_power_update_mode(il, true);
- D_INFO("Updated power mode\n");
-
return;
restart:
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 3195aad440dd..b03e22ef5462 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external)
return 0;
}
+EXPORT_SYMBOL(il_force_reset);
int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 822f1a00efbb..319387263e12 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1068,7 +1068,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ return;
+
+ if (ctx->vif)
ieee80211_chswitch_done(ctx->vif, is_success);
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 3952ddf2ddb2..1531a4fc0960 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -758,7 +758,7 @@ int iwl_alive_start(struct iwl_priv *priv)
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
- } else {
+ } else if (priv->lib->bt_params) {
/*
* default is 2-wire BT coexexistence support
*/
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 7e5e5c2f9f87..83da884cf303 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -134,7 +134,7 @@ struct wowlan_key_data {
struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
struct iwl_wowlan_tkip_params_cmd *tkip;
bool error, use_rsc_tsc, use_tkip;
- int gtk_key_idx;
+ int wep_key_idx;
};
static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
@@ -188,8 +188,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
wkc.wep_key.key_offset = 0;
} else {
/* others start at 1 */
- data->gtk_key_idx++;
- wkc.wep_key.key_offset = data->gtk_key_idx;
+ data->wep_key_idx++;
+ wkc.wep_key.key_offset = data->wep_key_idx;
}
ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
@@ -316,8 +316,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
mvm->ptk_ivlen = key->iv_len;
mvm->ptk_icvlen = key->icv_len;
} else {
- data->gtk_key_idx++;
- key->hw_key_idx = data->gtk_key_idx;
+ /*
+ * firmware only supports TSC/RSC for a single key,
+ * so if there are multiple keep overwriting them
+ * with new ones -- this relies on mac80211 doing
+ * list_add_tail().
+ */
+ key->hw_key_idx = 1;
mvm->gtk_ivlen = key->iv_len;
mvm->gtk_icvlen = key->icv_len;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index e56ed2a84888..c24a744910ac 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -988,7 +988,11 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
char buf[100];
- if (!dbgfs_dir)
+ /*
+ * Check if the debugfs directory already exists before creating it.
+ * This may happen when, for example, resetting hw or during suspend-resume.
+ */
+ if (!dbgfs_dir || mvmvif->dbgfs_dir)
return;
mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index b60d14151721..365095a0c3b3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -69,7 +69,6 @@
/* Scan Commands, Responses, Notifications */
/* Masks for iwl_scan_channel.type flags */
-#define SCAN_CHANNEL_TYPE_PASSIVE 0
#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)
#define SCAN_CHANNEL_NARROW_BAND BIT(22)
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index e08683b20531..f19baf0dea6b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -257,7 +257,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (ret)
return ret;
- return ieee80211_register_hw(mvm->hw);
+ ret = ieee80211_register_hw(mvm->hw);
+ if (ret)
+ iwl_mvm_leds_exit(mvm);
+
+ return ret;
}
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
@@ -385,6 +389,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_wake_queues(mvm->hw);
mvm->vif_count = 0;
+ mvm->rx_ba_sessions = 0;
}
static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
@@ -507,6 +512,27 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_unlock;
/*
+ * TODO: remove this temporary code.
+ * Currently MVM FW supports power management only on single MAC.
+ * If new interface added, disable PM on existing interface.
+ * P2P device is a special case, since it is handled by FW similarly to
+ * scan. If a P2P device is added, PM remains enabled on the existing
+ * interface.
+ * Note: the method below does not count the new interface being added
+ * at this moment.
+ */
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ mvm->vif_count++;
+ if (mvm->vif_count > 1) {
+ IWL_DEBUG_MAC80211(mvm,
+ "Disable power on existing interfaces\n");
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_pm_disable_iterator, mvm);
+ }
+
+ /*
* The AP binding flow can be done only after the beacon
* template is configured (which happens only in the mac80211
* start_ap() flow), and adding the broadcast station can happen
@@ -529,27 +555,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_unlock;
}
- /*
- * TODO: remove this temporary code.
- * Currently MVM FW supports power management only on single MAC.
- * If new interface added, disable PM on existing interface.
- * P2P device is a special case, since it is handled by FW similary to
- * scan. If P2P deviced is added, PM remains enabled on existing
- * interface.
- * Note: the method below does not count the new interface being added
- * at this moment.
- */
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
- mvm->vif_count++;
- if (mvm->vif_count > 1) {
- IWL_DEBUG_MAC80211(mvm,
- "Disable power on existing interfaces\n");
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_pm_disable_iterator, mvm);
- }
-
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
goto out_release;
@@ -1006,6 +1011,21 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
+ /*
+ * Firmware bug - it'll crash if the beacon interval is less
+ * than 16. We can't avoid connecting at all, so refuse the
+ * station state change, this will cause mac80211 to abandon
+ * attempts to connect to this AP, and eventually wpa_s will
+ * blacklist the AP...
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ vif->bss_conf.beacon_int < 16) {
+ IWL_ERR(mvm,
+ "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
+ sta->addr, vif->bss_conf.beacon_int);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
ret = iwl_mvm_add_sta(mvm, vif, sta);
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_AUTH) {
@@ -1038,6 +1058,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else {
ret = -EIO;
}
+ out_unlock:
mutex_unlock(&mvm->mutex);
return ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d40d7db185d6..420e82d379d9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -419,6 +419,7 @@ struct iwl_mvm {
struct work_struct sta_drained_wk;
unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
atomic_t pending_frames[IWL_MVM_STATION_COUNT];
+ u8 rx_ba_sessions;
/* configured by mac80211 */
u32 rts_threshold;
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 2157b0f8ced5..acdff6b67e04 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -137,8 +137,8 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
{
int fw_idx, req_idx;
- fw_idx = 0;
- for (req_idx = req->n_ssids - 1; req_idx > 0; req_idx--) {
+ for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
+ req_idx--, fw_idx++) {
cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
memcpy(cmd->direct_scan[fw_idx].ssid,
@@ -153,7 +153,9 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
* just to notify that this scan is active and not passive.
* In order to notify the FW of the number of SSIDs we wish to scan (including
* the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first).
+ * one for each SSID, and set the active bit (first). The first SSID is already
+ * included in the probe template, so we need to set only req->n_ssids - 1 bits
+ * in addition to the first bit.
*/
static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
{
@@ -176,19 +178,12 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
int i;
- __le32 chan_type_value;
-
- if (req->n_ssids > 0)
- chan_type_value = cpu_to_le32(BIT(req->n_ssids + 1) - 1);
- else
- chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE;
for (i = 0; i < cmd->channel_count; i++) {
chan->channel = cpu_to_le16(req->channels[i]->hw_value);
+ chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- chan->type = SCAN_CHANNEL_TYPE_PASSIVE;
- else
- chan->type = chan_type_value;
+ chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
chan->active_dwell = cpu_to_le16(active_dwell);
chan->passive_dwell = cpu_to_le16(passive_dwell);
chan->iteration_count = cpu_to_le16(1);
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 62fe5209093b..563f559b902d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -608,6 +608,8 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta)
return ret;
}
+#define IWL_MAX_RX_BA_SESSIONS 16
+
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
@@ -618,11 +620,20 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lockdep_assert_held(&mvm->mutex);
+ if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
+ IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
+ return -ENOSPC;
+ }
+
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
cmd.sta_id = mvm_sta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
- cmd.add_immediate_ba_tid = (u8) tid;
- cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ if (start) {
+ cmd.add_immediate_ba_tid = (u8) tid;
+ cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ } else {
+ cmd.remove_immediate_ba_tid = (u8) tid;
+ }
cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
STA_MODIFY_REMOVE_BA_TID;
@@ -648,6 +659,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
+ if (!ret) {
+ if (start)
+ mvm->rx_ba_sessions++;
+ else if (mvm->rx_ba_sessions > 0)
+ /* check that restart flow didn't zero the counter */
+ mvm->rx_ba_sessions--;
+ }
+
return ret;
}
@@ -896,6 +915,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
u16 txq_id;
+ enum iwl_mvm_agg_state old_state;
/*
* First set the agg state to OFF to avoid calling
@@ -905,13 +925,17 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
txq_id = tid_data->txq_id;
IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
mvmsta->sta_id, tid, txq_id, tid_data->state);
+ old_state = tid_data->state;
tid_data->state = IWL_AGG_OFF;
spin_unlock_bh(&mvmsta->lock);
- if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
- IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+ if (old_state >= IWL_AGG_ON) {
+ if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
+ IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+
+ iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
+ }
- iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
mvm->queue_to_mac80211[tid_data->txq_id] =
IWL_INVALID_MAC80211_QUEUE;
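[Editorial note] The sta.c hunks above add per-device accounting of RX block-ack sessions, refusing a new ADDBA once IWL_MAX_RX_BA_SESSIONS are active and decrementing carefully on stop, since the restart flow may already have zeroed the counter. A minimal sketch of the guarded counter, with stand-in names:

```c
#include <linux/errno.h>
#include <linux/types.h>

#define MAX_BA_SESSIONS 16	/* mirrors IWL_MAX_RX_BA_SESSIONS */

static int ba_session_update(u8 *sessions, bool start)
{
	if (start && *sessions >= MAX_BA_SESSIONS)
		return -ENOSPC;		/* refuse to overcommit the firmware */

	if (start)
		(*sessions)++;
	else if (*sessions > 0)		/* restart flow may have zeroed it */
		(*sessions)--;

	return 0;
}
```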
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index ad9bbca99213..7fd6fbfbc1b3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -138,6 +138,20 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
schedule_work(&mvm->roc_done_wk);
}
+static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const char *errmsg)
+{
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return false;
+ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
+ return false;
+ if (errmsg)
+ IWL_ERR(mvm, "%s\n", errmsg);
+ ieee80211_connection_loss(vif);
+ return true;
+}
+
/*
* Handles a FW notification for an event that is known to the driver.
*
@@ -163,8 +177,13 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
* P2P Device discoveribility, while there are other higher priority
* events in the system).
*/
- WARN_ONCE(!le32_to_cpu(notif->status),
- "Failed to schedule time event\n");
+ if (WARN_ONCE(!le32_to_cpu(notif->status),
+ "Failed to schedule time event\n")) {
+ if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
+ iwl_mvm_te_clear_data(mvm, te_data);
+ return;
+ }
+ }
if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
IWL_DEBUG_TE(mvm,
@@ -180,14 +199,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
* By now, we should have finished association
* and know the dtim period.
*/
- if (te_data->vif->type == NL80211_IFTYPE_STATION &&
- (!te_data->vif->bss_conf.assoc ||
- !te_data->vif->bss_conf.dtim_period)) {
- IWL_ERR(mvm,
- "No assocation and the time event is over already...\n");
- ieee80211_connection_loss(te_data->vif);
- }
-
+ iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+ "No assocation and the time event is over already...");
iwl_mvm_te_clear_data(mvm, te_data);
} else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
te_data->running = true;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 81f3ea5b09a4..ff13458efc27 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -130,6 +130,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 826c15602c46..390e2f058aff 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -670,6 +670,11 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return err;
}
+ /* Reset the entire device */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ usleep_range(10, 15);
+
iwl_pcie_apm_init(trans);
/* From now on, the op_mode will be kept updated about RF kill state */
@@ -1497,16 +1502,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
spin_lock_init(&trans_pcie->reg_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
- /* W/A - seems to solve weird behavior. We need to remove this if we
- * don't want to stay in L1 all the time. This wastes a lot of power */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
if (pci_enable_device(pdev)) {
err = -ENODEV;
goto out_no_pci;
}
+ /* W/A - seems to solve weird behavior. We need to remove this if we
+ * don't want to stay in L1 all the time. This wastes a lot of power */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index ef5fa890a286..89459db4c53b 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1716,9 +1716,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int ret;
- if (priv->bss_mode != NL80211_IFTYPE_STATION) {
+ if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
wiphy_err(wiphy,
- "%s: reject infra assoc request in non-STA mode\n",
+ "%s: reject infra assoc request in non-STA role\n",
dev->name);
return -EINVAL;
}
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 988552dece75..5178c4630d89 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -415,7 +415,8 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
u32 k = 0;
struct mwifiex_adapter *adapter = priv->adapter;
- if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+ if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+ priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
switch (adapter->config_bands) {
case BAND_B:
dev_dbg(adapter->dev, "info: infra band=%d "
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index caaf4bd56b30..2cf8b964e966 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -693,7 +693,7 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
if (!ret) {
dev_notice(adapter->dev,
"WLAN FW already running! Skip FW dnld\n");
- goto done;
+ return 0;
}
poll_num = MAX_FIRMWARE_POLL_TRIES;
@@ -719,14 +719,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
poll_fw:
/* Check if the firmware is downloaded successfully or not */
ret = adapter->if_ops.check_fw_status(adapter, poll_num);
- if (ret) {
+ if (ret)
dev_err(adapter->dev, "FW failed to be active in time\n");
- return -1;
- }
-done:
- /* re-enable host interrupt for mwifiex after fw dnld is successful */
- if (adapter->if_ops.enable_int)
- adapter->if_ops.enable_int(adapter);
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 1c8a771e8e81..12e778159ec5 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1291,8 +1291,10 @@ int mwifiex_associate(struct mwifiex_private *priv,
{
u8 current_bssid[ETH_ALEN];
- /* Return error if the adapter or table entry is not marked as infra */
- if ((priv->bss_mode != NL80211_IFTYPE_STATION) ||
+ /* Return error if the adapter is not STA role or table entry
+ * is not marked as infra.
+ */
+ if ((GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) ||
(bss_desc->bss_mode != NL80211_IFTYPE_STATION))
return -1;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index e15ab72fb03d..1753431de361 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -427,6 +427,10 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
"Cal data request_firmware() failed\n");
}
+ /* enable host interrupt after fw dnld is successful */
+ if (adapter->if_ops.enable_int)
+ adapter->if_ops.enable_int(adapter);
+
adapter->init_wait_q_woken = false;
ret = mwifiex_init_fw(adapter);
if (ret == -1) {
@@ -478,6 +482,8 @@ err_add_intf:
mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
rtnl_unlock();
err_init_fw:
+ if (adapter->if_ops.disable_int)
+ adapter->if_ops.disable_int(adapter);
pr_debug("info: %s: unregister device\n", __func__);
adapter->if_ops.unregister_dev(adapter);
done:
@@ -855,7 +861,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
INIT_WORK(&adapter->main_work, mwifiex_main_work_queue);
/* Register the device. Fill up the private data structure with relevant
- information from the card and request for the required IRQ. */
+ information from the card. */
if (adapter->if_ops.register_dev(adapter)) {
pr_err("%s: failed to register mwifiex device\n", __func__);
goto err_registerdev;
@@ -919,6 +925,11 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
if (!adapter)
goto exit_remove;
+ /* We can no longer handle interrupts once we start doing the teardown
+ * below. */
+ if (adapter->if_ops.disable_int)
+ adapter->if_ops.disable_int(adapter);
+
adapter->surprise_removed = true;
/* Stop data */
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 3da73d36acdf..253e0bd38e25 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -601,6 +601,7 @@ struct mwifiex_if_ops {
int (*register_dev) (struct mwifiex_adapter *);
void (*unregister_dev) (struct mwifiex_adapter *);
int (*enable_int) (struct mwifiex_adapter *);
+ void (*disable_int) (struct mwifiex_adapter *);
int (*process_int_status) (struct mwifiex_adapter *);
int (*host_to_card) (struct mwifiex_adapter *, u8, struct sk_buff *,
struct mwifiex_tx_param *);
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 5ee5ed02eccd..09185c963248 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -51,6 +51,7 @@ static struct mwifiex_if_ops sdio_ops;
static struct semaphore add_remove_card_sem;
static int mwifiex_sdio_resume(struct device *dev);
+static void mwifiex_sdio_interrupt(struct sdio_func *func);
/*
* SDIO probe.
@@ -296,6 +297,15 @@ static struct sdio_driver mwifiex_sdio = {
}
};
+/* Write data into SDIO card register. Caller claims SDIO device. */
+static int
+mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data)
+{
+ int ret = -1;
+ sdio_writeb(func, data, reg, &ret);
+ return ret;
+}
+
/*
* This function writes data into SDIO card register.
*/
@@ -303,10 +313,10 @@ static int
mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data)
{
struct sdio_mmc_card *card = adapter->card;
- int ret = -1;
+ int ret;
sdio_claim_host(card->func);
- sdio_writeb(card->func, data, reg, &ret);
+ ret = mwifiex_write_reg_locked(card->func, reg, data);
sdio_release_host(card->func);
return ret;
@@ -685,23 +695,15 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
* The host interrupt mask is read, the disable bit is reset and
* written back to the card host interrupt mask register.
*/
-static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
+static void mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
{
- u8 host_int_mask, host_int_disable = HOST_INT_DISABLE;
-
- /* Read back the host_int_mask register */
- if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask))
- return -1;
-
- /* Update with the mask and write back to the register */
- host_int_mask &= ~host_int_disable;
-
- if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) {
- dev_err(adapter->dev, "disable host interrupt failed\n");
- return -1;
- }
+ struct sdio_mmc_card *card = adapter->card;
+ struct sdio_func *func = card->func;
- return 0;
+ sdio_claim_host(func);
+ mwifiex_write_reg_locked(func, HOST_INT_MASK_REG, 0);
+ sdio_release_irq(func);
+ sdio_release_host(func);
}
/*
@@ -713,14 +715,29 @@ static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
+ struct sdio_func *func = card->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ /* Request the SDIO IRQ */
+ ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
+ if (ret) {
+ dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret);
+ goto out;
+ }
/* Simply write the mask to the register */
- if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG,
- card->reg->host_int_enable)) {
+ ret = mwifiex_write_reg_locked(func, HOST_INT_MASK_REG,
+ card->reg->host_int_enable);
+ if (ret) {
dev_err(adapter->dev, "enable host interrupt failed\n");
- return -1;
+ sdio_release_irq(func);
}
- return 0;
+
+out:
+ sdio_release_host(func);
+ return ret;
}
/*
@@ -997,9 +1014,6 @@ mwifiex_sdio_interrupt(struct sdio_func *func)
}
adapter = card->adapter;
- if (adapter->surprise_removed)
- return;
-
if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
adapter->ps_state = PS_STATE_AWAKE;
@@ -1625,8 +1639,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
/* Allocate buffer and copy payload */
blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
buf_block_len = (pkt_len + blk_size - 1) / blk_size;
- *(u16 *) &payload[0] = (u16) pkt_len;
- *(u16 *) &payload[2] = type;
+ *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
+ *(__le16 *)&payload[2] = cpu_to_le16(type);
/*
* This is SDIO specific header
@@ -1728,9 +1742,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
struct sdio_mmc_card *card = adapter->card;
if (adapter->card) {
- /* Release the SDIO IRQ */
sdio_claim_host(card->func);
- sdio_release_irq(card->func);
sdio_disable_func(card->func);
sdio_release_host(card->func);
sdio_set_drvdata(card->func, NULL);
@@ -1744,7 +1756,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
*/
static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
{
- int ret = 0;
+ int ret;
struct sdio_mmc_card *card = adapter->card;
struct sdio_func *func = card->func;
@@ -1753,22 +1765,14 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
sdio_claim_host(func);
- /* Request the SDIO IRQ */
- ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
- if (ret) {
- pr_err("claim irq failed: ret=%d\n", ret);
- goto disable_func;
- }
-
/* Set block size */
ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
+ sdio_release_host(func);
if (ret) {
pr_err("cannot set SDIO block size\n");
- ret = -1;
- goto release_irq;
+ return ret;
}
- sdio_release_host(func);
sdio_set_drvdata(func, card);
adapter->dev = &func->dev;
@@ -1776,15 +1780,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
strcpy(adapter->fw_name, card->firmware);
return 0;
-
-release_irq:
- sdio_release_irq(func);
-disable_func:
- sdio_disable_func(func);
- sdio_release_host(func);
- adapter->card = NULL;
-
- return -1;
}
/*
@@ -1813,9 +1808,6 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
*/
mwifiex_read_reg(adapter, HOST_INTSTATUS_REG, &sdio_ireg);
- /* Disable host interrupt mask register for SDIO */
- mwifiex_sdio_disable_host_int(adapter);
-
/* Get SDIO ioport */
mwifiex_init_sdio_ioport(adapter);
@@ -1957,6 +1949,7 @@ static struct mwifiex_if_ops sdio_ops = {
.register_dev = mwifiex_register_dev,
.unregister_dev = mwifiex_unregister_dev,
.enable_int = mwifiex_sdio_enable_host_int,
+ .disable_int = mwifiex_sdio_disable_host_int,
.process_int_status = mwifiex_process_int_status,
.host_to_card = mwifiex_sdio_host_to_card,
.wakeup = mwifiex_pm_wakeup_card,
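[Editorial note] The mwifiex sdio.c hunks above move SDIO IRQ claiming into the enable_int op and add a matching disable_int, keeping sdio_claim_irq()/sdio_release_irq() and the mask-register writes under sdio_claim_host(). A minimal sketch of that pairing; INT_MASK_REG and the handler are stand-ins:

```c
#include <linux/mmc/sdio_func.h>

#define INT_MASK_REG 0x02	/* hypothetical interrupt-mask register */

static int host_int_enable(struct sdio_func *func,
			   sdio_irq_handler_t *handler, u8 mask)
{
	int ret;

	sdio_claim_host(func);
	ret = sdio_claim_irq(func, handler);
	if (!ret) {
		sdio_writeb(func, mask, INT_MASK_REG, &ret);
		if (ret)
			sdio_release_irq(func);	/* undo on failure */
	}
	sdio_release_host(func);
	return ret;
}

static void host_int_disable(struct sdio_func *func)
{
	sdio_claim_host(func);
	sdio_writeb(func, 0, INT_MASK_REG, NULL);	/* mask everything */
	sdio_release_irq(func);
	sdio_release_host(func);
}
```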
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 6d51dfdd8251..532ae0ac4dfb 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -92,9 +92,6 @@
/* Host Control Registers : Download host interrupt mask */
#define DN_LD_HOST_INT_MASK (0x2U)
-/* Disable Host interrupt mask */
-#define HOST_INT_DISABLE 0xff
-
/* Host Control Registers : Host interrupt status */
#define HOST_INTSTATUS_REG 0x03
/* Host Control Registers : Upload host interrupt status */
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 206c3e038072..8af97abf7108 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -257,10 +257,10 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
goto done;
}
- if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+ if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+ priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
u8 config_bands;
- /* Infra mode */
ret = mwifiex_deauthenticate(priv, NULL);
if (ret)
goto done;
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 9b915d3a44be..3e60a31582f8 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,6 +1,6 @@
menuconfig RT2X00
tristate "Ralink driver support"
- depends on MAC80211
+ depends on MAC80211 && HAS_DMA
---help---
This will enable the support for the Ralink drivers,
developed in the rt2x00 project <http://rt2x00.serialmonkey.com>.
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 6c0a91ff963c..aa95c6cf3545 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -936,13 +936,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
-void rt2x00queue_pause_queue(struct data_queue *queue)
+void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
- if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
- !test_bit(QUEUE_STARTED, &queue->flags) ||
- test_and_set_bit(QUEUE_PAUSED, &queue->flags))
- return;
-
switch (queue->qid) {
case QID_AC_VO:
case QID_AC_VI:
@@ -958,6 +953,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue)
break;
}
}
+void rt2x00queue_pause_queue(struct data_queue *queue)
+{
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ !test_bit(QUEUE_STARTED, &queue->flags) ||
+ test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+ return;
+
+ rt2x00queue_pause_queue_nocheck(queue);
+}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
void rt2x00queue_unpause_queue(struct data_queue *queue)
@@ -1019,7 +1023,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue)
return;
}
- rt2x00queue_pause_queue(queue);
+ rt2x00queue_pause_queue_nocheck(queue);
queue->rt2x00dev->ops->lib->stop_queue(queue);
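
The rt2x00 hunks above split a guarded entry point from an unguarded worker: the teardown path is already tearing down queue state, so routing it through the guarded rt2x00queue_pause_queue() could return early and leave the hardware queue running. A minimal sketch of the pattern, using simplified stand-in types (demo_*) rather than the rt2x00 structures:

    #include <linux/bitops.h>

    enum demo_queue_flags { DEMO_QUEUE_STARTED, DEMO_QUEUE_PAUSED };

    struct demo_queue {
            unsigned long flags;
    };

    /* Unguarded worker: performs the pause unconditionally. */
    static void demo_pause_queue_nocheck(struct demo_queue *queue)
    {
            /* ... tell the hardware to stop dequeueing frames ... */
    }

    /* Guarded entry point for normal callers. */
    static void demo_pause_queue(struct demo_queue *queue)
    {
            if (!test_bit(DEMO_QUEUE_STARTED, &queue->flags) ||
                test_and_set_bit(DEMO_QUEUE_PAUSED, &queue->flags))
                    return;

            demo_pause_queue_nocheck(queue);
    }

    /* Teardown path: must pause even when the guards would bail out. */
    static void demo_stop_queue(struct demo_queue *queue)
    {
            demo_pause_queue_nocheck(queue);
            clear_bit(DEMO_QUEUE_STARTED, &queue->flags);
    }
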
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 7253de3d8c66..c2ffce7a907c 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,27 +1,20 @@
-config RTLWIFI
- tristate "Realtek wireless card support"
- depends on MAC80211
- select FW_LOADER
- ---help---
- This is common code for RTL8192CE/RTL8192CU/RTL8192SE/RTL8723AE
- drivers. This module does nothing by itself - the various front-end
- drivers need to be enabled to support any desired devices.
-
- If you choose to build as a module, it'll be called rtlwifi.
-
-config RTLWIFI_DEBUG
- bool "Debugging output for rtlwifi driver family"
- depends on RTLWIFI
+menuconfig RTL_CARDS
+ tristate "Realtek rtlwifi family of devices"
+ depends on MAC80211 && (PCI || USB)
default y
---help---
- To use the module option that sets the dynamic-debugging level for,
- the front-end driver, this parameter must be "Y". For memory-limited
- systems, choose "N". If in doubt, choose "Y".
+ This option will enable support for the Realtek mac80211-based
+ wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de,
+ rtl8723ae, and rtl8188ee share some common code.
+
+if RTL_CARDS
config RTL8192CE
tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
select RTL8192C_COMMON
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
wireless network adapters.
@@ -30,7 +23,9 @@ config RTL8192CE
config RTL8192SE
tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe
wireless network adapters.
@@ -39,7 +34,9 @@ config RTL8192SE
config RTL8192DE
tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe
wireless network adapters.
@@ -48,7 +45,9 @@ config RTL8192DE
config RTL8723AE
tristate "Realtek RTL8723AE PCIe Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8723AE 802.11n PCIe
wireless network adapters.
@@ -57,7 +56,9 @@ config RTL8723AE
config RTL8188EE
tristate "Realtek RTL8188EE Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8188EE 802.11n PCIe
wireless network adapters.
@@ -66,7 +67,9 @@ config RTL8188EE
config RTL8192CU
tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
- depends on RTLWIFI && USB
+ depends on USB
+ select RTLWIFI
+ select RTLWIFI_USB
select RTL8192C_COMMON
---help---
This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
@@ -74,7 +77,28 @@ config RTL8192CU
If you choose to build it as a module, it will be called rtl8192cu
+config RTLWIFI
+ tristate
+ select FW_LOADER
+
+config RTLWIFI_PCI
+ tristate
+
+config RTLWIFI_USB
+ tristate
+
+config RTLWIFI_DEBUG
+ bool "Debugging output for rtlwifi driver family"
+ depends on RTLWIFI
+ default y
+ ---help---
+ To use the module option that sets the dynamic-debugging level for
+ the front-end driver, this parameter must be "Y". For memory-limited
+ systems, choose "N". If in doubt, choose "Y".
+
config RTL8192C_COMMON
tristate
depends on RTL8192CE || RTL8192CU
- default m
+ default y
+
+endif
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index ff02b874f8d8..d56f023a4b90 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -12,13 +12,11 @@ rtlwifi-objs := \
rtl8192c_common-objs += \
-ifneq ($(CONFIG_PCI),)
-rtlwifi-objs += pci.o
-endif
+obj-$(CONFIG_RTLWIFI_PCI) += rtl_pci.o
+rtl_pci-objs := pci.o
-ifneq ($(CONFIG_USB),)
-rtlwifi-objs += usb.o
-endif
+obj-$(CONFIG_RTLWIFI_USB) += rtl_usb.o
+rtl_usb-objs := usb.o
obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
obj-$(CONFIG_RTL8192CE) += rtl8192ce/
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 9d558ac77b0c..7651f5acc14b 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -172,6 +172,7 @@ u8 rtl_tid_to_ac(u8 tid)
{
return tid_to_ac[tid];
}
+EXPORT_SYMBOL_GPL(rtl_tid_to_ac);
static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
struct ieee80211_sta_ht_cap *ht_cap)
@@ -406,6 +407,7 @@ void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
cancel_delayed_work(&rtlpriv->works.fwevt_wq);
}
+EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
void rtl_init_rfkill(struct ieee80211_hw *hw)
{
@@ -439,6 +441,7 @@ void rtl_deinit_rfkill(struct ieee80211_hw *hw)
{
wiphy_rfkill_stop_polling(hw->wiphy);
}
+EXPORT_SYMBOL_GPL(rtl_deinit_rfkill);
int rtl_init_core(struct ieee80211_hw *hw)
{
@@ -489,10 +492,12 @@ int rtl_init_core(struct ieee80211_hw *hw)
return 0;
}
+EXPORT_SYMBOL_GPL(rtl_init_core);
void rtl_deinit_core(struct ieee80211_hw *hw)
{
}
+EXPORT_SYMBOL_GPL(rtl_deinit_core);
void rtl_init_rx_config(struct ieee80211_hw *hw)
{
@@ -501,6 +506,7 @@ void rtl_init_rx_config(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
}
+EXPORT_SYMBOL_GPL(rtl_init_rx_config);
/*********************************************************
*
@@ -879,6 +885,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
return true;
}
+EXPORT_SYMBOL_GPL(rtl_tx_mgmt_proc);
void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
@@ -1052,6 +1059,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
return true;
}
+EXPORT_SYMBOL_GPL(rtl_action_proc);
/*should call before software enc*/
u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
@@ -1125,6 +1133,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
return false;
}
+EXPORT_SYMBOL_GPL(rtl_is_special_data);
/*********************************************************
*
@@ -1300,6 +1309,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
rtlpriv->link_info.bcn_rx_inperiod++;
}
+EXPORT_SYMBOL_GPL(rtl_beacon_statistic);
void rtl_watchdog_wq_callback(void *data)
{
@@ -1793,6 +1803,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
mac->vendor = vendor;
}
+EXPORT_SYMBOL_GPL(rtl_recognize_peer);
/*********************************************************
*
@@ -1849,6 +1860,7 @@ struct attribute_group rtl_attribute_group = {
.name = "rtlsysfs",
.attrs = rtl_sysfs_entries,
};
+EXPORT_SYMBOL_GPL(rtl_attribute_group);
MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
@@ -1856,7 +1868,8 @@ MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
-struct rtl_global_var global_var = {};
+struct rtl_global_var rtl_global_var = {};
+EXPORT_SYMBOL_GPL(rtl_global_var);
static int __init rtl_core_module_init(void)
{
@@ -1864,8 +1877,8 @@ static int __init rtl_core_module_init(void)
pr_err("Unable to register rtl_rc, use default RC !!\n");
/* init some global vars */
- INIT_LIST_HEAD(&global_var.glb_priv_list);
- spin_lock_init(&global_var.glb_list_lock);
+ INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
+ spin_lock_init(&rtl_global_var.glb_list_lock);
return 0;
}
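
Two conventions run through the base.c hunks: symbols gain EXPORT_SYMBOL_GPL() because the PCI and USB transports become separate modules, and the bare global_var is renamed to rtl_global_var so the now kernel-wide symbol carries a driver prefix. A minimal, self-contained sketch of the same conventions (all demo_* names are placeholders):

    #include <linux/list.h>
    #include <linux/module.h>
    #include <linux/spinlock.h>

    struct demo_global {
            struct list_head glb_priv_list;
            spinlock_t glb_list_lock;
    };

    /* Exported globals need a driver-prefixed name: once the symbol
     * leaves a single .ko, a bare "global_var" invites collisions. */
    struct demo_global demo_global_var;
    EXPORT_SYMBOL_GPL(demo_global_var);

    static int __init demo_core_init(void)
    {
            INIT_LIST_HEAD(&demo_global_var.glb_priv_list);
            spin_lock_init(&demo_global_var.glb_list_lock);
            return 0;
    }
    module_init(demo_core_init);

    static void __exit demo_core_exit(void)
    {
    }
    module_exit(demo_core_exit);

    MODULE_LICENSE("GPL");
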
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 8576bc34b032..0e5fe0902daf 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -147,7 +147,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
u8 rtl_tid_to_ac(u8 tid);
extern struct attribute_group rtl_attribute_group;
void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
-extern struct rtl_global_var global_var;
+extern struct rtl_global_var rtl_global_var;
int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
bool isht, u8 desc_rate, bool first_ampdu);
bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index ee84844be008..733b7ce7f0e2 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1330,3 +1330,4 @@ const struct ieee80211_ops rtl_ops = {
.rfkill_poll = rtl_op_rfkill_poll,
.flush = rtl_op_flush,
};
+EXPORT_SYMBOL_GPL(rtl_ops);
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/rtlwifi/debug.c
index 7d52d3d7769f..76e2086e137e 100644
--- a/drivers/net/wireless/rtlwifi/debug.c
+++ b/drivers/net/wireless/rtlwifi/debug.c
@@ -51,3 +51,4 @@ void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
/*Init Debug flag enable condition */
}
+EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init);
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 9e3894178e77..838a1ed3f194 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -229,6 +229,7 @@ void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
*pbuf = (u8) (value32 & 0xff);
}
+EXPORT_SYMBOL_GPL(read_efuse_byte);
void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
{
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index c97e9d327331..703f839af6ca 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -35,6 +35,13 @@
#include "efuse.h"
#include <linux/export.h>
#include <linux/kmemleak.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCI basic driver for rtlwifi");
static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
PCI_VENDOR_ID_INTEL,
@@ -1008,19 +1015,6 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
return;
}
-static void rtl_lps_change_work_callback(struct work_struct *work)
-{
- struct rtl_works *rtlworks =
- container_of(work, struct rtl_works, lps_change_work);
- struct ieee80211_hw *hw = rtlworks->hw;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->enter_ps)
- rtl_lps_enter(hw);
- else
- rtl_lps_leave(hw);
-}
-
static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1899,7 +1893,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpriv->rtlhal.interface = INTF_PCI;
rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
rtlpriv->intf_ops = &rtl_pci_ops;
- rtlpriv->glb_var = &global_var;
+ rtlpriv->glb_var = &rtl_global_var;
/*
*init dbgp flags before all
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 884bceae38a9..298b615964e8 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -269,6 +269,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
}
+EXPORT_SYMBOL_GPL(rtl_ips_nic_on);
/*for FW LPS*/
@@ -518,6 +519,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
"u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
}
}
+EXPORT_SYMBOL_GPL(rtl_swlps_beacon);
void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
{
@@ -611,6 +613,19 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40));
}
+void rtl_lps_change_work_callback(struct work_struct *work)
+{
+ struct rtl_works *rtlworks =
+ container_of(work, struct rtl_works, lps_change_work);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->enter_ps)
+ rtl_lps_enter(hw);
+ else
+ rtl_lps_leave(hw);
+}
+EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback);
void rtl_swlps_wq_callback(void *data)
{
@@ -922,3 +937,4 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
else
rtl_p2p_noa_ie(hw, data, len - FCS_LEN);
}
+EXPORT_SYMBOL_GPL(rtl_p2p_info);
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index 4d682b753f50..88bd76ea88f7 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -49,5 +49,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
+void rtl_lps_change_work_callback(struct work_struct *work);
#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a3532e077871..e56778cac9bf 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -32,6 +32,13 @@
#include "ps.h"
#include "rtl8192c/fw_common.h"
#include <linux/export.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB basic driver for rtlwifi");
#define REALTEK_USB_VENQT_READ 0xC0
#define REALTEK_USB_VENQT_WRITE 0x40
@@ -1070,6 +1077,8 @@ int rtl_usb_probe(struct usb_interface *intf,
spin_lock_init(&rtlpriv->locks.usb_lock);
INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
rtl_fill_h2c_cmd_work_callback);
+ INIT_WORK(&rtlpriv->works.lps_change_work,
+ rtl_lps_change_work_callback);
rtlpriv->usb_data_index = 0;
init_completion(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 4941f201d6c8..b8ba1f925e75 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
goto exit;
err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
- USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
+ USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
if (err < 0)
goto exit;
+ memcpy(&ret, buf, sizeof(ret));
+
if (ret & 0x80) {
err = -EIO;
goto exit;
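
The zd1201 fix replaces a stack variable with a heap buffer as the usb_control_msg() target: USB control transfers may be DMA-mapped, and DMA to stack memory is not allowed. A minimal sketch of the pattern; the request value 0x4 and type USB_DIR_IN | 0x40 mirror the hunk above, while the 1000 ms timeout is an arbitrary placeholder:

    #include <linux/slab.h>
    #include <linux/usb.h>

    static int demo_read_status(struct usb_device *dev, u8 *status)
    {
            u8 *buf;
            int err;

            /* kmalloc()ed memory is DMA-safe; stack memory is not. */
            buf = kmalloc(1, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
                                  USB_DIR_IN | 0x40, 0, 0, buf, 1, 1000);
            if (err >= 0)
                    *status = *buf;

            kfree(buf);
            return err < 0 ? err : 0;
    }
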
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 6bb7cf2de556..b10ba00cc3e6 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
mem = (unsigned long)
dt_alloc(size + 4, __alignof__(struct device_node));
+ memset((void *)mem, 0, size);
+
((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
pr_debug(" unflattening %lx...\n", mem);
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index e79e006eb9ab..9ee04b4b68bf 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -811,18 +811,28 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
return pcidev->irq;
}
-static struct iosapic_info *first_isi = NULL;
+static struct iosapic_info *iosapic_list;
#ifdef CONFIG_64BIT
-int iosapic_serial_irq(int num)
+int iosapic_serial_irq(struct parisc_device *dev)
{
- struct iosapic_info *isi = first_isi;
- struct irt_entry *irte = NULL; /* only used if PAT PDC */
+ struct iosapic_info *isi;
+ struct irt_entry *irte;
struct vector_info *vi;
- int isi_line; /* line used by device */
+ int cnt;
+ int intin;
+
+ intin = (dev->mod_info >> 24) & 15;
/* lookup IRT entry for isi/slot/pin set */
- irte = &irt_cell[num];
+ for (cnt = 0; cnt < irt_num_entry; cnt++) {
+ irte = &irt_cell[cnt];
+ if (COMPARE_IRTE_ADDR(irte, dev->mod0) &&
+ irte->dest_iosapic_intin == intin)
+ break;
+ }
+ if (cnt >= irt_num_entry)
+ return 0; /* no irq found, force polling */
DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n",
irte,
@@ -834,11 +844,17 @@ int iosapic_serial_irq(int num)
irte->src_seg_id,
irte->dest_iosapic_intin,
(u32) irte->dest_iosapic_addr);
- isi_line = irte->dest_iosapic_intin;
+
+ /* search for iosapic */
+ for (isi = iosapic_list; isi; isi = isi->isi_next)
+ if (isi->isi_hpa == dev->mod0)
+ break;
+ if (!isi)
+ return 0; /* no iosapic found, force polling */
/* get vector info for this input line */
- vi = isi->isi_vector + isi_line;
- DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi);
+ vi = isi->isi_vector + intin;
+ DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", intin, vi);
/* If this IRQ line has already been setup, skip it */
if (vi->irte)
@@ -941,8 +957,8 @@ void *iosapic_register(unsigned long hpa)
vip->irqline = (unsigned char) cnt;
vip->iosapic = isi;
}
- if (!first_isi)
- first_isi = isi;
+ isi->isi_next = iosapic_list;
+ iosapic_list = isi;
return isi;
}
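
The iosapic change replaces a remembered first_isi with a proper singly linked list, so the serial-IRQ lookup can match a device against every registered I/O SAPIC rather than only the first. The list handling reduces to a prepend on registration and a linear scan on lookup; a sketch with simplified types:

    #include <linux/types.h>

    struct demo_iosapic {
            unsigned long hpa;
            struct demo_iosapic *next;
    };

    static struct demo_iosapic *demo_iosapic_list;

    /* Registration: prepend, O(1); locking is elided here. */
    static void demo_register(struct demo_iosapic *isi)
    {
            isi->next = demo_iosapic_list;
            demo_iosapic_list = isi;
    }

    /* Lookup: walk all instances. A NULL result lets the caller fall
     * back to polled operation, as the patch does by returning irq 0. */
    static struct demo_iosapic *demo_find_by_hpa(unsigned long hpa)
    {
            struct demo_iosapic *isi;

            for (isi = demo_iosapic_list; isi; isi = isi->next)
                    if (isi->hpa == hpa)
                            return isi;
            return NULL;
    }
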
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 13a633b1612e..7bf3926aecc0 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -86,10 +86,6 @@ struct mvebu_sw_pci_bridge {
u16 secondary_status;
u16 membase;
u16 memlimit;
- u16 prefmembase;
- u16 prefmemlimit;
- u32 prefbaseupper;
- u32 preflimitupper;
u16 iobaseupper;
u16 iolimitupper;
u8 cappointer;
@@ -419,15 +415,7 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
break;
case PCI_PREF_MEMORY_BASE:
- *value = (bridge->prefmemlimit << 16 | bridge->prefmembase);
- break;
-
- case PCI_PREF_BASE_UPPER32:
- *value = bridge->prefbaseupper;
- break;
-
- case PCI_PREF_LIMIT_UPPER32:
- *value = bridge->preflimitupper;
+ *value = 0;
break;
case PCI_IO_BASE_UPPER16:
@@ -501,19 +489,6 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
mvebu_pcie_handle_membase_change(port);
break;
- case PCI_PREF_MEMORY_BASE:
- bridge->prefmembase = value & 0xffff;
- bridge->prefmemlimit = value >> 16;
- break;
-
- case PCI_PREF_BASE_UPPER32:
- bridge->prefbaseupper = value;
- break;
-
- case PCI_PREF_LIMIT_UPPER32:
- bridge->preflimitupper = value;
- break;
-
case PCI_IO_BASE_UPPER16:
bridge->iobaseupper = value & 0xffff;
bridge->iolimitupper = value >> 16;
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index bb7ebb22db01..d85009de713d 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -3,16 +3,13 @@
#
menuconfig HOTPLUG_PCI
- tristate "Support for PCI Hotplug"
+ bool "Support for PCI Hotplug"
depends on PCI && SYSFS
---help---
Say Y here if you have a motherboard with a PCI Hotplug controller.
This allows you to add and remove PCI cards while the machine is
powered up and running.
- To compile this driver as a module, choose M here: the
- module will be called pci_hotplug.
-
When in doubt, say N.
if HOTPLUG_PCI
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index aac7a40e4a4a..0e0d0f7f63fd 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -92,7 +92,14 @@ int pciehp_unconfigure_device(struct slot *p_slot)
if (ret)
presence = 0;
- list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) {
+ /*
+ * Stopping an SR-IOV PF device removes all the associated VFs,
+ * which will update the bus->devices list and confuse the
+ * iterator. Therefore, iterate in reverse so we remove the VFs
+ * first, then the PF. We do the same in pci_stop_bus_device().
+ */
+ list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
+ bus_list) {
pci_dev_get(dev);
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
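
The comment in the hunk explains the hazard; the key API is list_for_each_entry_safe_reverse(), which keeps a lookahead cursor and walks backwards, so entries added after the PF (its VFs) are removed before the PF itself. A minimal sketch with a stand-in device type:

    #include <linux/list.h>

    struct demo_dev {
            struct list_head bus_list;
    };

    static void demo_stop_dev(struct demo_dev *dev)
    {
            /* ... stopping a PF here may delete its VFs' list entries ... */
    }

    static void demo_unconfigure(struct list_head *devices)
    {
            struct demo_dev *dev, *tmp;

            /* Reverse + safe: VFs sit after their PF, so they go first,
             * and tmp keeps the walk valid across each removal. */
            list_for_each_entry_safe_reverse(dev, tmp, devices, bus_list)
                    demo_stop_dev(dev);
    }
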
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index dbdc5f7e2b29..01e264fb50e0 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -317,13 +317,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
/* ACPI bus type */
static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
{
- struct pci_dev * pci_dev;
- u64 addr;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ bool is_bridge;
+ u64 addr;
- pci_dev = to_pci_dev(dev);
+ /*
+ * pci_is_bridge() is not suitable here, because pci_dev->subordinate
+ * is set only after acpi_pci_find_device() has been called for the
+ * given device.
+ */
+ is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
+ || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
/* Please ref to ACPI spec for the syntax of _ADR */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
- *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
+ *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
if (!*handle)
return -ENODEV;
return 0;
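
For reference, the _ADR value built above packs the PCI device (slot) number into the high word and the function number into the low word; PCI_SLOT() and PCI_FUNC() unpack the combined devfn byte. A one-line helper expressing the same encoding (demo_ name is a placeholder):

    #include <linux/pci.h>

    static u64 demo_pci_adr(unsigned int devfn)
    {
            /* ACPI _ADR for PCI: (device << 16) | function */
            return ((u64)PCI_SLOT(devfn) << 16) | PCI_FUNC(devfn);
    }
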
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 569f82fc9e22..3b94cfcfa03b 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -14,15 +14,12 @@ config PCIEPORTBUS
# Include service Kconfig here
#
config HOTPLUG_PCI_PCIE
- tristate "PCI Express Hotplug driver"
+ bool "PCI Express Hotplug driver"
depends on HOTPLUG_PCI && PCIEPORTBUS
help
Say Y here if you have a motherboard that supports PCI Express Native
Hotplug
- To compile this driver as a module, choose M here: the
- module will be called pciehp.
-
When in doubt, say N.
source "drivers/pci/pcie/aer/Kconfig"
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index d254e2379533..64a7de22d9af 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -300,6 +300,47 @@ static void assign_requested_resources_sorted(struct list_head *head,
}
}
+static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
+{
+ struct pci_dev_resource *fail_res;
+ unsigned long mask = 0;
+
+ /* check failed type */
+ list_for_each_entry(fail_res, fail_head, list)
+ mask |= fail_res->flags;
+
+ /*
+ * A failed pref resource also sets IORESOURCE_MEM in the mask,
+ * since pref requests may be satisfied from non-pref ranges.
+ * All assigned non-pref sibling resources are then released
+ * according to that bit.
+ */
+ return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
+}
+
+static bool pci_need_to_release(unsigned long mask, struct resource *res)
+{
+ if (res->flags & IORESOURCE_IO)
+ return !!(mask & IORESOURCE_IO);
+
+ /* check pref at first */
+ if (res->flags & IORESOURCE_PREFETCH) {
+ if (mask & IORESOURCE_PREFETCH)
+ return true;
+ /* count pref if its parent is non-pref */
+ else if ((mask & IORESOURCE_MEM) &&
+ !(res->parent->flags & IORESOURCE_PREFETCH))
+ return true;
+ else
+ return false;
+ }
+
+ if (res->flags & IORESOURCE_MEM)
+ return !!(mask & IORESOURCE_MEM);
+
+ return false; /* should not get here */
+}
+
static void __assign_resources_sorted(struct list_head *head,
struct list_head *realloc_head,
struct list_head *fail_head)
@@ -312,11 +353,24 @@ static void __assign_resources_sorted(struct list_head *head,
* if could do that, could get out early.
* if could not do that, we still try to assign requested at first,
* then try to reassign add_size for some resources.
+ *
+ * Check the three resource types separately to decide whether
+ * assigned resources must be released after the requested +
+ * add_size attempt:
+ * 1. if any io port assignment failed, release the assigned
+ * io ports.
+ * 2. if any pref mmio assignment failed, release the assigned
+ * pref mmio; if an assigned pref mmio's parent is non-pref
+ * mmio and a non-pref mmio assignment failed, release that
+ * assigned pref mmio as well.
+ * 3. if any non-pref mmio or pref mmio assignment failed,
+ * release the assigned non-pref mmio.
*/
LIST_HEAD(save_head);
LIST_HEAD(local_fail_head);
struct pci_dev_resource *save_res;
- struct pci_dev_resource *dev_res;
+ struct pci_dev_resource *dev_res, *tmp_res;
+ unsigned long fail_type;
/* Check if optional add_size is there */
if (!realloc_head || list_empty(realloc_head))
@@ -348,6 +402,19 @@ static void __assign_resources_sorted(struct list_head *head,
return;
}
+ /* check failed type */
+ fail_type = pci_fail_res_type_mask(&local_fail_head);
+ /* drop assigned resources that need not be released from the head list */
+ list_for_each_entry_safe(dev_res, tmp_res, head, list)
+ if (dev_res->res->parent &&
+ !pci_need_to_release(fail_type, dev_res->res)) {
+ /* remove it from realloc_head list */
+ remove_from_list(realloc_head, dev_res->res);
+ remove_from_list(&save_head, dev_res->res);
+ list_del(&dev_res->list);
+ kfree(dev_res);
+ }
+
free_list(&local_fail_head);
/* Release assigned resource */
list_for_each_entry(dev_res, head, list)
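
The policy implemented by pci_fail_res_type_mask() and pci_need_to_release() is easiest to see with concrete values. Below is a user-space mirror of the decision logic, where the RES_* flags are simplified stand-ins for the IORESOURCE_* bits; it exercises the interesting case that a non-pref MMIO failure releases a pref BAR only when that BAR was placed inside a non-pref parent window:

    #include <assert.h>
    #include <stdbool.h>

    #define RES_IO   0x1u
    #define RES_MEM  0x2u
    #define RES_PREF 0x4u

    static bool need_to_release(unsigned mask, unsigned res, unsigned parent)
    {
            if (res & RES_IO)
                    return mask & RES_IO;
            if (res & RES_PREF) {
                    if (mask & RES_PREF)
                            return true;
                    /* a pref BAR in a non-pref window competes with MEM */
                    return (mask & RES_MEM) && !(parent & RES_PREF);
            }
            if (res & RES_MEM)
                    return mask & RES_MEM;
            return false;
    }

    int main(void)
    {
            unsigned mask = RES_MEM;        /* only non-pref MMIO failed */

            assert(!need_to_release(mask, RES_MEM | RES_PREF, RES_PREF));
            assert(need_to_release(mask, RES_MEM | RES_PREF, RES_MEM));
            assert(need_to_release(mask, RES_MEM, RES_MEM));
            assert(!need_to_release(mask, RES_IO, RES_IO));
            return 0;
    }
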
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
index c47fd1e5450b..94716c779800 100644
--- a/drivers/pinctrl/pinctrl-sunxi.c
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -278,6 +278,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct sunxi_pinctrl_group *g = &pctl->groups[group];
+ unsigned long flags;
u32 val, mask;
u16 strength;
u8 dlevel;
@@ -295,22 +296,35 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
* 3: 40mA
*/
dlevel = strength / 10 - 1;
+
+ spin_lock_irqsave(&pctl->lock, flags);
+
val = readl(pctl->membase + sunxi_dlevel_reg(g->pin));
mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin);
writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin),
pctl->membase + sunxi_dlevel_reg(g->pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
break;
case PIN_CONFIG_BIAS_PULL_UP:
+ spin_lock_irqsave(&pctl->lock, flags);
+
val = readl(pctl->membase + sunxi_pull_reg(g->pin));
mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin),
pctl->membase + sunxi_pull_reg(g->pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
+ spin_lock_irqsave(&pctl->lock, flags);
+
val = readl(pctl->membase + sunxi_pull_reg(g->pin));
mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin),
pctl->membase + sunxi_pull_reg(g->pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
break;
default:
break;
@@ -360,11 +374,17 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
u8 config)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned long flags;
+ u32 val, mask;
+
+ spin_lock_irqsave(&pctl->lock, flags);
- u32 val = readl(pctl->membase + sunxi_mux_reg(pin));
- u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
+ val = readl(pctl->membase + sunxi_mux_reg(pin));
+ mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
writel((val & ~mask) | config << sunxi_mux_offset(pin),
pctl->membase + sunxi_mux_reg(pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static int sunxi_pmx_enable(struct pinctrl_dev *pctldev,
@@ -464,8 +484,21 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
u32 reg = sunxi_data_reg(offset);
u8 index = sunxi_data_offset(offset);
+ unsigned long flags;
+ u32 regval;
+
+ spin_lock_irqsave(&pctl->lock, flags);
+
+ regval = readl(pctl->membase + reg);
- writel((value & DATA_PINS_MASK) << index, pctl->membase + reg);
+ if (value)
+ regval |= BIT(index);
+ else
+ regval &= ~(BIT(index));
+
+ writel(regval, pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
@@ -526,6 +559,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
u32 reg = sunxi_irq_cfg_reg(d->hwirq);
u8 index = sunxi_irq_cfg_offset(d->hwirq);
+ unsigned long flags;
+ u32 regval;
u8 mode;
switch (type) {
@@ -548,7 +583,13 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
return -EINVAL;
}
- writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg);
+ spin_lock_irqsave(&pctl->lock, flags);
+
+ regval = readl(pctl->membase + reg);
+ regval &= ~IRQ_CFG_IRQ_MASK;
+ writel(regval | (mode << index), pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
return 0;
}
@@ -560,14 +601,19 @@ static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d)
u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq);
u32 status_reg = sunxi_irq_status_reg(d->hwirq);
u8 status_idx = sunxi_irq_status_offset(d->hwirq);
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&pctl->lock, flags);
+
/* Mask the IRQ */
val = readl(pctl->membase + ctrl_reg);
writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg);
/* Clear the IRQ */
writel(1 << status_idx, pctl->membase + status_reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static void sunxi_pinctrl_irq_mask(struct irq_data *d)
@@ -575,11 +621,16 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&pctl->lock, flags);
+
/* Mask the IRQ */
val = readl(pctl->membase + reg);
writel(val & ~(1 << idx), pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
@@ -588,6 +639,7 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
struct sunxi_desc_function *func;
u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+ unsigned long flags;
u32 val;
func = sunxi_pinctrl_desc_find_function_by_pin(pctl,
@@ -597,9 +649,13 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
/* Change muxing to INT mode */
sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval);
+ spin_lock_irqsave(&pctl->lock, flags);
+
/* Unmask the IRQ */
val = readl(pctl->membase + reg);
writel(val | (1 << idx), pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static struct irq_chip sunxi_pinctrl_irq_chip = {
@@ -752,6 +808,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, pctl);
+ spin_lock_init(&pctl->lock);
+
pctl->membase = of_iomap(node, 0);
if (!pctl->membase)
return -ENOMEM;
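
All of the sunxi changes above are instances of one pattern: a read-modify-write of a shared MMIO register must be atomic with respect to other writers, so the readl()/writel() pair goes under a driver-wide spinlock, IRQ-safe because some callers are interrupt handlers. The pattern, factored into a hypothetical helper:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    static void demo_rmw(void __iomem *reg, u32 mask, u32 bits,
                         spinlock_t *lock)
    {
            unsigned long flags;
            u32 val;

            spin_lock_irqsave(lock, flags);

            /* Without the lock, a concurrent writer between readl() and
             * writel() would have its bits silently overwritten. */
            val = readl(reg);
            writel((val & ~mask) | bits, reg);

            spin_unlock_irqrestore(lock, flags);
    }
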
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
index d68047d8f699..01c494f8a14f 100644
--- a/drivers/pinctrl/pinctrl-sunxi.h
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -14,6 +14,7 @@
#define __PINCTRL_SUNXI_H
#include <linux/kernel.h>
+#include <linux/spinlock.h>
#define PA_BASE 0
#define PB_BASE 32
@@ -407,6 +408,7 @@ struct sunxi_pinctrl {
unsigned ngroups;
int irq;
int irq_array[SUNXI_IRQ_NUMBER];
+ spinlock_t lock;
struct pinctrl_dev *pctl_dev;
};
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 0f9f8596b300..f9119525f557 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -330,7 +330,7 @@ static int __init olpc_ec_init_module(void)
return platform_driver_register(&olpc_ec_plat_driver);
}
-module_init(olpc_ec_init_module);
+arch_initcall(olpc_ec_init_module);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 97bb05edcb5a..d6970f47ae72 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -53,7 +53,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_ALS_QUERY 0x3
#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
-#define HPWMI_BIOS_QUERY 0x9
#define HPWMI_HOTKEY_QUERY 0xc
#define HPWMI_WIRELESS2_QUERY 0x1b
#define HPWMI_POSTCODEERROR_QUERY 0x2a
@@ -293,19 +292,6 @@ static int hp_wmi_tablet_state(void)
return (state & 0x4) ? 1 : 0;
}
-static int hp_wmi_enable_hotkeys(void)
-{
- int ret;
- int query = 0x6e;
-
- ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
- 0);
-
- if (ret)
- return -EINVAL;
- return 0;
-}
-
static int hp_wmi_set_block(void *data, bool blocked)
{
enum hp_wmi_radio r = (enum hp_wmi_radio) data;
@@ -1009,8 +995,6 @@ static int __init hp_wmi_init(void)
err = hp_wmi_input_setup();
if (err)
return err;
-
- hp_wmi_enable_hotkeys();
}
if (bios_capable) {
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 2ac045f27f10..3a1b6bf326a8 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -2440,7 +2440,10 @@ static ssize_t sony_nc_gfx_switch_status_show(struct device *dev,
if (pos < 0)
return pos;
- return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina");
+ return snprintf(buffer, PAGE_SIZE, "%s\n",
+ pos == SPEED ? "speed" :
+ pos == STAMINA ? "stamina" :
+ pos == AUTO ? "auto" : "unknown");
}
static int sony_nc_gfx_switch_setup(struct platform_device *pd,
@@ -4320,7 +4323,8 @@ static int sony_pic_add(struct acpi_device *device)
goto err_free_resources;
}
- if (sonypi_compat_init())
+ result = sonypi_compat_init();
+ if (result)
goto err_remove_input;
/* request io port */
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index f4f30af2df68..2e8a20cac588 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1715,11 +1715,13 @@ int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops)
(mport_id == RIO_MPORT_ANY && port->nscan == scan_ops))
port->nscan = NULL;
- list_for_each_entry(scan, &rio_scans, node)
+ list_for_each_entry(scan, &rio_scans, node) {
if (scan->mport_id == mport_id) {
list_del(&scan->node);
kfree(scan);
+ break;
}
+ }
mutex_unlock(&rio_mport_list_lock);
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 767fee2ab340..26019531db15 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/of_device.h>
@@ -119,24 +120,39 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
}
#endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */
-static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
+static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
{
+ int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */
/*
- * The datasheet doesn't say which way round the
- * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0,
- * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS
+ * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010
+ * states:
+ * | The order in which registers are updated is
+ * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds.
+ * | (This list is in bitfield order, from LSB to MSB, as they would
+ * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT
+ * | register. For example, the Seconds register corresponds to
+ * | STALE_REGS or NEW_REGS containing 0x80.)
*/
- while (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
- (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))
- cpu_relax();
+ do {
+ if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+ (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)))
+ return 0;
+ udelay(1);
+ } while (--timeout > 0);
+ return (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+ (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0;
}
/* Time read/write */
static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
+ int ret;
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
- stmp3xxx_wait_time(rtc_data);
+ ret = stmp3xxx_wait_time(rtc_data);
+ if (ret)
+ return ret;
+
rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm);
return 0;
}
@@ -146,8 +162,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t)
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS);
- stmp3xxx_wait_time(rtc_data);
- return 0;
+ return stmp3xxx_wait_time(rtc_data);
}
/* interrupt(s) handler */
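
stmp3xxx_wait_time() above converts an unbounded cpu_relax() spin into a bounded poll that reports -ETIME. The shape of that idiom, with a stand-in stale bit and the 5000-iteration budget taken from the hunk:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>

    static int demo_wait_not_stale(void __iomem *stat_reg, u32 stale_bit)
    {
            int timeout = 5000;     /* ~5ms of 1us polls */

            do {
                    if (!(readl(stat_reg) & stale_bit))
                            return 0;
                    udelay(1);
            } while (--timeout > 0);

            /* One final read so a success on the last poll still wins. */
            return (readl(stat_reg) & stale_bit) ? -ETIME : 0;
    }
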
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 02faf3c4e0d5..c2e80d7ca5e2 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -524,6 +524,8 @@ static int twl_rtc_probe(struct platform_device *pdev)
if (ret < 0)
goto out1;
+ device_init_wakeup(&pdev->dev, 1);
+
rtc = rtc_device_register(pdev->name,
&pdev->dev, &twl_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
@@ -542,7 +544,6 @@ static int twl_rtc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rtc);
- device_init_wakeup(&pdev->dev, 1);
return 0;
out2:
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 17150a778984..451bf99582ff 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2392,6 +2392,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
rc = cqr->intrc;
else
rc = -EIO;
+
+ /* kick tasklets */
+ dasd_schedule_device_bh(device);
+ if (device->block)
+ dasd_schedule_block_bh(device->block);
+
return rc;
}
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 1d4c8fe72752..c82fe65c4128 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
zfcp_erp_action_dismiss(&port->erp_action);
- else
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ else {
+ spin_lock(port->adapter->scsi_host->host_lock);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
zfcp_erp_action_dismiss_lun(sdev);
+ spin_unlock(port->adapter->scsi_host->host_lock);
+ }
}
static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
{
struct scsi_device *sdev;
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock(port->adapter->scsi_host->host_lock);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+ spin_unlock(port->adapter->scsi_host->host_lock);
}
static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
atomic_set_mask(common_mask, &port->status);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
- shost_for_each_device(sdev, adapter->scsi_host)
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host)
atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
@@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
- shost_for_each_device(sdev, adapter->scsi_host) {
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host) {
atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
@@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
{
struct scsi_device *sdev;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+ unsigned long flags;
atomic_set_mask(mask, &port->status);
if (!common_mask)
return;
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
atomic_set_mask(common_mask,
&sdev_to_zfcp(sdev)->status);
+ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
@@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
struct scsi_device *sdev;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+ unsigned long flags;
atomic_clear_mask(mask, &port->status);
@@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
if (clear_counter)
atomic_set(&port->erp_counter, 0);
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port) {
atomic_clear_mask(common_mask,
&sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
+ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
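
Each zfcp hunk swaps shost_for_each_device() for the unguarded __shost_for_each_device() and takes the Scsi_Host's host_lock explicitly: the plain iterator takes and drops a device reference on every step, while the __ variant just walks the list and therefore requires the caller to hold the lock. A sketch of the locked walk (the per-device status updates are elided):

    #include <linux/spinlock.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    static void demo_for_all_sdevs(struct Scsi_Host *shost)
    {
            struct scsi_device *sdev;
            unsigned long flags;

            spin_lock_irqsave(shost->host_lock, flags);
            __shost_for_each_device(sdev, shost) {
                    /* ... update per-device state; must not sleep ... */
            }
            spin_unlock_irqrestore(shost->host_lock, flags);
    }
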
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 665e3cfaaf85..de0598eaacd2 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
- spin_lock_irq(&qdio->req_q_lock);
if (atomic_read(&qdio->req_q_free) ||
!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return 1;
- spin_unlock_irq(&qdio->req_q_lock);
return 0;
}
@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
long ret;
- spin_unlock_irq(&qdio->req_q_lock);
- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
- zfcp_qdio_sbal_check(qdio), 5 * HZ);
+ ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
+ zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return -EIO;
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
}
- spin_lock_irq(&qdio->req_q_lock);
return -EIO;
}
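
The qdio change relies on wait_event_interruptible_lock_irq_timeout(), which expects the caller to hold the given spinlock (taken with spin_lock_irq()); the macro drops the lock around the actual sleep and reacquires it before every condition check, which removes the manual unlock/relock dance that previously raced. Hypothetical usage with stand-in types:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct demo_queue {
            wait_queue_head_t wq;
            spinlock_t lock;        /* held by caller via spin_lock_irq() */
            int free;
    };

    /* Returns 0 once a slot is free, -EIO on timeout or signal. */
    static int demo_wait_for_slot(struct demo_queue *q)
    {
            long ret;

            ret = wait_event_interruptible_lock_irq_timeout(q->wq,
                                    q->free > 0, q->lock, 5 * HZ);
            return ret > 0 ? 0 : -EIO;
    }
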
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3f01bbf0609f..890639274bcf 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
+#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \
+static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
+ struct device_attribute *at,\
+ char *buf) \
+{ \
+ return sprintf(buf, _format, _value); \
+} \
+static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
+ zfcp_sysfs_##_feat##_##_name##_show, NULL);
+
#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
struct device_attribute *at,\
@@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
struct device_attribute *attr,
@@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = {
&dev_attr_unit_in_recovery.attr,
&dev_attr_unit_status.attr,
&dev_attr_unit_access_denied.attr,
+ &dev_attr_unit_access_shared.attr,
+ &dev_attr_unit_access_readonly.attr,
NULL
};
static struct attribute_group zfcp_unit_attr_group = {
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 48b2918e0d65..92ff027746f2 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1353,7 +1353,6 @@ config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
depends on PCI && SCSI
select SCSI_FC_ATTRS
- select GENERIC_CSUM
select CRC_T10DIF
help
This lpfc driver supports the Emulex LightPulse
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index b6d1f92ed33c..c18c68150e9f 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -38,7 +38,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.5.0.22"
+#define DRV_VERSION "1.5.0.23"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 5f09d1814d26..42e15ee6e1bb 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -642,19 +642,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
skb_queue_head_init(&fnic->fip_frame_queue);
- spin_lock_irqsave(&fnic_list_lock, flags);
- if (!fnic_fip_queue) {
- fnic_fip_queue =
- create_singlethread_workqueue("fnic_fip_q");
- if (!fnic_fip_queue) {
- spin_unlock_irqrestore(&fnic_list_lock, flags);
- printk(KERN_ERR PFX "fnic FIP work queue "
- "create failed\n");
- err = -ENOMEM;
- goto err_out_free_max_pool;
- }
- }
- spin_unlock_irqrestore(&fnic_list_lock, flags);
INIT_LIST_HEAD(&fnic->evlist);
INIT_LIST_HEAD(&fnic->vlans);
} else {
@@ -960,6 +947,13 @@ static int __init fnic_init_module(void)
spin_lock_init(&fnic_list_lock);
INIT_LIST_HEAD(&fnic_list);
+ fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
+ if (!fnic_fip_queue) {
+ printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
+ err = -ENOMEM;
+ goto err_create_fip_workq;
+ }
+
fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
if (!fnic_fc_transport) {
printk(KERN_ERR PFX "fc_attach_transport error\n");
@@ -978,6 +972,8 @@ static int __init fnic_init_module(void)
err_pci_register:
fc_release_transport(fnic_fc_transport);
err_fc_transport:
+ destroy_workqueue(fnic_fip_queue);
+err_create_fip_workq:
destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
kmem_cache_destroy(fnic_io_req_cache);
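
The fnic change hoists workqueue creation out of the per-device probe path, where a spinlock was held, into module init, where a single goto chain unwinds partial initialization. A minimal sketch; the demo names and register_stub() are placeholders for the driver's remaining init steps:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_fip_wq;

    static int register_stub(void)
    {
            return 0;       /* stand-in for transport/PCI registration */
    }

    static int __init demo_init(void)
    {
            int err;

            demo_fip_wq = create_singlethread_workqueue("demo_fip_q");
            if (!demo_fip_wq)
                    return -ENOMEM;

            err = register_stub();
            if (err)
                    goto err_destroy_wq;

            return 0;

    err_destroy_wq:
            destroy_workqueue(demo_fip_wq);
            return err;
    }
    module_init(demo_init);

    static void __exit demo_exit(void)
    {
            destroy_workqueue(demo_fip_wq);
    }
    module_exit(demo_exit);

    MODULE_LICENSE("GPL");
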
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0177295599e0..1f0ca68409d4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3547,11 +3547,21 @@ static int megasas_init_fw(struct megasas_instance *instance)
break;
}
- /*
- * We expect the FW state to be READY
- */
- if (megasas_transition_to_ready(instance, 0))
- goto fail_ready_state;
+ if (megasas_transition_to_ready(instance, 0)) {
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset
+ (instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+ dev_info(&instance->pdev->dev,
+ "megasas: FW restarted successfully from %s!\n",
+ __func__);
+
+ /* wait for about 30 seconds before retrying */
+ ssleep(30);
+
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+ }
/*
* MSI-X host index 0 is common for all adapter.
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3b1ea34e1f5a..eaa808e6ba91 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1031,6 +1031,9 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
{
int i, result;
+ if (sdev->skip_vpd_pages)
+ goto fail;
+
/* Ask for all the pages supported by this device */
result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
if (result)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 2168258fb2c3..74b88efde6ad 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -751,7 +751,7 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
vscsi->affinity_hint_set = true;
} else {
- for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++)
+ for (i = 0; i < vscsi->num_queues; i++)
virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
vscsi->affinity_hint_set = false;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 222d3e37fc28..707966bd5610 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -609,7 +609,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
else
buf = (void *)t->tx_buf;
t->tx_dma = dma_map_single(&spi->dev, buf,
- t->len, DMA_FROM_DEVICE);
+ t->len, DMA_TO_DEVICE);
if (!t->tx_dma) {
ret = -EFAULT;
goto err_tx_map;
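
The davinci fix corrects the DMA direction flag: a TX buffer is read by the device, so it must be mapped DMA_TO_DEVICE, and the wrong direction breaks cache maintenance on non-coherent systems. The idiom, in a hypothetical helper:

    #include <linux/dma-mapping.h>

    /* Direction is from the CPU's point of view: TX buffers are read
     * by the device (DMA_TO_DEVICE), RX buffers are written by the
     * device (DMA_FROM_DEVICE). */
    static dma_addr_t demo_map_tx(struct device *dev, void *buf, size_t len)
    {
            return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    }
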
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index e25eba5713c1..b3b5125faa72 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -482,7 +482,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
ret = comedi_device_postconfig(dev);
if (ret < 0) {
comedi_device_detach(dev);
- module_put(dev->driver->module);
+ module_put(driv->module);
}
/* On success, the driver module count has been incremented. */
return ret;
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index dcceed29d31a..81972fa47beb 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1811,10 +1811,12 @@ static int zcache_comp_init(void)
#else
if (*zcache_comp_name != '\0') {
ret = crypto_has_comp(zcache_comp_name, 0, 0);
- if (!ret)
+ if (!ret) {
pr_info("zcache: %s not supported\n",
zcache_comp_name);
- goto out;
+ ret = 1;
+ goto out;
+ }
}
if (!ret)
strcpy(zcache_comp_name, "lzo");
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index bb91b4713ebd..2e3ea1a70d7b 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -31,9 +31,8 @@ static int __init serial_init_chip(struct parisc_device *dev)
int err;
#ifdef CONFIG_64BIT
- extern int iosapic_serial_irq(int cellnum);
if (!dev->irq && (dev->id.sversion == 0xad))
- dev->irq = iosapic_serial_irq(dev->mod_index-1);
+ dev->irq = iosapic_serial_irq(dev);
#endif
if (!dev->irq) {
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index cbf1d155b7b2..22f280aa4f2c 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -773,6 +773,6 @@ module_init(arc_serial_init);
module_exit(arc_serial_exit);
MODULE_LICENSE("GPL");
-MODULE_ALIAS("plat-arcfpga/uart");
+MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Vineet Gupta");
MODULE_DESCRIPTION("ARC(Synopsys) On-Chip(fpga) serial driver");
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 4f5f161896a1..f85b8e6d0346 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -678,11 +678,18 @@ static void mxs_auart_settermios(struct uart_port *u,
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
- u32 istatus, istat;
+ u32 istat;
struct mxs_auart_port *s = context;
u32 stat = readl(s->port.membase + AUART_STAT);
- istatus = istat = readl(s->port.membase + AUART_INTR);
+ istat = readl(s->port.membase + AUART_INTR);
+
+ /* ack irq */
+ writel(istat & (AUART_INTR_RTIS
+ | AUART_INTR_TXIS
+ | AUART_INTR_RXIS
+ | AUART_INTR_CTSMIS),
+ s->port.membase + AUART_INTR_CLR);
if (istat & AUART_INTR_CTSMIS) {
uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
@@ -702,12 +709,6 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
istat &= ~AUART_INTR_TXIS;
}
- writel(istatus & (AUART_INTR_RTIS
- | AUART_INTR_TXIS
- | AUART_INTR_RXIS
- | AUART_INTR_CTSMIS),
- s->port.membase + AUART_INTR_CLR);
-
return IRQ_HANDLED;
}
@@ -850,7 +851,7 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
struct mxs_auart_port *s;
struct uart_port *port;
unsigned int old_ctrl0, old_ctrl2;
- unsigned int to = 1000;
+ unsigned int to = 20000;
if (co->index >= MXS_AUART_PORTS || co->index < 0)
return;
@@ -871,18 +872,23 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
uart_console_write(port, str, count, mxs_auart_console_putchar);
- /*
- * Finally, wait for transmitter to become empty
- * and restore the TCR
- */
+ /* Finally, wait for transmitter to become empty ... */
while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
+ udelay(1);
if (!to--)
break;
- udelay(1);
}
- writel(old_ctrl0, port->membase + AUART_CTRL0);
- writel(old_ctrl2, port->membase + AUART_CTRL2);
+ /*
+ * ... and restore the TCR if we waited long enough for the transmitter
+ * to be idle. This might keep the transmitter enabled although it is
+ * unused, but that is better than disabling it while it is still
+ * transmitting.
+ */
+ if (!(readl(port->membase + AUART_STAT) & AUART_STAT_BUSY)) {
+ writel(old_ctrl0, port->membase + AUART_CTRL0);
+ writel(old_ctrl2, port->membase + AUART_CTRL2);
+ }
clk_disable(s->clk);
}
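
The mxs-auart interrupt fix moves the acknowledgement write ahead of the handling: acking after servicing the FIFOs can clear a status bit that was raised during servicing, dropping that event. The reordered shape, with made-up register offsets and source bits standing in for the AUART ones:

    #include <linux/io.h>
    #include <linux/irqreturn.h>

    #define DEMO_INTR       0x50    /* status (stand-in offset) */
    #define DEMO_INTR_CLR   0x58    /* write-1-to-clear (stand-in) */
    #define DEMO_RX         0x1u
    #define DEMO_TX         0x2u

    static irqreturn_t demo_irq_handle(void __iomem *base)
    {
            u32 istat = readl(base + DEMO_INTR);

            /* Ack first: an event arriving during handling sets the
             * status bit again and re-raises the interrupt. */
            writel(istat & (DEMO_RX | DEMO_TX), base + DEMO_INTR_CLR);

            if (istat & DEMO_RX)
                    ;       /* drain the RX FIFO */
            if (istat & DEMO_TX)
                    ;       /* refill the TX FIFO */

            return IRQ_HANDLED;
    }
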
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 121aeb9393e1..f597e88a705d 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -256,10 +256,9 @@ void tty_port_tty_hangup(struct tty_port *port, bool check_clocal)
{
struct tty_struct *tty = tty_port_tty_get(port);
- if (tty && (!check_clocal || !C_CLOCAL(tty))) {
+ if (tty && (!check_clocal || !C_CLOCAL(tty)))
tty_hangup(tty);
- tty_kref_put(tty);
- }
+ tty_kref_put(tty);
}
EXPORT_SYMBOL_GPL(tty_port_tty_hangup);
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index eb2aa2e5a842..d1bd8ef1f9c1 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -12,7 +12,7 @@ if USB_CHIPIDEA
config USB_CHIPIDEA_UDC
bool "ChipIdea device controller"
- depends on USB_GADGET=y || USB_CHIPIDEA=m
+ depends on USB_GADGET=y || (USB_CHIPIDEA=m && USB_GADGET=m)
help
Say Y here to enable device controller functionality of the
ChipIdea driver.
@@ -20,7 +20,7 @@ config USB_CHIPIDEA_UDC
config USB_CHIPIDEA_HOST
bool "ChipIdea host controller"
depends on USB=y
- depends on USB_EHCI_HCD=y || USB_CHIPIDEA=m
+ depends on USB_EHCI_HCD=y || (USB_CHIPIDEA=m && USB_EHCI_HCD=m)
select USB_EHCI_ROOT_HUB_TT
help
Say Y here to enable host controller functionality of the
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h
index aefa0261220c..1b23e354f9fb 100644
--- a/drivers/usb/chipidea/bits.h
+++ b/drivers/usb/chipidea/bits.h
@@ -50,7 +50,7 @@
#define PORTSC_PTC (0x0FUL << 16)
/* PTS and PTW for non lpm version only */
#define PORTSC_PTS(d) \
- ((((d) & 0x3) << 30) | (((d) & 0x4) ? BIT(25) : 0))
+ (u32)((((d) & 0x3) << 30) | (((d) & 0x4) ? BIT(25) : 0))
#define PORTSC_PTW BIT(28)
#define PORTSC_STS BIT(29)
@@ -59,7 +59,7 @@
#define DEVLC_PSPD_HS (0x02UL << 25)
#define DEVLC_PTW BIT(27)
#define DEVLC_STS BIT(28)
-#define DEVLC_PTS(d) (((d) & 0x7) << 29)
+#define DEVLC_PTS(d) (u32)(((d) & 0x7) << 29)
/* Encoding for DEVLC_PTS and PORTSC_PTS */
#define PTS_UTMI 0
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 609dbc2f7151..83b4ef4dfcf8 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1119,11 +1119,11 @@ static int usbtmc_probe(struct usb_interface *intf,
/* Determine if it is a Rigol or not */
data->rigol_quirk = 0;
dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n",
- data->usb_dev->descriptor.idVendor,
- data->usb_dev->descriptor.idProduct);
+ le16_to_cpu(data->usb_dev->descriptor.idVendor),
+ le16_to_cpu(data->usb_dev->descriptor.idProduct));
for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) {
- if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) &&
- (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) {
+ if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) &&
+ (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) {
dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n");
data->rigol_quirk = 1;
break;
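
Fields in struct usb_device_descriptor are stored little-endian as they arrive off the wire, so idVendor/idProduct must pass through le16_to_cpu() before any host-order comparison; on big-endian machines the unconverted compare above matched nothing. A small helper capturing the idiom (demo_ name is a placeholder):

    #include <linux/usb.h>

    static bool demo_match_id(struct usb_device *udev, u16 vid, u16 pid)
    {
            return le16_to_cpu(udev->descriptor.idVendor) == vid &&
                   le16_to_cpu(udev->descriptor.idProduct) == pid;
    }
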
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4a8a1d68002c..558313de4911 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4798,7 +4798,8 @@ static void hub_events(void)
hub->ports[i - 1]->child;
dev_dbg(hub_dev, "warm reset port %d\n", i);
- if (!udev) {
+ if (!udev || !(portstatus &
+ USB_PORT_STAT_CONNECTION)) {
status = hub_port_reset(hub, i,
NULL, HUB_BH_RESET_TIME,
true);
@@ -4808,8 +4809,8 @@ static void hub_events(void)
usb_lock_device(udev);
status = usb_reset_device(udev);
usb_unlock_device(udev);
+ connect_change = 0;
}
- connect_change = 0;
}
if (connect_change)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a63598895077..5b44cd47da5b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04d8, 0x000c), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* CarrolTouch 4000U */
+ { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* CarrolTouch 4500U */
+ { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Samsung Android phone modem - ID conflict with SPH-I500 */
{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index f48712ffe261..c1c113ef950c 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -449,14 +449,20 @@ fail:
static int __exit eth_unbind(struct usb_composite_dev *cdev)
{
- if (has_rndis())
+ if (has_rndis()) {
+ usb_put_function(f_rndis);
usb_put_function_instance(fi_rndis);
- if (use_eem)
+ }
+ if (use_eem) {
+ usb_put_function(f_eem);
usb_put_function_instance(fi_eem);
- else if (can_support_ecm(cdev->gadget))
+ } else if (can_support_ecm(cdev->gadget)) {
+ usb_put_function(f_ecm);
usb_put_function_instance(fi_ecm);
- else
+ } else {
+ usb_put_function(f_geth);
usb_put_function_instance(fi_geth);
+ }
return 0;
}
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 1bf26e9f38cd..eb3aa817a662 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -488,7 +488,6 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_ep *ep;
int status, i;
-#ifndef USBF_PHONET_INCLUDED
struct f_phonet_opts *phonet_opts;
phonet_opts = container_of(f->fi, struct f_phonet_opts, func_inst);
@@ -507,7 +506,6 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f)
return status;
phonet_opts->bound = true;
}
-#endif
/* Reserve interface IDs */
status = usb_interface_id(c, f);
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 032b96a51ce4..2a1ebefd8f9e 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -160,10 +160,8 @@ static __init int rndis_do_config(struct usb_configuration *c)
return ret;
f_acm_rndis = usb_get_function(fi_acm);
- if (IS_ERR(f_acm_rndis)) {
- ret = PTR_ERR(f_acm_rndis);
- goto err_func_acm;
- }
+ if (IS_ERR(f_acm_rndis))
+ return PTR_ERR(f_acm_rndis);
ret = usb_add_function(c, f_acm_rndis);
if (ret)
@@ -178,7 +176,6 @@ err_fsg:
usb_remove_function(c, f_acm_rndis);
err_conf:
usb_put_function(f_acm_rndis);
-err_func_acm:
return ret;
}
@@ -226,7 +223,7 @@ static __init int cdc_do_config(struct usb_configuration *c)
/* implicit port_num is zero */
f_acm_multi = usb_get_function(fi_acm);
if (IS_ERR(f_acm_multi))
- goto err_func_acm;
+ return PTR_ERR(f_acm_multi);
ret = usb_add_function(c, f_acm_multi);
if (ret)
@@ -241,7 +238,6 @@ err_fsg:
usb_remove_function(c, f_acm_multi);
err_conf:
usb_put_function(f_acm_multi);
-err_func_acm:
return ret;
}
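/*
 * Editor's note (not part of the patch): the two multi.c hunks above drop
 * an error label that only did "return ret" -- when the first acquisition
 * in a function fails there is nothing to unwind, so returning directly is
 * clearer.  The surviving labels still follow the usual reverse-order
 * unwind, sketched here with hypothetical resources:
 */
#include <stdlib.h>

struct res { int id; };

static struct res *acquire(int id)
{
        struct res *r = malloc(sizeof(*r));

        if (r)
                r->id = id;
        return r;
}

static void release(struct res *r)
{
        free(r);
}

static int setup_pair(struct res **a, struct res **b)
{
        *a = acquire(1);
        if (!*a)
                return -1;      /* nothing acquired yet: plain return */

        *b = acquire(2);
        if (!*b)
                goto err_a;     /* unwind in reverse order of acquisition */

        return 0;

err_a:
        release(*a);
        *a = NULL;
        return -1;
}

int main(void)
{
        struct res *a, *b;

        if (setup_pair(&a, &b) == 0) {
                release(b);
                release(a);
        }
        return 0;
}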
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index c28ac9872030..13e25f80fc20 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -109,7 +109,7 @@ void usb_gadget_set_state(struct usb_gadget *gadget,
enum usb_device_state state)
{
gadget->state = state;
- sysfs_notify(&gadget->dev.kobj, NULL, "status");
+ sysfs_notify(&gadget->dev.kobj, NULL, "state");
}
EXPORT_SYMBOL_GPL(usb_gadget_set_state);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index f80d0330d548..8e3c878f38cf 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1391,21 +1391,20 @@ iso_stream_schedule (
/* Behind the scheduling threshold? */
if (unlikely(start < next)) {
+ unsigned now2 = (now - base) & (mod - 1);
/* USB_ISO_ASAP: Round up to the first available slot */
if (urb->transfer_flags & URB_ISO_ASAP)
start += (next - start + period - 1) & -period;
/*
- * Not ASAP: Use the next slot in the stream. If
- * the entire URB falls before the threshold, fail.
+ * Not ASAP: Use the next slot in the stream,
+ * no matter what.
*/
- else if (start + span - period < next) {
- ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n",
+ else if (start + span - period < now2) {
+ ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n",
urb, start + base,
- span - period, next + base);
- status = -EXDEV;
- goto fail;
+ span - period, now2 + base);
}
}
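/*
 * Editor's note (not part of the patch): the URB_ISO_ASAP branch above
 * rounds the distance to the scheduling threshold up to the next multiple
 * of the (power-of-two) polling period with "(x + period - 1) & -period",
 * a division-free round-up.  Standalone check of the idiom:
 */
#include <assert.h>

static unsigned int round_up_pow2(unsigned int x, unsigned int period)
{
        /* period must be a power of two */
        return (x + period - 1) & -period;
}

int main(void)
{
        assert(round_up_pow2(5, 4) == 8);
        assert(round_up_pow2(8, 4) == 8);
        assert(round_up_pow2(0, 4) == 0);
        return 0;
}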
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 08613e241894..0f1d193fef02 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -304,6 +304,11 @@ static int __init ohci_pci_init(void)
pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
+
+ /* Entries for the PCI suspend/resume callbacks are special */
+ ohci_pci_hc_driver.pci_suspend = ohci_suspend;
+ ohci_pci_hc_driver.pci_resume = ohci_resume;
+
return pci_register_driver(&ohci_pci_driver);
}
module_init(ohci_pci_init);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index df6978abd7e6..6f8c2fd47675 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include "xhci.h"
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 41eb4fc33453..9478caa2f71f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -27,6 +27,7 @@
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
+#include <linux/dma-mapping.h>
#include "xhci.h"
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index eb3c8c142fa9..eeb27208c0d1 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -830,7 +830,7 @@ static int adu_probe(struct usb_interface *interface,
/* let the user know what node this device is now attached to */
dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n",
- udev->descriptor.idProduct, dev->serial_number,
+ le16_to_cpu(udev->descriptor.idProduct), dev->serial_number,
(dev->minor - ADU_MINOR_BASE));
exit:
dbg(2, " %s : leave, return value %p (dev)", __func__, dev);
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 6708a3b78ad8..f44e8b5e00c9 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -481,7 +481,7 @@ static u64 omap2430_dmamask = DMA_BIT_MASK(32);
static int omap2430_probe(struct platform_device *pdev)
{
- struct resource musb_resources[2];
+ struct resource musb_resources[3];
struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
struct omap_musb_board_data *data;
struct platform_device *musb;
@@ -581,6 +581,11 @@ static int omap2430_probe(struct platform_device *pdev)
musb_resources[1].end = pdev->resource[1].end;
musb_resources[1].flags = pdev->resource[1].flags;
+ musb_resources[2].name = pdev->resource[2].name;
+ musb_resources[2].start = pdev->resource[2].start;
+ musb_resources[2].end = pdev->resource[2].end;
+ musb_resources[2].flags = pdev->resource[2].flags;
+
ret = platform_device_add_resources(musb, musb_resources,
ARRAY_SIZE(musb_resources));
if (ret) {
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 2c06a8969a9f..6f8a9ca96ae7 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1156,7 +1156,7 @@ static u64 tusb_dmamask = DMA_BIT_MASK(32);
static int tusb_probe(struct platform_device *pdev)
{
- struct resource musb_resources[2];
+ struct resource musb_resources[3];
struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
struct platform_device *musb;
struct tusb6010_glue *glue;
@@ -1199,6 +1199,11 @@ static int tusb_probe(struct platform_device *pdev)
musb_resources[1].end = pdev->resource[1].end;
musb_resources[1].flags = pdev->resource[1].flags;
+ musb_resources[2].name = pdev->resource[2].name;
+ musb_resources[2].start = pdev->resource[2].start;
+ musb_resources[2].end = pdev->resource[2].end;
+ musb_resources[2].flags = pdev->resource[2].flags;
+
ret = platform_device_add_resources(musb, musb_resources,
ARRAY_SIZE(musb_resources));
if (ret) {
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index ca266280895d..e1859b8ef567 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -15,7 +15,7 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include "otg_fsm.h"
+#include "phy-fsm-usb.h"
#include <linux/usb/otg.h>
#include <linux/ioctl.h>
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index c520b3548e7c..7f4596606e18 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -29,7 +29,7 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
-#include "phy-otg-fsm.h"
+#include "phy-fsm-usb.h"
/* Change USB protocol when there is a protocol change */
static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 8c3a42ea910c..7eef9b33fde6 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -719,6 +719,13 @@ config USB_SERIAL_FLASHLOADER
To compile this driver as a module, choose M here: the
module will be called flashloader.
+config USB_SERIAL_SUUNTO
+ tristate "USB Suunto ANT+ driver"
+ help
+ Say Y here if you want to use the Suunto ANT+ USB device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called suunto.
config USB_SERIAL_DEBUG
tristate "USB Debugging Device"
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index f7130114488f..a14a870d993f 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o
obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o
obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o
obj-$(CONFIG_USB_SERIAL_SSU100) += ssu100.o
+obj-$(CONFIG_USB_SERIAL_SUUNTO) += suunto.o
obj-$(CONFIG_USB_SERIAL_SYMBOL) += symbolserial.o
obj-$(CONFIG_USB_SERIAL_WWAN) += usb_wwan.o
obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7260ec660347..b65e657c641d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -735,9 +735,34 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29A_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29F_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S01_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29C_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_81B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_82B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5D_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K4Y_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5G_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S05_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_60_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_61_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_64_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_65_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92D_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_W5R_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_A5R_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_PW1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 6dd79253205d..1b8af461b522 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -815,11 +815,35 @@
/*
* RT Systems programming cables for various ham radios
*/
-#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
-#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
-#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
-#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */
-
+#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
+#define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */
+#define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */
+#define RTSYSTEMS_USB_57A_PID 0x9e51 /* USB-57A USB to 4pin 3.5mm plug */
+#define RTSYSTEMS_USB_57B_PID 0x9e52 /* USB-57B USB to extended 4pin 3.5mm plug */
+#define RTSYSTEMS_USB_29A_PID 0x9e53 /* USB-29A USB to 3.5mm stereo plug */
+#define RTSYSTEMS_USB_29B_PID 0x9e54 /* USB-29B USB to 6 pin mini din */
+#define RTSYSTEMS_USB_29F_PID 0x9e55 /* USB-29F USB to 6 pin modular plug */
+#define RTSYSTEMS_USB_62B_PID 0x9e56 /* USB-62B USB to 8 pin mini din plug*/
+#define RTSYSTEMS_USB_S01_PID 0x9e57 /* USB-RTS01 USB to 3.5 mm stereo plug*/
+#define RTSYSTEMS_USB_63_PID 0x9e58 /* USB-63 USB to 9 pin female*/
+#define RTSYSTEMS_USB_29C_PID 0x9e59 /* USB-29C USB to 4 pin modular plug*/
+#define RTSYSTEMS_USB_81B_PID 0x9e5A /* USB-81 USB to 8 pin mini din plug*/
+#define RTSYSTEMS_USB_82B_PID 0x9e5B /* USB-82 USB to 2.5 mm stereo plug*/
+#define RTSYSTEMS_USB_K5D_PID 0x9e5C /* USB-K5D USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_K4Y_PID 0x9e5D /* USB-K4Y USB to 2.5/3.5 mm plugs*/
+#define RTSYSTEMS_USB_K5G_PID 0x9e5E /* USB-K5G USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_S05_PID 0x9e5F /* USB-RTS05 USB to 2.5 mm stereo plug*/
+#define RTSYSTEMS_USB_60_PID 0x9e60 /* USB-60 USB to 6 pin din*/
+#define RTSYSTEMS_USB_61_PID 0x9e61 /* USB-61 USB to 6 pin mini din*/
+#define RTSYSTEMS_USB_62_PID 0x9e62 /* USB-62 USB to 8 pin mini din*/
+#define RTSYSTEMS_USB_63B_PID 0x9e63 /* USB-63 USB to 9 pin female*/
+#define RTSYSTEMS_USB_64_PID 0x9e64 /* USB-64 USB to 9 pin male*/
+#define RTSYSTEMS_USB_65_PID 0x9e65 /* USB-65 USB to 9 pin female null modem*/
+#define RTSYSTEMS_USB_92_PID 0x9e66 /* USB-92 USB to 12 pin plug*/
+#define RTSYSTEMS_USB_92D_PID 0x9e67 /* USB-92D USB to 12 pin plug data*/
+#define RTSYSTEMS_USB_W5R_PID 0x9e68 /* USB-W5R USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_A5R_PID 0x9e69 /* USB-A5R USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_PW1_PID 0x9e6A /* USB-PW1 USB to 8 pin modular plug*/
/*
* Physik Instrumente
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 5a979729f8ec..58c17fdc85eb 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2303,7 +2303,7 @@ static int keyspan_startup(struct usb_serial *serial)
if (d_details == NULL) {
dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
__func__, le16_to_cpu(serial->dev->descriptor.idProduct));
- return 1;
+ return -ENODEV;
}
/* Setup private data for serial driver */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 51da424327b0..b01300164fc0 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -90,6 +90,7 @@ struct urbtracker {
struct list_head urblist_entry;
struct kref ref_count;
struct urb *urb;
+ struct usb_ctrlrequest *setup;
};
enum mos7715_pp_modes {
@@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref)
struct mos7715_parport *mos_parport = urbtrack->mos_parport;
usb_free_urb(urbtrack->urb);
+ kfree(urbtrack->setup);
kfree(urbtrack);
kref_put(&mos_parport->ref_count, destroy_mos_parport);
}
@@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
struct urbtracker *urbtrack;
int ret_val;
unsigned long flags;
- struct usb_ctrlrequest setup;
struct usb_serial *serial = mos_parport->serial;
struct usb_device *usbdev = serial->dev;
@@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
kfree(urbtrack);
return -ENOMEM;
}
- setup.bRequestType = (__u8)0x40;
- setup.bRequest = (__u8)0x0e;
- setup.wValue = get_reg_value(reg, dummy);
- setup.wIndex = get_reg_index(reg);
- setup.wLength = 0;
+ urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
+ if (!urbtrack->setup) {
+ usb_free_urb(urbtrack->urb);
+ kfree(urbtrack);
+ return -ENOMEM;
+ }
+ urbtrack->setup->bRequestType = (__u8)0x40;
+ urbtrack->setup->bRequest = (__u8)0x0e;
+ urbtrack->setup->wValue = get_reg_value(reg, dummy);
+ urbtrack->setup->wIndex = get_reg_index(reg);
+ urbtrack->setup->wLength = 0;
usb_fill_control_urb(urbtrack->urb, usbdev,
usb_sndctrlpipe(usbdev, 0),
- (unsigned char *)&setup,
+ (unsigned char *)urbtrack->setup,
NULL, 0, async_complete, urbtrack);
kref_init(&urbtrack->ref_count);
INIT_LIST_HEAD(&urbtrack->urblist_entry);
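/*
 * Editor's note (not part of the patch): the mos7720 hunk above moves the
 * usb_ctrlrequest off the submitter's stack because the setup packet must
 * stay valid (and DMA-able) for the whole asynchronous life of the control
 * URB, which outlives write_parport_reg_nonblock(); it is now freed with
 * the tracker in destroy_urbtracker().  Generic shape of the bug class,
 * with a hypothetical async API:
 */
#include <stdlib.h>
#include <string.h>

struct request {
        unsigned char setup[8];         /* must outlive the submission */
};

/*
 * BROKEN variant (do not do this): passing the address of a local
 * "unsigned char setup[8]" to an async layer leaves the completion
 * handler reading a dead stack frame.
 */

/* FIXED: heap-allocate; the completion path frees it. */
static struct request *submit_async_write(void)
{
        struct request *req = malloc(sizeof(*req));

        if (!req)
                return NULL;
        memset(req->setup, 0, sizeof(req->setup));
        /* ... hand req->setup to the async layer here ... */
        return req;
}

int main(void)
{
        free(submit_async_write());     /* stand-in for the completion */
        return 0;
}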
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 603fb70dde80..3bac4693c038 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -183,7 +183,10 @@
#define LED_ON_MS 500
#define LED_OFF_MS 500
-static int device_type;
+enum mos7840_flag {
+ MOS7840_FLAG_CTRL_BUSY,
+ MOS7840_FLAG_LED_BUSY,
+};
static const struct usb_device_id id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
@@ -238,9 +241,12 @@ struct moschip_port {
/* For device(s) with LED indicator */
bool has_led;
- bool led_flag;
struct timer_list led_timer1; /* Timer for LED on */
struct timer_list led_timer2; /* Timer for LED off */
+ struct urb *led_urb;
+ struct usb_ctrlrequest *led_dr;
+
+ unsigned long flags;
};
/*
@@ -460,10 +466,10 @@ static void mos7840_control_callback(struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status);
- return;
+ goto out;
default:
dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
- return;
+ goto out;
}
dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
@@ -476,6 +482,8 @@ static void mos7840_control_callback(struct urb *urb)
mos7840_handle_new_msr(mos7840_port, regval);
else if (mos7840_port->MsrLsr == 1)
mos7840_handle_new_lsr(mos7840_port, regval);
+out:
+ clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
}
static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
@@ -486,6 +494,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
unsigned char *buffer = mcs->ctrl_buf;
int ret;
+ if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
+ return -EBUSY;
+
dr->bRequestType = MCS_RD_RTYPE;
dr->bRequest = MCS_RDREQ;
dr->wValue = cpu_to_le16(Wval); /* 0 */
@@ -497,6 +508,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
mos7840_control_callback, mcs);
mcs->control_urb->transfer_buffer_length = 2;
ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
+ if (ret)
+ clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);
+
return ret;
}
@@ -523,7 +537,7 @@ static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval,
__u16 reg)
{
struct usb_device *dev = mcs->port->serial->dev;
- struct usb_ctrlrequest *dr = mcs->dr;
+ struct usb_ctrlrequest *dr = mcs->led_dr;
dr->bRequestType = MCS_WR_RTYPE;
dr->bRequest = MCS_WRREQ;
@@ -531,10 +545,10 @@ static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval,
dr->wIndex = cpu_to_le16(reg);
dr->wLength = cpu_to_le16(0);
- usb_fill_control_urb(mcs->control_urb, dev, usb_sndctrlpipe(dev, 0),
+ usb_fill_control_urb(mcs->led_urb, dev, usb_sndctrlpipe(dev, 0),
(unsigned char *)dr, NULL, 0, mos7840_set_led_callback, NULL);
- usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
+ usb_submit_urb(mcs->led_urb, GFP_ATOMIC);
}
static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
@@ -560,7 +574,19 @@ static void mos7840_led_flag_off(unsigned long arg)
{
struct moschip_port *mcs = (struct moschip_port *) arg;
- mcs->led_flag = false;
+ clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
+}
+
+static void mos7840_led_activity(struct usb_serial_port *port)
+{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
+
+ if (test_and_set_bit_lock(MOS7840_FLAG_LED_BUSY, &mos7840_port->flags))
+ return;
+
+ mos7840_set_led_async(mos7840_port, 0x0301, MODEM_CONTROL_REGISTER);
+ mod_timer(&mos7840_port->led_timer1,
+ jiffies + msecs_to_jiffies(LED_ON_MS));
}
/*****************************************************************************
@@ -758,14 +784,8 @@ static void mos7840_bulk_in_callback(struct urb *urb)
return;
}
- /* Turn on LED */
- if (mos7840_port->has_led && !mos7840_port->led_flag) {
- mos7840_port->led_flag = true;
- mos7840_set_led_async(mos7840_port, 0x0301,
- MODEM_CONTROL_REGISTER);
- mod_timer(&mos7840_port->led_timer1,
- jiffies + msecs_to_jiffies(LED_ON_MS));
- }
+ if (mos7840_port->has_led)
+ mos7840_led_activity(port);
mos7840_port->read_urb_busy = true;
retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
@@ -816,18 +836,6 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
/************************************************************************/
/* D R I V E R T T Y I N T E R F A C E F U N C T I O N S */
/************************************************************************/
-#ifdef MCSSerialProbe
-static int mos7840_serial_probe(struct usb_serial *serial,
- const struct usb_device_id *id)
-{
-
- /*need to implement the mode_reg reading and updating\
- structures usb_serial_ device_type\
- (i.e num_ports, num_bulkin,bulkout etc) */
- /* Also we can update the changes attach */
- return 1;
-}
-#endif
/*****************************************************************************
* mos7840_open
@@ -1454,13 +1462,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
data1 = urb->transfer_buffer;
dev_dbg(&port->dev, "bulkout endpoint is %d\n", port->bulk_out_endpointAddress);
- /* Turn on LED */
- if (mos7840_port->has_led && !mos7840_port->led_flag) {
- mos7840_port->led_flag = true;
- mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0301);
- mod_timer(&mos7840_port->led_timer1,
- jiffies + msecs_to_jiffies(LED_ON_MS));
- }
+ if (mos7840_port->has_led)
+ mos7840_led_activity(port);
/* send it down the pipe */
status = usb_submit_urb(urb, GFP_ATOMIC);
@@ -2187,38 +2190,48 @@ static int mos7810_check(struct usb_serial *serial)
return 0;
}
-static int mos7840_calc_num_ports(struct usb_serial *serial)
+static int mos7840_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
{
- __u16 data = 0x00;
+ u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
u8 *buf;
- int mos7840_num_ports;
+ int device_type;
+
+ if (product == MOSCHIP_DEVICE_ID_7810 ||
+ product == MOSCHIP_DEVICE_ID_7820) {
+ device_type = product;
+ goto out;
+ }
buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
- if (buf) {
- usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ if (!buf)
+ return -ENOMEM;
+
+ usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
- data = *buf;
- kfree(buf);
- }
- if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 ||
- serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) {
- device_type = serial->dev->descriptor.idProduct;
- } else {
- /* For a MCS7840 device GPIO0 must be set to 1 */
- if ((data & 0x01) == 1)
- device_type = MOSCHIP_DEVICE_ID_7840;
- else if (mos7810_check(serial))
- device_type = MOSCHIP_DEVICE_ID_7810;
- else
- device_type = MOSCHIP_DEVICE_ID_7820;
- }
+ /* For a MCS7840 device GPIO0 must be set to 1 */
+ if (buf[0] & 0x01)
+ device_type = MOSCHIP_DEVICE_ID_7840;
+ else if (mos7810_check(serial))
+ device_type = MOSCHIP_DEVICE_ID_7810;
+ else
+ device_type = MOSCHIP_DEVICE_ID_7820;
+
+ kfree(buf);
+out:
+ usb_set_serial_data(serial, (void *)(unsigned long)device_type);
+
+ return 0;
+}
+
+static int mos7840_calc_num_ports(struct usb_serial *serial)
+{
+ int device_type = (unsigned long)usb_get_serial_data(serial);
+ int mos7840_num_ports;
mos7840_num_ports = (device_type >> 4) & 0x000F;
- serial->num_bulk_in = mos7840_num_ports;
- serial->num_bulk_out = mos7840_num_ports;
- serial->num_ports = mos7840_num_ports;
return mos7840_num_ports;
}
@@ -2226,6 +2239,7 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
static int mos7840_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
+ int device_type = (unsigned long)usb_get_serial_data(serial);
struct moschip_port *mos7840_port;
int status;
int pnum;
@@ -2401,6 +2415,14 @@ static int mos7840_port_probe(struct usb_serial_port *port)
if (device_type == MOSCHIP_DEVICE_ID_7810) {
mos7840_port->has_led = true;
+ mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL);
+ mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr),
+ GFP_KERNEL);
+ if (!mos7840_port->led_urb || !mos7840_port->led_dr) {
+ status = -ENOMEM;
+ goto error;
+ }
+
init_timer(&mos7840_port->led_timer1);
mos7840_port->led_timer1.function = mos7840_led_off;
mos7840_port->led_timer1.expires =
@@ -2413,8 +2435,6 @@ static int mos7840_port_probe(struct usb_serial_port *port)
jiffies + msecs_to_jiffies(LED_OFF_MS);
mos7840_port->led_timer2.data = (unsigned long)mos7840_port;
- mos7840_port->led_flag = false;
-
/* Turn off LED */
mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
}
@@ -2436,6 +2456,8 @@ out:
}
return 0;
error:
+ kfree(mos7840_port->led_dr);
+ usb_free_urb(mos7840_port->led_urb);
kfree(mos7840_port->dr);
kfree(mos7840_port->ctrl_buf);
usb_free_urb(mos7840_port->control_urb);
@@ -2456,6 +2478,10 @@ static int mos7840_port_remove(struct usb_serial_port *port)
del_timer_sync(&mos7840_port->led_timer1);
del_timer_sync(&mos7840_port->led_timer2);
+
+ usb_kill_urb(mos7840_port->led_urb);
+ usb_free_urb(mos7840_port->led_urb);
+ kfree(mos7840_port->led_dr);
}
usb_kill_urb(mos7840_port->control_urb);
usb_free_urb(mos7840_port->control_urb);
@@ -2482,9 +2508,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
.throttle = mos7840_throttle,
.unthrottle = mos7840_unthrottle,
.calc_num_ports = mos7840_calc_num_ports,
-#ifdef MCSSerialProbe
- .probe = mos7840_serial_probe,
-#endif
+ .probe = mos7840_probe,
.ioctl = mos7840_ioctl,
.set_termios = mos7840_set_termios,
.break_ctl = mos7840_break,
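/*
 * Editor's note (not part of the patch): the MOS7840_FLAG_* bits above
 * implement a lock-bit guard -- test_and_set_bit_lock() claims the flag
 * and the completion (or submit-error path) releases it with
 * clear_bit_unlock() -- so a control or LED URB is never submitted while
 * the previous one is still in flight.  Userspace analogue of the same
 * pattern with C11 atomics:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag ctrl_busy = ATOMIC_FLAG_INIT;

static bool start_ctrl_op(void)
{
        /* false means an operation is already in flight: back off */
        if (atomic_flag_test_and_set_explicit(&ctrl_busy,
                                              memory_order_acquire))
                return false;
        /* ... submit; the completion handler calls finish_ctrl_op() ... */
        return true;
}

static void finish_ctrl_op(void)
{
        atomic_flag_clear_explicit(&ctrl_busy, memory_order_release);
}

int main(void)
{
        printf("first claim: %d\n", start_ctrl_op());   /* 1 */
        printf("second claim: %d\n", start_ctrl_op());  /* 0: busy */
        finish_ctrl_op();
        return 0;
}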
diff --git a/drivers/usb/serial/suunto.c b/drivers/usb/serial/suunto.c
new file mode 100644
index 000000000000..2248e7a7d5ad
--- /dev/null
+++ b/drivers/usb/serial/suunto.c
@@ -0,0 +1,41 @@
+/*
+ * Suunto ANT+ USB Driver
+ *
+ * Copyright (C) 2013 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (C) 2013 Linux Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include <linux/uaccess.h>
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0fcf, 0x1008) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_serial_driver suunto_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ },
+ .id_table = id_table,
+ .num_ports = 1,
+};
+
+static struct usb_serial_driver * const serial_drivers[] = {
+ &suunto_device,
+ NULL,
+};
+
+module_usb_serial_driver(serial_drivers, id_table);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 375b5a400b6f..5c9f9b1d7736 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1536,14 +1536,15 @@ static int ti_download_firmware(struct ti_device *tdev)
char buf[32];
/* try ID specific firmware first, then try generic firmware */
- sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
- dev->descriptor.idProduct);
+ sprintf(buf, "ti_usb-v%04x-p%04x.fw",
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
status = request_firmware(&fw_p, buf, &dev->dev);
if (status != 0) {
buf[0] = '\0';
- if (dev->descriptor.idVendor == MTS_VENDOR_ID) {
- switch (dev->descriptor.idProduct) {
+ if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) {
+ switch (le16_to_cpu(dev->descriptor.idProduct)) {
case MTS_CDMA_PRODUCT_ID:
strcpy(buf, "mts_cdma.fw");
break;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 8257d30c4072..85365784040b 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb)
tty_flip_buffer_push(&port->port);
} else
dev_dbg(dev, "%s: empty read urb received\n", __func__);
-
- /* Resubmit urb so we continue receiving */
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err) {
- if (err != -EPERM) {
- dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err);
- /* busy also in error unless we are killed */
- usb_mark_last_busy(port->serial->dev);
- }
- } else {
+ }
+ /* Resubmit urb so we continue receiving */
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ if (err != -EPERM) {
+ dev_err(dev, "%s: resubmit read urb failed. (%d)\n",
+ __func__, err);
+ /* busy also in error unless we are killed */
usb_mark_last_busy(port->serial->dev);
}
+ } else {
+ usb_mark_last_busy(port->serial->dev);
}
}
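/*
 * Editor's note (not part of the patch): the usb_wwan hunk above hoists
 * the resubmission out of the "status == 0" branch so the read URB is
 * re-armed after errored completions too; previously one bad completion
 * could silently stop all further reception.  Shape of a self-rearming
 * completion, with stand-in types:
 */
#include <stdio.h>

struct xfer {
        int status;
};

static int resubmit(struct xfer *x)
{
        x->status = 0;                  /* stand-in for usb_submit_urb() */
        return 0;
}

static void read_completion(struct xfer *x)
{
        if (x->status == 0)
                printf("push received data\n");
        else
                printf("log error %d\n", x->status);

        /* re-arm unconditionally so the stream keeps flowing */
        if (resubmit(x) != 0)
                printf("resubmit failed; reception stops\n");
}

int main(void)
{
        struct xfer x = { .status = -5 };

        read_completion(&x);
        return 0;
}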
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 16968c899493..d3493ca0525d 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1226,6 +1226,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
}
spin_lock_irqsave(&xfer->lock, flags);
rpipe = xfer->ep->hcpriv;
+ if (rpipe == NULL) {
+ pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
+ __func__, wa_xfer_id(xfer),
+ "Probably already aborted.\n" );
+ goto out_unlock;
+ }
/* Check the delayed list -> if there, release and complete */
spin_lock_irqsave(&wa->xfer_list_lock, flags2);
if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
@@ -1644,8 +1650,7 @@ static void wa_xfer_result_cb(struct urb *urb)
break;
}
usb_status = xfer_result->bTransferStatus & 0x3f;
- if (usb_status == WA_XFER_STATUS_ABORTED
- || usb_status == WA_XFER_STATUS_NOT_FOUND)
+ if (usb_status == WA_XFER_STATUS_NOT_FOUND)
/* taken care of already */
break;
xfer_id = xfer_result->dwTransferID;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index c5179e269df6..cef6002acbd4 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -137,8 +137,27 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
*/
pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
- if (vdev->reset_works)
- __pci_reset_function(pdev);
+ /*
+ * Careful, device_lock may already be held. This is the case if
+ * a driver unbind is blocked. Try to get the locks ourselves to
+ * prevent a deadlock.
+ */
+ if (vdev->reset_works) {
+ bool reset_done = false;
+
+ if (pci_cfg_access_trylock(pdev)) {
+ if (device_trylock(&pdev->dev)) {
+ __pci_reset_function_locked(pdev);
+ reset_done = true;
+ device_unlock(&pdev->dev);
+ }
+ pci_cfg_access_unlock(pdev);
+ }
+
+ if (!reset_done)
+ pr_warn("%s: Unable to acquire locks for reset of %s\n",
+ __func__, dev_name(&pdev->dev));
+ }
pci_restore_state(pdev);
}
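/*
 * Editor's note (not part of the patch): the vfio hunk above swaps an
 * unconditional reset for a trylock pair because the disable path can be
 * reached while device_lock is already held (a blocked driver unbind);
 * taking it again would deadlock.  Trying the locks and warning on
 * failure trades an occasional skipped reset for guaranteed forward
 * progress.  pthread analogue of the pattern:
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static bool try_reset_device(void)
{
        bool done = false;

        if (pthread_mutex_trylock(&dev_lock) == 0) {
                /* ... perform the reset with the lock held ... */
                done = true;
                pthread_mutex_unlock(&dev_lock);
        }
        if (!done)
                fprintf(stderr, "reset skipped: lock unavailable\n");
        return done;
}

int main(void)
{
        try_reset_device();
        return 0;
}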
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index c488da5db7c7..842f4507883e 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -494,27 +494,6 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
return 0;
}
-static int vfio_group_nb_del_dev(struct vfio_group *group, struct device *dev)
-{
- struct vfio_device *device;
-
- /*
- * Expect to fall out here. If a device was in use, it would
- * have been bound to a vfio sub-driver, which would have blocked
- * in .remove at vfio_del_group_dev. Sanity check that we no
- * longer track the device, so it's safe to remove.
- */
- device = vfio_group_get_device(group, dev);
- if (likely(!device))
- return 0;
-
- WARN("Device %s removed from live group %d!\n", dev_name(dev),
- iommu_group_id(group->iommu_group));
-
- vfio_device_put(device);
- return 0;
-}
-
static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
/* We don't care what happens when the group isn't in use */
@@ -531,13 +510,11 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
struct device *dev = data;
/*
- * Need to go through a group_lock lookup to get a reference or
- * we risk racing a group being removed. Leave a WARN_ON for
- * debuging, but if the group no longer exists, a spurious notify
- * is harmless.
+ * Need to go through a group_lock lookup to get a reference or we
+ * risk racing a group being removed. Ignore spurious notifies.
*/
group = vfio_group_try_get(group);
- if (WARN_ON(!group))
+ if (!group)
return NOTIFY_OK;
switch (action) {
@@ -545,7 +522,13 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
vfio_group_nb_add_dev(group, dev);
break;
case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
- vfio_group_nb_del_dev(group, dev);
+ /*
+ * Nothing to do here. If the device is in use, then the
+ * vfio sub-driver should block the remove callback until
+ * it is unused. If the device is unused or attached to a
+ * stub driver, then it should be released and we don't
+ * care that it will be going away.
+ */
break;
case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
pr_debug("%s: Device %s, group %d binding to driver\n",
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index a89c15de9f45..9b0f12c5c284 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -435,8 +435,8 @@ static int correct_chipset(struct atyfb_par *par)
const char *name;
int i;
- for (i = ARRAY_SIZE(aty_chips); i > 0; i--)
- if (par->pci_id == aty_chips[i - 1].pci_id)
+ for (i = (int)ARRAY_SIZE(aty_chips) - 1; i >= 0; i--)
+ if (par->pci_id == aty_chips[i].pci_id)
break;
if (i < 0)
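/*
 * Editor's note (not part of the patch): in the old atyfb loop, i ran
 * from ARRAY_SIZE down to 1 and ended at 0 when nothing matched, so the
 * "if (i < 0)" not-found test could never fire.  Counting from size-1
 * down to 0, as the hunk does, makes -1 a reachable sentinel:
 */
#include <assert.h>

static int find_last(const int *tbl, int n, int want)
{
        int i;

        for (i = n - 1; i >= 0; i--)
                if (tbl[i] == want)
                        break;
        return i;               /* -1 means not found */
}

int main(void)
{
        int tbl[] = { 10, 20, 30 };

        assert(find_last(tbl, 3, 20) == 1);
        assert(find_last(tbl, 3, 99) == -1);
        return 0;
}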
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 3ba37713b1f9..dc09ebe4aba5 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -239,24 +239,6 @@ static const struct fb_bitfield def_rgb565[] = {
}
};
-static const struct fb_bitfield def_rgb666[] = {
- [RED] = {
- .offset = 16,
- .length = 6,
- },
- [GREEN] = {
- .offset = 8,
- .length = 6,
- },
- [BLUE] = {
- .offset = 0,
- .length = 6,
- },
- [TRANSP] = { /* no support for transparency */
- .length = 0,
- }
-};
-
static const struct fb_bitfield def_rgb888[] = {
[RED] = {
.offset = 16,
@@ -309,9 +291,6 @@ static int mxsfb_check_var(struct fb_var_screeninfo *var,
break;
case STMLCDIF_16BIT:
case STMLCDIF_18BIT:
- /* 24 bit to 18 bit mapping */
- rgb = def_rgb666;
- break;
case STMLCDIF_24BIT:
/* real 24 bit */
rgb = def_rgb888;
@@ -453,11 +432,6 @@ static int mxsfb_set_par(struct fb_info *fb_info)
return -EINVAL;
case STMLCDIF_16BIT:
case STMLCDIF_18BIT:
- /* 24 bit to 18 bit mapping */
- ctrl |= CTRL_DF24; /* ignore the upper 2 bits in
- * each colour component
- */
- break;
case STMLCDIF_24BIT:
/* real 24 bit */
break;
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 8c527e5b293c..796e5112ceee 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -587,8 +587,7 @@ static int nuc900fb_probe(struct platform_device *pdev)
fbinfo->flags = FBINFO_FLAG_DEFAULT;
fbinfo->pseudo_palette = &fbi->pseudo_pal;
- ret = request_irq(irq, nuc900fb_irqhandler, 0,
- pdev->name, fbinfo);
+ ret = request_irq(irq, nuc900fb_irqhandler, 0, pdev->name, fbi);
if (ret) {
dev_err(&pdev->dev, "cannot register irq handler %d -err %d\n",
irq, ret);
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c
index 5338f362293b..1b60698f141e 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/omap2/displays-new/connector-analog-tv.c
@@ -28,6 +28,20 @@ struct panel_drv_data {
bool invert_polarity;
};
+static const struct omap_video_timings tvc_pal_timings = {
+ .x_res = 720,
+ .y_res = 574,
+ .pixel_clock = 13500,
+ .hsw = 64,
+ .hfp = 12,
+ .hbp = 68,
+ .vsw = 5,
+ .vfp = 5,
+ .vbp = 41,
+
+ .interlace = true,
+};
+
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
static int tvc_connect(struct omap_dss_device *dssdev)
@@ -212,14 +226,14 @@ static int tvc_probe(struct platform_device *pdev)
return -ENODEV;
}
- ddata->timings = omap_dss_pal_timings;
+ ddata->timings = tvc_pal_timings;
dssdev = &ddata->dssdev;
dssdev->driver = &tvc_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = omap_dss_pal_timings;
+ dssdev->panel.timings = tvc_pal_timings;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index b2a8912f6435..a9ac3ce2d0e9 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -713,7 +713,7 @@ static int sgivwfb_mmap(struct fb_info *info,
r = vm_iomap_memory(vma, sgivwfb_mem_phys, sgivwfb_mem_size);
printk(KERN_DEBUG "sgivwfb: mmap framebuffer P(%lx)->V(%lx)\n",
- offset, vma->vm_start);
+ sgivwfb_mem_phys + (vma->vm_pgoff << PAGE_SHIFT), vma->vm_start);
return r;
}
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index a8c6c43a4658..1265b25f9f99 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -567,7 +567,7 @@ static int sh7760fb_remove(struct platform_device *dev)
fb_dealloc_cmap(&info->cmap);
sh7760fb_free_mem(info);
if (par->irq >= 0)
- free_irq(par->irq, par);
+ free_irq(par->irq, &par->vsync);
iounmap(par->base);
release_mem_region(par->ioarea->start, resource_size(par->ioarea));
framebuffer_release(info);
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 830ded45fd47..2827333703d9 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1265,7 +1265,6 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image
static void vga16fb_destroy(struct fb_info *info)
{
- struct platform_device *dev = container_of(info->device, struct platform_device, dev);
iounmap(info->screen_base);
fb_dealloc_cmap(&info->cmap);
/* XXX unshare VGA regions */
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index f3d4a69e1e4e..6629b29a8202 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -341,8 +341,8 @@ static int xilinxfb_assign(struct platform_device *pdev,
if (drvdata->flags & BUS_ACCESS_FLAG) {
/* Put a banner in the log (for DEBUG) */
- dev_dbg(dev, "regs: phys=%x, virt=%p\n", drvdata->regs_phys,
- drvdata->regs);
+ dev_dbg(dev, "regs: phys=%pa, virt=%p\n",
+ &drvdata->regs_phys, drvdata->regs);
}
/* Put a banner in the log (for DEBUG) */
dev_dbg(dev, "fb: phys=%llx, virt=%p, size=%x\n",
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 9e02d60a364b..23eae5cb69c2 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -145,7 +145,7 @@ config SWIOTLB_XEN
config XEN_TMEM
tristate
- depends on !ARM
+ depends on !ARM && !ARM64
default m if (CLEANCACHE || FRONTSWAP)
help
Shim to interface in-kernel Transcendent Memory hooks
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index eabd0ee1c2bc..14fe79d8634a 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,9 +1,8 @@
-ifneq ($(CONFIG_ARM),y)
-obj-y += manage.o
+ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),)
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
endif
obj-$(CONFIG_X86) += fallback.o
-obj-y += grant-table.o features.o events.o balloon.o
+obj-y += grant-table.o features.o events.o balloon.o manage.o
obj-y += xenbus/
nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index a58ac435a9a4..5e8be462aed5 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void)
for_each_possible_cpu(i)
memset(per_cpu(cpu_evtchn_mask, i),
- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
}
static inline void clear_evtchn(int port)
@@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
+ struct shared_info *s = HYPERVISOR_shared_info;
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
+ int masked;
if (!VALID_EVTCHN(evtchn))
return -1;
@@ -1511,6 +1513,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
bind_vcpu.vcpu = tcpu;
/*
+ * Mask the event while changing the VCPU binding to prevent
+ * it being delivered on an unexpected VCPU.
+ */
+ masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+
+ /*
* If this fails, it usually just indicates that we're dealing with a
* virq or IPI channel, which don't actually need to be rebound. Ignore
* it, but don't do the xenlinux-level rebind in that case.
@@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
bind_evtchn_to_cpu(evtchn, tcpu);
+ if (!masked)
+ unmask_evtchn(evtchn);
+
return 0;
}
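/*
 * Editor's note (not part of the patch): the events hunk above masks the
 * channel across the VCPU rebind so nothing is delivered mid-change, and
 * unmasks afterwards only if the channel was not already masked --
 * preserving the caller's masking state.  Sketch of the save/restore
 * logic (the real code does the test-and-set atomically):
 */
#include <assert.h>
#include <stdbool.h>

struct chan {
        bool masked;
};

static bool mask_chan(struct chan *c)
{
        bool was_masked = c->masked;    /* remember the previous state */

        c->masked = true;
        return was_masked;
}

static void rebind_chan(struct chan *c)
{
        bool was_masked = mask_chan(c);

        /* ... change the VCPU binding while delivery is off ... */

        if (!was_masked)
                c->masked = false;      /* restore only what we changed */
}

int main(void)
{
        struct chan c = { .masked = false };

        rebind_chan(&c);
        assert(!c.masked);              /* caller's state preserved */
        return 0;
}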
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 8feecf01d55c..b6165e047f48 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -379,18 +379,12 @@ static long evtchn_ioctl(struct file *file,
if (unbind.port >= NR_EVENT_CHANNELS)
break;
- spin_lock_irq(&port_user_lock);
-
rc = -ENOTCONN;
- if (get_port_user(unbind.port) != u) {
- spin_unlock_irq(&port_user_lock);
+ if (get_port_user(unbind.port) != u)
break;
- }
disable_irq(irq_from_evtchn(unbind.port));
- spin_unlock_irq(&port_user_lock);
-
evtchn_unbind_from_user(u, unbind.port);
rc = 0;
@@ -490,26 +484,15 @@ static int evtchn_release(struct inode *inode, struct file *filp)
int i;
struct per_user_data *u = filp->private_data;
- spin_lock_irq(&port_user_lock);
-
- free_page((unsigned long)u->ring);
-
for (i = 0; i < NR_EVENT_CHANNELS; i++) {
if (get_port_user(i) != u)
continue;
disable_irq(irq_from_evtchn(i));
- }
-
- spin_unlock_irq(&port_user_lock);
-
- for (i = 0; i < NR_EVENT_CHANNELS; i++) {
- if (get_port_user(i) != u)
- continue;
-
evtchn_unbind_from_user(get_port_user(i), i);
}
+ free_page((unsigned long)u->ring);
kfree(u->name);
kfree(u);
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 6ed8a9df4472..34b20bfa4e8c 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -115,7 +115,6 @@ static int xenbus_frontend_dev_resume(struct device *dev)
return -EFAULT;
}
- INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume);
queue_work(xenbus_frontend_wq, &xdev->work);
return 0;
@@ -124,6 +123,16 @@ static int xenbus_frontend_dev_resume(struct device *dev)
return xenbus_dev_resume(dev);
}
+static int xenbus_frontend_dev_probe(struct device *dev)
+{
+ if (xen_store_domain_type == XS_LOCAL) {
+ struct xenbus_device *xdev = to_xenbus_device(dev);
+ INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume);
+ }
+
+ return xenbus_dev_probe(dev);
+}
+
static const struct dev_pm_ops xenbus_pm_ops = {
.suspend = xenbus_dev_suspend,
.resume = xenbus_frontend_dev_resume,
@@ -142,7 +151,7 @@ static struct xen_bus_type xenbus_frontend = {
.name = "xen",
.match = xenbus_match,
.uevent = xenbus_uevent_frontend,
- .probe = xenbus_dev_probe,
+ .probe = xenbus_frontend_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
.dev_attrs = xenbus_dev_attrs,
@@ -474,7 +483,11 @@ static int __init xenbus_probe_frontend_init(void)
register_xenstore_notifier(&xenstore_notifier);
- xenbus_frontend_wq = create_workqueue("xenbus_frontend");
+ if (xen_store_domain_type == XS_LOCAL) {
+ xenbus_frontend_wq = create_workqueue("xenbus_frontend");
+ if (!xenbus_frontend_wq)
+ pr_warn("create xenbus frontend workqueue failed, S3 resume is likely to fail\n");
+ }
return 0;
}
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 5e376bb93419..8defc6b3f9a2 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -40,7 +40,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
int block, off;
inode = iget_locked(sb, ino);
- if (IS_ERR(inode))
+ if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
diff --git a/fs/bio.c b/fs/bio.c
index 94bbc04dba77..c5eae7251490 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
int bio_uncopy_user(struct bio *bio)
{
struct bio_map_data *bmd = bio->bi_private;
- int ret = 0;
+ struct bio_vec *bvec;
+ int ret = 0, i;
- if (!bio_flagged(bio, BIO_NULL_MAPPED))
- ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
- bmd->nr_sgvecs, bio_data_dir(bio) == READ,
- 0, bmd->is_our_pages);
+ if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+ /*
+ * if we're in a workqueue, the request is orphaned, so
+ * don't copy into a random user address space, just free.
+ */
+ if (current->mm)
+ ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+ bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+ 0, bmd->is_our_pages);
+ else if (bmd->is_our_pages)
+ bio_for_each_segment_all(bvec, bio, i)
+ __free_page(bvec->bv_page);
+ }
bio_free_map_data(bmd);
bio_put(bio);
return ret;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index eaf133384a8f..8bc5e8ccb091 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -36,16 +36,23 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
u64 extent_item_pos,
struct extent_inode_elem **eie)
{
- u64 data_offset;
- u64 data_len;
+ u64 offset = 0;
struct extent_inode_elem *e;
- data_offset = btrfs_file_extent_offset(eb, fi);
- data_len = btrfs_file_extent_num_bytes(eb, fi);
+ if (!btrfs_file_extent_compression(eb, fi) &&
+ !btrfs_file_extent_encryption(eb, fi) &&
+ !btrfs_file_extent_other_encoding(eb, fi)) {
+ u64 data_offset;
+ u64 data_len;
- if (extent_item_pos < data_offset ||
- extent_item_pos >= data_offset + data_len)
- return 1;
+ data_offset = btrfs_file_extent_offset(eb, fi);
+ data_len = btrfs_file_extent_num_bytes(eb, fi);
+
+ if (extent_item_pos < data_offset ||
+ extent_item_pos >= data_offset + data_len)
+ return 1;
+ offset = extent_item_pos - data_offset;
+ }
e = kmalloc(sizeof(*e), GFP_NOFS);
if (!e)
@@ -53,7 +60,7 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
e->next = *eie;
e->inum = key->objectid;
- e->offset = key->offset + (extent_item_pos - data_offset);
+ e->offset = key->offset + offset;
*eie = e;
return 0;
@@ -189,7 +196,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
struct extent_buffer *eb;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
- struct extent_inode_elem *eie = NULL;
+ struct extent_inode_elem *eie = NULL, *old = NULL;
u64 disk_byte;
if (level != 0) {
@@ -223,6 +230,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
if (disk_byte == wanted_disk_byte) {
eie = NULL;
+ old = NULL;
if (extent_item_pos) {
ret = check_extent_in_eb(&key, eb, fi,
*extent_item_pos,
@@ -230,18 +238,20 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
if (ret < 0)
break;
}
- if (!ret) {
- ret = ulist_add(parents, eb->start,
- (uintptr_t)eie, GFP_NOFS);
- if (ret < 0)
- break;
- if (!extent_item_pos) {
- ret = btrfs_next_old_leaf(root, path,
- time_seq);
- continue;
- }
+ if (ret > 0)
+ goto next;
+ ret = ulist_add_merge(parents, eb->start,
+ (uintptr_t)eie,
+ (u64 *)&old, GFP_NOFS);
+ if (ret < 0)
+ break;
+ if (!ret && extent_item_pos) {
+ while (old->next)
+ old = old->next;
+ old->next = eie;
}
}
+next:
ret = btrfs_next_old_item(root, path, time_seq);
}
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 5bf4c39e2ad6..ed504607d8ec 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1271,7 +1271,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
BUG_ON(!eb_rewin);
}
- extent_buffer_get(eb_rewin);
btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 583d98bd065e..fe443fece851 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4048,7 +4048,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
while (!end) {
- u64 offset_in_extent;
+ u64 offset_in_extent = 0;
/* break if the extent we found is outside the range */
if (em->start >= max || extent_map_end(em) < off)
@@ -4064,9 +4064,12 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
/*
* record the offset from the start of the extent
- * for adjusting the disk offset below
+ * for adjusting the disk offset below. Only do this if the
+ * extent isn't compressed since our in ram offset may be past
+ * what we have actually allocated on disk.
*/
- offset_in_extent = em_start - em->start;
+ if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
+ offset_in_extent = em_start - em->start;
em_end = extent_map_end(em);
em_len = em_end - em_start;
emflags = em->flags;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a005fe2c072a..8e686a427ce2 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -596,20 +596,29 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
if (no_splits)
goto next;
- if (em->block_start < EXTENT_MAP_LAST_BYTE &&
- em->start < start) {
+ if (em->start < start) {
split->start = em->start;
split->len = start - em->start;
- split->orig_start = em->orig_start;
- split->block_start = em->block_start;
- if (compressed)
- split->block_len = em->block_len;
- else
- split->block_len = split->len;
- split->ram_bytes = em->ram_bytes;
- split->orig_block_len = max(split->block_len,
- em->orig_block_len);
+ if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+ split->orig_start = em->orig_start;
+ split->block_start = em->block_start;
+
+ if (compressed)
+ split->block_len = em->block_len;
+ else
+ split->block_len = split->len;
+ split->orig_block_len = max(split->block_len,
+ em->orig_block_len);
+ split->ram_bytes = em->ram_bytes;
+ } else {
+ split->orig_start = split->start;
+ split->block_len = 0;
+ split->block_start = em->block_start;
+ split->orig_block_len = 0;
+ split->ram_bytes = split->len;
+ }
+
split->generation = gen;
split->bdev = em->bdev;
split->flags = flags;
@@ -620,8 +629,7 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
split = split2;
split2 = NULL;
}
- if (em->block_start < EXTENT_MAP_LAST_BYTE &&
- testend && em->start + em->len > start + len) {
+ if (testend && em->start + em->len > start + len) {
u64 diff = start + len - em->start;
split->start = start + len;
@@ -630,18 +638,28 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
split->flags = flags;
split->compress_type = em->compress_type;
split->generation = gen;
- split->orig_block_len = max(em->block_len,
+
+ if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+ split->orig_block_len = max(em->block_len,
em->orig_block_len);
- split->ram_bytes = em->ram_bytes;
- if (compressed) {
- split->block_len = em->block_len;
- split->block_start = em->block_start;
- split->orig_start = em->orig_start;
+ split->ram_bytes = em->ram_bytes;
+ if (compressed) {
+ split->block_len = em->block_len;
+ split->block_start = em->block_start;
+ split->orig_start = em->orig_start;
+ } else {
+ split->block_len = split->len;
+ split->block_start = em->block_start
+ + diff;
+ split->orig_start = em->orig_start;
+ }
} else {
- split->block_len = split->len;
- split->block_start = em->block_start + diff;
- split->orig_start = em->orig_start;
+ split->ram_bytes = split->len;
+ split->orig_start = split->start;
+ split->block_len = 0;
+ split->block_start = em->block_start;
+ split->orig_block_len = 0;
}
ret = add_extent_mapping(em_tree, split, modified);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 6d1b93c8aafb..021694c08181 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2166,16 +2166,23 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
continue;
- extent_offset = btrfs_file_extent_offset(leaf, extent);
- if (key.offset - extent_offset != offset)
+ /*
+ * 'offset' refers to the exact key.offset,
+ * NOT the 'offset' field in btrfs_extent_data_ref, ie.
+ * (key.offset - extent_offset).
+ */
+ if (key.offset != offset)
continue;
+ extent_offset = btrfs_file_extent_offset(leaf, extent);
num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
+
if (extent_offset >= old->extent_offset + old->offset +
old->len || extent_offset + num_bytes <=
old->extent_offset + old->offset)
continue;
+ ret = 0;
break;
}
@@ -2187,7 +2194,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
backref->root_id = root_id;
backref->inum = inum;
- backref->file_pos = offset + extent_offset;
+ backref->file_pos = offset;
backref->num_bytes = num_bytes;
backref->extent_offset = extent_offset;
backref->generation = btrfs_file_extent_generation(leaf, extent);
@@ -2210,7 +2217,8 @@ static noinline bool record_extent_backrefs(struct btrfs_path *path,
new->path = path;
list_for_each_entry_safe(old, tmp, &new->head, list) {
- ret = iterate_inodes_from_logical(old->bytenr, fs_info,
+ ret = iterate_inodes_from_logical(old->bytenr +
+ old->extent_offset, fs_info,
path, record_one_backref,
old);
BUG_ON(ret < 0 && ret != -ENOENT);
@@ -4391,9 +4399,6 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
int mask = attr->ia_valid;
int ret;
- if (newsize == oldsize)
- return 0;
-
/*
* The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
* special case where we need to update the times despite not having
@@ -5165,14 +5170,31 @@ next:
}
/* Reached end of directory/root. Bump pos past the last item. */
- if (key_type == BTRFS_DIR_INDEX_KEY)
- /*
- * 32-bit glibc will use getdents64, but then strtol -
- * so the last number we can serve is this.
- */
- ctx->pos = 0x7fffffff;
- else
- ctx->pos++;
+ ctx->pos++;
+
+ /*
+ * Stop new entries from being returned after we return the last
+ * entry.
+ *
+ * New directory entries are assigned a strictly increasing
+ * offset. This means that new entries created during readdir
+ * are *guaranteed* to be seen in the future by that readdir.
+ * This has broken buggy programs which operate on names as
+ * they're returned by readdir. Until we re-use freed offsets
+ * we have this hack to stop new entries from being returned
+ * under the assumption that they'll never reach this huge
+ * offset.
+ *
+ * This is being careful not to overflow 32bit loff_t unless the
+ * last entry requires it because doing so has broken 32bit apps
+ * in the past.
+ */
+ if (key_type == BTRFS_DIR_INDEX_KEY) {
+ if (ctx->pos >= INT_MAX)
+ ctx->pos = LLONG_MAX;
+ else
+ ctx->pos = INT_MAX;
+ }
nopos:
ret = 0;
err:
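/*
 * Editor's note (not part of the patch): the readdir hunk above pins the
 * final directory position to INT_MAX so buggy 32-bit callers that
 * truncate offsets keep working, and only moves to LLONG_MAX once the
 * position already exceeds what 32 bits can hold.  The clamping
 * arithmetic in isolation:
 */
#include <assert.h>
#include <limits.h>

static long long clamp_dir_pos(long long pos)
{
        return pos >= INT_MAX ? LLONG_MAX : INT_MAX;
}

int main(void)
{
        assert(clamp_dir_pos(42) == INT_MAX);
        assert(clamp_dir_pos((long long)INT_MAX + 1) == LLONG_MAX);
        return 0;
}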
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index d58cce77fc6c..af1931a5960d 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -983,12 +983,12 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
* a dirty root struct and adds it into the list of dead roots that need to
* be deleted
*/
-int btrfs_add_dead_root(struct btrfs_root *root)
+void btrfs_add_dead_root(struct btrfs_root *root)
{
spin_lock(&root->fs_info->trans_lock);
- list_add_tail(&root->root_list, &root->fs_info->dead_roots);
+ if (list_empty(&root->root_list))
+ list_add_tail(&root->root_list, &root->fs_info->dead_roots);
spin_unlock(&root->fs_info->trans_lock);
- return 0;
}
/*
@@ -1925,7 +1925,7 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
}
root = list_first_entry(&fs_info->dead_roots,
struct btrfs_root, root_list);
- list_del(&root->root_list);
+ list_del_init(&root->root_list);
spin_unlock(&fs_info->trans_lock);
pr_debug("btrfs: cleaner removing %llu\n",
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 005b0375d18c..defbc4269897 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -143,7 +143,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-int btrfs_add_dead_root(struct btrfs_root *root);
+void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 2c6791493637..ff60d8978ae2 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3746,8 +3746,9 @@ next_slot:
}
log_extents:
+ btrfs_release_path(path);
+ btrfs_release_path(dst_path);
if (fast_search) {
- btrfs_release_path(dst_path);
ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
if (ret) {
err = ret;
@@ -3764,8 +3765,6 @@ log_extents:
}
if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
- btrfs_release_path(path);
- btrfs_release_path(dst_path);
ret = log_directory_changes(trans, root, inode, path, dst_path);
if (ret) {
err = ret;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 45e57cc38200..fc6f4f3a1a9d 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -43,17 +43,18 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(server->secmech.md5)) {
cifs_dbg(VFS, "could not allocate crypto md5\n");
- return PTR_ERR(server->secmech.md5);
+ rc = PTR_ERR(server->secmech.md5);
+ server->secmech.md5 = NULL;
+ return rc;
}
size = sizeof(struct shash_desc) +
crypto_shash_descsize(server->secmech.md5);
server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
if (!server->secmech.sdescmd5) {
- rc = -ENOMEM;
crypto_free_shash(server->secmech.md5);
server->secmech.md5 = NULL;
- return rc;
+ return -ENOMEM;
}
server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
server->secmech.sdescmd5->shash.flags = 0x0;
@@ -421,7 +422,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
if (blobptr + attrsize > blobend)
break;
if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
- if (!attrsize)
+ if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
break;
if (!ses->domainName) {
ses->domainName =
@@ -591,6 +592,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
{
+ int rc;
unsigned int size;
/* check if already allocated */
@@ -600,7 +602,9 @@ static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
if (IS_ERR(server->secmech.hmacmd5)) {
cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
- return PTR_ERR(server->secmech.hmacmd5);
+ rc = PTR_ERR(server->secmech.hmacmd5);
+ server->secmech.hmacmd5 = NULL;
+ return rc;
}
size = sizeof(struct shash_desc) +
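All three secmech fixes in this file follow one pattern: when crypto_alloc_shash() fails it returns an ERR_PTR value, and leaving that in the struct would make a later teardown path of the form "if (ptr) free(ptr)" operate on garbage, so the error is saved and the field reset to NULL before returning. A self-contained userspace model of the pattern (miniature ERR_PTR macros, hypothetical names):

#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct ctx { void *tfm; };

static void *alloc_tfm(void)
{
	return ERR_PTR(-ENOMEM);	/* stand-in: allocation failed */
}

static long ctx_setup(struct ctx *c)
{
	c->tfm = alloc_tfm();
	if (IS_ERR(c->tfm)) {
		long rc = PTR_ERR(c->tfm);

		c->tfm = NULL;	/* so teardown never frees an ERR_PTR */
		return rc;
	}
	return 0;
}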
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 4bdd547dbf6f..85ea98d139fc 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -147,18 +147,17 @@ cifs_read_super(struct super_block *sb)
goto out_no_root;
}
+ if (cifs_sb_master_tcon(cifs_sb)->nocase)
+ sb->s_d_op = &cifs_ci_dentry_ops;
+ else
+ sb->s_d_op = &cifs_dentry_ops;
+
sb->s_root = d_make_root(inode);
if (!sb->s_root) {
rc = -ENOMEM;
goto out_no_root;
}
- /* do that *after* d_make_root() - we want NULL ->d_op for root here */
- if (cifs_sb_master_tcon(cifs_sb)->nocase)
- sb->s_d_op = &cifs_ci_dentry_ops;
- else
- sb->s_d_op = &cifs_dentry_ops;
-
#ifdef CONFIG_CIFS_NFSD_EXPORT
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cifs_dbg(FYI, "export ops supported\n");
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 1fdc37041057..52ca861ed35e 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -44,6 +44,7 @@
#define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
#define MAX_SERVER_SIZE 15
#define MAX_SHARE_SIZE 80
+#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
#define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
@@ -369,6 +370,9 @@ struct smb_version_operations {
void (*generate_signingkey)(struct TCP_Server_Info *server);
int (*calc_signature)(struct smb_rqst *rqst,
struct TCP_Server_Info *server);
+ int (*query_mf_symlink)(const unsigned char *path, char *pbuf,
+ unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+ unsigned int xid);
};
struct smb_version_values {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f7e584d047e2..b29a012bed33 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -497,5 +497,7 @@ void cifs_writev_complete(struct work_struct *work);
struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
work_func_t complete);
void cifs_writedata_release(struct kref *refcount);
-
+int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+ unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+ unsigned int xid);
#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index fa68813396b5..d67c550c4980 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1675,7 +1675,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (string == NULL)
goto out_nomem;
- if (strnlen(string, 256) == 256) {
+ if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
+ == CIFS_MAX_DOMAINNAME_LEN) {
printk(KERN_WARNING "CIFS: domain name too"
" long\n");
goto cifs_parse_mount_err;
@@ -2276,8 +2277,8 @@ cifs_put_smb_ses(struct cifs_ses *ses)
#ifdef CONFIG_KEYS
-/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
-#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
+/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
+#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
/* Populate username and pw fields from keyring if possible */
static int
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 1e57f36ea1b2..7e36ae34e947 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -647,6 +647,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
oflags, &oplock, &cfile->fid.netfid, xid);
if (rc == 0) {
cifs_dbg(FYI, "posix reopen succeeded\n");
+ oparms.reconnect = true;
goto reopen_success;
}
/*
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index b83c3f5646bd..562044f700e5 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -305,67 +305,89 @@ CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr)
}
int
-CIFSCheckMFSymlink(struct cifs_fattr *fattr,
- const unsigned char *path,
- struct cifs_sb_info *cifs_sb, unsigned int xid)
+open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+ unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+ unsigned int xid)
{
int rc;
int oplock = 0;
__u16 netfid = 0;
struct tcon_link *tlink;
- struct cifs_tcon *pTcon;
+ struct cifs_tcon *ptcon;
struct cifs_io_parms io_parms;
- u8 *buf;
- char *pbuf;
- unsigned int bytes_read = 0;
int buf_type = CIFS_NO_BUFFER;
- unsigned int link_len = 0;
FILE_ALL_INFO file_info;
- if (!CIFSCouldBeMFSymlink(fattr))
- /* it's not a symlink */
- return 0;
-
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
- pTcon = tlink_tcon(tlink);
+ ptcon = tlink_tcon(tlink);
- rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ,
+ rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ,
CREATE_NOT_DIR, &netfid, &oplock, &file_info,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc != 0)
- goto out;
+ if (rc != 0) {
+ cifs_put_tlink(tlink);
+ return rc;
+ }
if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
- CIFSSMBClose(xid, pTcon, netfid);
+ CIFSSMBClose(xid, ptcon, netfid);
+ cifs_put_tlink(tlink);
/* it's not a symlink */
- goto out;
+ return rc;
}
- buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
- if (!buf) {
- rc = -ENOMEM;
- goto out;
- }
- pbuf = buf;
io_parms.netfid = netfid;
io_parms.pid = current->tgid;
- io_parms.tcon = pTcon;
+ io_parms.tcon = ptcon;
io_parms.offset = 0;
io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
- rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
- CIFSSMBClose(xid, pTcon, netfid);
- if (rc != 0) {
- kfree(buf);
+ rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
+ CIFSSMBClose(xid, ptcon, netfid);
+ cifs_put_tlink(tlink);
+ return rc;
+}
+
+
+int
+CIFSCheckMFSymlink(struct cifs_fattr *fattr,
+ const unsigned char *path,
+ struct cifs_sb_info *cifs_sb, unsigned int xid)
+{
+ int rc = 0;
+ u8 *buf = NULL;
+ unsigned int link_len = 0;
+ unsigned int bytes_read = 0;
+ struct cifs_tcon *ptcon;
+
+ if (!CIFSCouldBeMFSymlink(fattr))
+ /* it's not a symlink */
+ return 0;
+
+ buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+ if (!buf) {
+ rc = -ENOMEM;
goto out;
}
+ ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+ if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
+ rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
+ &bytes_read, cifs_sb, xid);
+ else
+ goto out;
+
+ if (rc != 0)
+ goto out;
+
+ if (bytes_read == 0) /* not a symlink */
+ goto out;
+
rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL);
- kfree(buf);
if (rc == -EINVAL) {
/* it's not a symlink */
rc = 0;
@@ -381,7 +403,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
fattr->cf_dtype = DT_LNK;
out:
- cifs_put_tlink(tlink);
+ kfree(buf);
return rc;
}
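The refactor above splits the SMB1-specific open/read/close out of CIFSCheckMFSymlink() and reaches it through the per-server operations table instead, so other dialects can plug in their own implementation later. The shape of that dispatch, reduced to a sketch (simplified types and names, not the real structures):

struct server_ops {
	int (*query_mf_symlink)(const char *path, char *buf,
				unsigned int *bytes_read);
};

struct server { const struct server_ops *ops; };

static int check_mf_symlink(struct server *srv, const char *path,
			    char *buf, unsigned int *bytes_read)
{
	if (!srv->ops->query_mf_symlink)
		return 0;	/* protocol has no MF-symlink support */
	return srv->ops->query_mf_symlink(path, buf, bytes_read);
}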
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index ab8778469394..69d2c826a23b 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -111,6 +111,14 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
return;
}
+ /*
+ * If we know that the inode will need to be revalidated immediately,
+ * then don't create a new dentry for it. We'll end up doing an
+ * on-the-wire call either way and this spares us an invalidation.

+ */
+ if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+ return;
+
dentry = d_alloc(parent, name);
if (!dentry)
return;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 79358e341fd2..08dd37bb23aa 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -197,7 +197,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
bytes_ret = 0;
} else
bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
- 256, nls_cp);
+ CIFS_MAX_DOMAINNAME_LEN, nls_cp);
bcc_ptr += 2 * bytes_ret;
bcc_ptr += 2; /* account for null terminator */
@@ -255,8 +255,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
/* copy domain */
if (ses->domainName != NULL) {
- strncpy(bcc_ptr, ses->domainName, 256);
- bcc_ptr += strnlen(ses->domainName, 256);
+ strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
} /* else we will send a null domain name
so the server will default to its own domain */
*bcc_ptr = 0;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 6457690731a2..60943978aec3 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -944,6 +944,7 @@ struct smb_version_operations smb1_operations = {
.mand_lock = cifs_mand_lock,
.mand_unlock_range = cifs_unlock_range,
.push_mand_locks = cifs_push_mandatory_locks,
+ .query_mf_symlink = open_query_close_cifs_symlink,
};
struct smb_version_values smb1_values = {
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 301b191270b9..4f2300d020c7 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -42,6 +42,7 @@
static int
smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
{
+ int rc;
unsigned int size;
if (server->secmech.sdeschmacsha256 != NULL)
@@ -50,7 +51,9 @@ smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
if (IS_ERR(server->secmech.hmacsha256)) {
cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
- return PTR_ERR(server->secmech.hmacsha256);
+ rc = PTR_ERR(server->secmech.hmacsha256);
+ server->secmech.hmacsha256 = NULL;
+ return rc;
}
size = sizeof(struct shash_desc) +
@@ -87,7 +90,9 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
server->secmech.sdeschmacsha256 = NULL;
crypto_free_shash(server->secmech.hmacsha256);
server->secmech.hmacsha256 = NULL;
- return PTR_ERR(server->secmech.cmacaes);
+ rc = PTR_ERR(server->secmech.cmacaes);
+ server->secmech.cmacaes = NULL;
+ return rc;
}
size = sizeof(struct shash_desc) +
diff --git a/fs/dcache.c b/fs/dcache.c
index 87bdb5329c3c..83cfb834db03 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2724,6 +2724,17 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
return memcpy(buffer, temp, sz);
}
+char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ char *end = buffer + buflen;
+ /* these dentries are never renamed, so d_lock is not needed */
+ if (prepend(&end, &buflen, " (deleted)", 11) ||
+ prepend_name(&end, &buflen, &dentry->d_name) ||
+ prepend(&end, &buflen, "/", 1))
+ end = ERR_PTR(-ENAMETOOLONG);
+ return end;
+}
+
/*
* Write full pathname from the root of the filesystem into the buffer.
*/
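simple_dname() builds the path backwards from the end of the caller's buffer via prepend(), which is why the " (deleted)" suffix goes in first. A runnable userspace model of that technique (hypothetical helper, simplified error handling):

#include <stdio.h>
#include <string.h>

static char *prepend_str(char **end, int *buflen, const char *s, int len)
{
	*buflen -= len;
	if (*buflen < 0)
		return NULL;
	*end -= len;
	memcpy(*end, s, len);
	return *end;
}

int main(void)
{
	char buf[64];
	char *end = buf + sizeof(buf);
	int len = sizeof(buf);

	/* Suffix first (11 bytes includes the NUL), then name, then '/'. */
	prepend_str(&end, &len, " (deleted)", 11);
	prepend_str(&end, &len, "dentry-name", 11);
	prepend_str(&end, &len, "/", 1);
	printf("%s\n", end);	/* prints: /dentry-name (deleted) */
	return 0;
}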
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 4888cb3fdef7..c7c83ff0f752 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -533,8 +533,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
*/
void debugfs_remove_recursive(struct dentry *dentry)
{
- struct dentry *child;
- struct dentry *parent;
+ struct dentry *child, *next, *parent;
if (IS_ERR_OR_NULL(dentry))
return;
@@ -544,61 +543,37 @@ void debugfs_remove_recursive(struct dentry *dentry)
return;
parent = dentry;
+ down:
mutex_lock(&parent->d_inode->i_mutex);
+ list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
+ if (!debugfs_positive(child))
+ continue;
- while (1) {
- /*
- * When all dentries under "parent" has been removed,
- * walk up the tree until we reach our starting point.
- */
- if (list_empty(&parent->d_subdirs)) {
- mutex_unlock(&parent->d_inode->i_mutex);
- if (parent == dentry)
- break;
- parent = parent->d_parent;
- mutex_lock(&parent->d_inode->i_mutex);
- }
- child = list_entry(parent->d_subdirs.next, struct dentry,
- d_u.d_child);
- next_sibling:
-
- /*
- * If "child" isn't empty, walk down the tree and
- * remove all its descendants first.
- */
+ /* perhaps simple_empty(child) makes more sense */
if (!list_empty(&child->d_subdirs)) {
mutex_unlock(&parent->d_inode->i_mutex);
parent = child;
- mutex_lock(&parent->d_inode->i_mutex);
- continue;
+ goto down;
}
- __debugfs_remove(child, parent);
- if (parent->d_subdirs.next == &child->d_u.d_child) {
- /*
- * Try the next sibling.
- */
- if (child->d_u.d_child.next != &parent->d_subdirs) {
- child = list_entry(child->d_u.d_child.next,
- struct dentry,
- d_u.d_child);
- goto next_sibling;
- }
-
- /*
- * Avoid infinite loop if we fail to remove
- * one dentry.
- */
- mutex_unlock(&parent->d_inode->i_mutex);
- break;
- }
- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ up:
+ if (!__debugfs_remove(child, parent))
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
- parent = dentry->d_parent;
+ mutex_unlock(&parent->d_inode->i_mutex);
+ child = parent;
+ parent = parent->d_parent;
mutex_lock(&parent->d_inode->i_mutex);
- __debugfs_remove(dentry, parent);
+
+ if (child != dentry) {
+ next = list_entry(child->d_u.d_child.next, struct dentry,
+ d_u.d_child);
+ goto up;
+ }
+
+ if (!__debugfs_remove(child, parent))
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
mutex_unlock(&parent->d_inode->i_mutex);
- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
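The rewrite above replaces the fragile sibling-walk with an explicit down/up traversal: descend with "goto down" whenever a child still has children, and re-enter the parent's loop with "goto up" once the subtree is gone, all without recursion (kernel stacks are small and the tree depth is unbounded). A compact model of the control flow (hypothetical node type; the i_mutex locking and the debugfs_positive() filter are omitted):

struct node { struct node *parent, *first_child, *next_sibling; };

static void remove_leaf(struct node *n);	/* children already gone */

static void remove_tree(struct node *top)
{
	struct node *parent = top, *child, *next;
down:
	for (child = parent->first_child; child; child = next) {
		next = child->next_sibling;
		if (child->first_child) {
			parent = child;		/* descend first */
			goto down;
		}
up:
		remove_leaf(child);
	}
	child = parent;
	parent = parent->parent;
	if (child != top) {
		next = child->next_sibling;
		goto up;		/* resume the parent's iteration */
	}
	remove_leaf(child);
}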
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 911649a47dd5..812149119fa3 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -686,7 +686,6 @@ static int device_close(struct inode *inode, struct file *file)
device_remove_lockspace() */
sigprocmask(SIG_SETMASK, &tmpsig, NULL);
- recalc_sigpending();
return 0;
}
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index f3913eb2c474..d15ccf20f1b3 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -57,7 +57,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
struct inode *inode;
inode = iget_locked(super, ino);
- if (IS_ERR(inode))
+ if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
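The efs fix corrects a common API confusion: iget_locked() signals failure by returning NULL, never an ERR_PTR value, so the old IS_ERR() test could not fire and a failed allocation sailed through. The corrected idiom, as a kernel-style fragment (assumes <linux/fs.h>; shown for shape, not as a standalone program):

struct inode *example_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)			/* NULL, not ERR_PTR, on failure */
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* cache hit, already set up */
	/* ... fill in the new inode, then unlock_new_inode(inode) ... */
	return inode;
}

The gfs2_glock_init() hunk below fixes the mirror image of the same mistake: alloc_workqueue() also returns NULL on failure, so the IS_ERR()/PTR_ERR() checks there were dead code.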
diff --git a/fs/exec.c b/fs/exec.c
index 9c73def87642..fd774c7cb483 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -608,7 +608,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
return -ENOMEM;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, old_start, old_end);
if (new_end > old_start) {
/*
* when the old and new regions overlap clear from new_end.
@@ -625,7 +625,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
free_pgd_range(&tlb, old_start, old_end, new_end,
vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
}
- tlb_finish_mmu(&tlb, new_end, old_end);
+ tlb_finish_mmu(&tlb, old_start, old_end);
/*
* Shrink the vma to just the new range. Always succeeds.
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b577e45425b0..0ab26fbf3380 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2086,6 +2086,7 @@ extern int ext4_sync_inode(handle_t *, struct inode *);
extern void ext4_dirty_inode(struct inode *, int);
extern int ext4_change_inode_journal_flag(struct inode *, int);
extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
+extern int ext4_inode_attach_jinode(struct inode *inode);
extern int ext4_can_truncate(struct inode *inode);
extern void ext4_truncate(struct inode *);
extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 72a3600aedbd..17ac112ab101 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -255,10 +255,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
set_buffer_prio(bh);
if (ext4_handle_valid(handle)) {
err = jbd2_journal_dirty_metadata(handle, bh);
- if (err) {
- /* Errors can only happen if there is a bug */
- handle->h_err = err;
- __ext4_journal_stop(where, line, handle);
+ /* Errors can only happen if there is a bug */
+ if (WARN_ON_ONCE(err)) {
+ ext4_journal_abort_handle(where, line, __func__, bh,
+ handle, err);
}
} else {
if (inode)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a61873808f76..72ba4705d4fa 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4412,7 +4412,7 @@ void ext4_ext_truncate(handle_t *handle, struct inode *inode)
retry:
err = ext4_es_remove_extent(inode, last_block,
EXT_MAX_BLOCKS - last_block);
- if (err == ENOMEM) {
+ if (err == -ENOMEM) {
cond_resched();
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6f4cc567c382..319c9d26279a 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -219,7 +219,6 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
{
struct super_block *sb = inode->i_sb;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct ext4_inode_info *ei = EXT4_I(inode);
struct vfsmount *mnt = filp->f_path.mnt;
struct path path;
char buf[64], *cp;
@@ -259,22 +258,10 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
* Set up the jbd2_inode if we are opening the inode for
* writing and the journal is present
*/
- if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
- struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
-
- spin_lock(&inode->i_lock);
- if (!ei->jinode) {
- if (!jinode) {
- spin_unlock(&inode->i_lock);
- return -ENOMEM;
- }
- ei->jinode = jinode;
- jbd2_journal_init_jbd_inode(ei->jinode, inode);
- jinode = NULL;
- }
- spin_unlock(&inode->i_lock);
- if (unlikely(jinode != NULL))
- jbd2_free_inode(jinode);
+ if (filp->f_mode & FMODE_WRITE) {
+ int ret = ext4_inode_attach_jinode(inode);
+ if (ret < 0)
+ return ret;
}
return dquot_file_open(inode, filp);
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index f03598c6ffd3..8bf5999875ee 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -734,11 +734,8 @@ repeat_in_this_group:
ino = ext4_find_next_zero_bit((unsigned long *)
inode_bitmap_bh->b_data,
EXT4_INODES_PER_GROUP(sb), ino);
- if (ino >= EXT4_INODES_PER_GROUP(sb)) {
- if (++group == ngroups)
- group = 0;
- continue;
- }
+ if (ino >= EXT4_INODES_PER_GROUP(sb))
+ goto next_group;
if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
ext4_error(sb, "reserved inode found cleared - "
"inode=%lu", ino + 1);
@@ -769,6 +766,9 @@ repeat_in_this_group:
goto got; /* we grabbed the inode! */
if (ino < EXT4_INODES_PER_GROUP(sb))
goto repeat_in_this_group;
+next_group:
+ if (++group == ngroups)
+ group = 0;
}
err = -ENOSPC;
goto out;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ba33c67d6e48..c2ca04e67a4f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -555,14 +555,13 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
int ret;
unsigned long long status;
-#ifdef ES_AGGRESSIVE_TEST
- if (retval != map->m_len) {
- printk("ES len assertion failed for inode: %lu "
- "retval %d != map->m_len %d "
- "in %s (lookup)\n", inode->i_ino, retval,
- map->m_len, __func__);
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
}
-#endif
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
@@ -656,14 +655,13 @@ found:
int ret;
unsigned long long status;
-#ifdef ES_AGGRESSIVE_TEST
- if (retval != map->m_len) {
- printk("ES len assertion failed for inode: %lu "
- "retval %d != map->m_len %d "
- "in %s (allocation)\n", inode->i_ino, retval,
- map->m_len, __func__);
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
}
-#endif
/*
* If the extent has been zeroed out, we don't need to update
@@ -1637,14 +1635,13 @@ add_delayed:
int ret;
unsigned long long status;
-#ifdef ES_AGGRESSIVE_TEST
- if (retval != map->m_len) {
- printk("ES len assertion failed for inode: %lu "
- "retval %d != map->m_len %d "
- "in %s (lookup)\n", inode->i_ino, retval,
- map->m_len, __func__);
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
}
-#endif
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
@@ -3536,6 +3533,18 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
offset;
}
+ if (offset & (sb->s_blocksize - 1) ||
+ (offset + length) & (sb->s_blocksize - 1)) {
+ /*
+ * Attach jinode to inode for jbd2 if we do any zeroing of
+ * partial block
+ */
+ ret = ext4_inode_attach_jinode(inode);
+ if (ret < 0)
+ goto out_mutex;
+
+ }
+
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
@@ -3604,6 +3613,31 @@ out_mutex:
return ret;
}
+int ext4_inode_attach_jinode(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct jbd2_inode *jinode;
+
+ if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
+ return 0;
+
+ jinode = jbd2_alloc_inode(GFP_KERNEL);
+ spin_lock(&inode->i_lock);
+ if (!ei->jinode) {
+ if (!jinode) {
+ spin_unlock(&inode->i_lock);
+ return -ENOMEM;
+ }
+ ei->jinode = jinode;
+ jbd2_journal_init_jbd_inode(ei->jinode, inode);
+ jinode = NULL;
+ }
+ spin_unlock(&inode->i_lock);
+ if (unlikely(jinode != NULL))
+ jbd2_free_inode(jinode);
+ return 0;
+}
+
/*
* ext4_truncate()
*
@@ -3664,6 +3698,12 @@ void ext4_truncate(struct inode *inode)
return;
}
+ /* If we zero out the tail of the page, we have to create a jinode for jbd2 */

+ if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
+ if (ext4_inode_attach_jinode(inode) < 0)
+ return;
+ }
+
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
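ext4_inode_attach_jinode() centralizes the allocate-outside-the-lock dance that ext4_file_open() used to hand-roll: allocate optimistically before taking the spinlock (the allocation may sleep), recheck under the lock, and free the spare if another thread won the race. A runnable userspace model of the pattern (a pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdlib.h>

struct obj { int dummy; };

static struct obj *cached;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int attach_once(void)
{
	struct obj *spare;

	if (cached)			/* fast path, no locking */
		return 0;

	spare = malloc(sizeof(*spare));	/* may block; done unlocked */
	pthread_mutex_lock(&lock);
	if (!cached) {
		if (!spare) {
			pthread_mutex_unlock(&lock);
			return -1;	/* -ENOMEM in the kernel */
		}
		cached = spare;
		spare = NULL;		/* ownership transferred */
	}
	pthread_mutex_unlock(&lock);
	free(spare);	/* lost the race: drop the spare; free(NULL) is ok */
	return 0;
}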
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 9491ac0590f7..c0427e2f6648 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -77,8 +77,10 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags));
memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
- memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree));
- memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr));
+ ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
+ ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
+ ext4_es_lru_del(inode1);
+ ext4_es_lru_del(inode2);
isize = i_size_read(inode1);
i_size_write(inode1, i_size_read(inode2));
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index bca26f34edf4..b59373b625e9 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1359,7 +1359,7 @@ static const struct mount_opts {
{Opt_delalloc, EXT4_MOUNT_DELALLOC,
MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
- MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT},
+ MOPT_EXT4_ONLY | MOPT_CLEAR},
{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
MOPT_EXT4_ONLY | MOPT_SET},
{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
@@ -3483,7 +3483,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
if (test_opt(sb, DIOREAD_NOLOCK)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
- "both data=journal and delalloc");
+ "both data=journal and dioread_nolock");
goto failed_mount;
}
if (test_opt(sb, DELALLOC))
@@ -4727,6 +4727,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+ if (test_opt2(sb, EXPLICIT_DELALLOC)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and delalloc");
+ err = -EINVAL;
+ goto restore_opts;
+ }
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and dioread_nolock");
+ err = -EINVAL;
+ goto restore_opts;
+ }
+ }
+
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
ext4_abort(sb, "Abort forced by user");
@@ -5481,6 +5496,7 @@ static void __exit ext4_exit_fs(void)
kset_unregister(ext4_kset);
ext4_exit_system_zone();
ext4_exit_pageio();
+ ext4_exit_es();
}
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 6599222536eb..65343c3741ff 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -730,14 +730,14 @@ static int __init fcntl_init(void)
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
O_RDONLY | O_WRONLY | O_RDWR |
O_CREAT | O_EXCL | O_NOCTTY |
O_TRUNC | O_APPEND | /* O_NONBLOCK | */
__O_SYNC | O_DSYNC | FASYNC |
O_DIRECT | O_LARGEFILE | O_DIRECTORY |
O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- __FMODE_EXEC | O_PATH
+ __FMODE_EXEC | O_PATH | __O_TMPFILE
));
fasync_cache = kmem_cache_create("fasync_cache",
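The BUILD_BUG_ON above is a popcount check: it counts the distinct bits in the OR of every O_* flag and compares that against a hand-maintained total, so adding a flag (here __O_TMPFILE) without bumping the count breaks the build. The same trick in portable C11, using the classic SWAR popcount so the whole thing stays an integer constant expression (hypothetical flag names):

#define BC1(x)	((x) - (((x) >> 1) & 0x55555555u))
#define BC2(x)	((BC1(x) & 0x33333333u) + ((BC1(x) >> 2) & 0x33333333u))
#define BITS(x)	((((BC2(x) + (BC2(x) >> 4)) & 0x0f0f0f0fu) * 0x01010101u) >> 24)

#define FLAG_A	0x01u
#define FLAG_B	0x02u
#define FLAG_C	0x40u

_Static_assert(BITS(FLAG_A | FLAG_B | FLAG_C) == 3,
	       "flag count out of sync with the flag list");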
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9435384562a2..544a809819c3 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1838,14 +1838,14 @@ int __init gfs2_glock_init(void)
glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
WQ_HIGHPRI | WQ_FREEZABLE, 0);
- if (IS_ERR(glock_workqueue))
- return PTR_ERR(glock_workqueue);
+ if (!glock_workqueue)
+ return -ENOMEM;
gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
WQ_MEM_RECLAIM | WQ_FREEZABLE,
0);
- if (IS_ERR(gfs2_delete_workqueue)) {
+ if (!gfs2_delete_workqueue) {
destroy_workqueue(glock_workqueue);
- return PTR_ERR(gfs2_delete_workqueue);
+ return -ENOMEM;
}
register_shrinker(&glock_shrinker);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 5f2e5224c51c..e2e0a90396e7 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -47,7 +47,8 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
* None of the buffers should be dirty, locked, or pinned.
*/
-static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+ unsigned int nr_revokes)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
struct list_head *head = &gl->gl_ail_list;
@@ -57,7 +58,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_ail_lock);
- list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) {
+ list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
+ if (nr_revokes == 0)
+ break;
bh = bd->bd_bh;
if (bh->b_state & b_state) {
if (fsync)
@@ -65,6 +68,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
gfs2_ail_error(gl, bh);
}
gfs2_trans_add_revoke(sdp, bd);
+ nr_revokes--;
}
GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
spin_unlock(&sdp->sd_ail_lock);
@@ -91,7 +95,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
WARN_ON_ONCE(current->journal_info);
current->journal_info = &tr;
- __gfs2_ail_flush(gl, 0);
+ __gfs2_ail_flush(gl, 0, tr.tr_revokes);
gfs2_trans_end(sdp);
gfs2_log_flush(sdp, NULL);
@@ -101,15 +105,19 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
unsigned int revokes = atomic_read(&gl->gl_ail_count);
+ unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
int ret;
if (!revokes)
return;
- ret = gfs2_trans_begin(sdp, 0, revokes);
+ while (revokes > max_revokes)
+ max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
+
+ ret = gfs2_trans_begin(sdp, 0, max_revokes);
if (ret)
return;
- __gfs2_ail_flush(gl, fsync);
+ __gfs2_ail_flush(gl, fsync, max_revokes);
gfs2_trans_end(sdp);
gfs2_log_flush(sdp, NULL);
}
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index bbb2715171cd..64915eeae5a7 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -594,7 +594,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
}
gfs2_glock_dq_uninit(ghs);
if (IS_ERR(d))
- return PTR_RET(d);
+ return PTR_ERR(d);
return error;
} else if (error != -ENOENT) {
goto fail_gunlock;
@@ -1750,6 +1750,10 @@ static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name,
struct gfs2_holder gh;
int ret;
+ /* For selinux during lookup */
+ if (gfs2_glock_is_locked_by_me(ip->i_gl))
+ return generic_getxattr(dentry, name, data, size);
+
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
ret = gfs2_glock_nq(&gh);
if (ret == 0) {
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index e04d0e09ee7b..7b0f5043cf24 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -155,7 +155,7 @@ static int __init init_gfs2_fs(void)
goto fail_wq;
gfs2_control_wq = alloc_workqueue("gfs2_control",
- WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0);
+ WQ_UNBOUND | WQ_FREEZABLE, 0);
if (!gfs2_control_wq)
goto fail_recovery;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a3f868ae3fd4..d19b30ababf1 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -463,6 +463,14 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
return inode;
}
+/*
+ * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
+ * be taken from reclaim -- unlike regular filesystems. This needs an
+ * annotation because huge_pmd_share() does an allocation under
+ * i_mmap_mutex.
+ */
+struct lock_class_key hugetlbfs_i_mmap_mutex_key;
+
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
struct inode *dir,
umode_t mode, dev_t dev)
@@ -474,6 +482,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
+ lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
+ &hugetlbfs_i_mmap_mutex_key);
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;

inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -916,14 +926,8 @@ static int get_hstate_idx(int page_size_log)
return h - hstates;
}
-static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen)
-{
- return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
- dentry->d_name.name);
-}
-
static struct dentry_operations anon_ops = {
- .d_dname = hugetlb_dname
+ .d_dname = simple_dname
};
/*
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 01bfe7662751..41e491b8e5d7 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -64,12 +64,17 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
nlm_init->protocol, nlm_version,
nlm_init->hostname, nlm_init->noresvport,
nlm_init->net);
- if (host == NULL) {
- lockd_down(nlm_init->net);
- return ERR_PTR(-ENOLCK);
- }
+ if (host == NULL)
+ goto out_nohost;
+ if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL)
+ goto out_nobind;
return host;
+out_nobind:
+ nlmclnt_release_host(host);
+out_nohost:
+ lockd_down(nlm_init->net);
+ return ERR_PTR(-ENOLCK);
}
EXPORT_SYMBOL_GPL(nlmclnt_init);
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 9760ecb9b60f..acd394716349 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -125,14 +125,15 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
struct nlm_args *argp = &req->a_args;
struct nlm_lock *lock = &argp->lock;
+ char *nodename = req->a_host->h_rpcclnt->cl_nodename;
nlmclnt_next_cookie(&argp->cookie);
memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
- lock->caller = utsname()->nodename;
+ lock->caller = nodename;
lock->oh.data = req->a_owner;
lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
(unsigned int)fl->fl_u.nfs_fl.owner->pid,
- utsname()->nodename);
+ nodename);
lock->svid = fl->fl_u.nfs_fl.owner->pid;
lock->fl.fl_start = fl->fl_start;
lock->fl.fl_end = fl->fl_end;
diff --git a/fs/namei.c b/fs/namei.c
index 8b61d103a8a7..89a612e392eb 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3671,15 +3671,11 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
return -EINVAL;
/*
- * To use null names we require CAP_DAC_READ_SEARCH
- * This ensures that not everyone will be able to create
- * handlink using the passed filedescriptor.
+ * Using empty names is equivalent to using AT_SYMLINK_FOLLOW
+ * on /proc/self/fd/<fd>.
*/
- if (flags & AT_EMPTY_PATH) {
- if (!capable(CAP_DAC_READ_SEARCH))
- return -ENOENT;
+ if (flags & AT_EMPTY_PATH)
how = LOOKUP_EMPTY;
- }
if (flags & AT_SYMLINK_FOLLOW)
how |= LOOKUP_FOLLOW;
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b1ca9ba0b0a..a45ba4f267fe 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path)
CL_COPY_ALL | CL_PRIVATE);
namespace_unlock();
if (IS_ERR(tree))
- return NULL;
+ return ERR_CAST(tree);
return &tree->mnt;
}
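collect_mounts() used to flatten copy_tree()'s ERR_PTR into NULL, discarding the reason for the failure; ERR_CAST() forwards the same error pointer across the pointer-type boundary instead. A self-contained userspace model (miniature macros, hypothetical names):

#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define ERR_CAST(p)	((void *)(p))

struct mount_model { int id; };
struct tree { struct mount_model mnt; };

static struct tree *copy_tree_model(void)
{
	return ERR_PTR(-EPERM);		/* stand-in failure */
}

static struct mount_model *collect_model(void)
{
	struct tree *t = copy_tree_model();

	if (IS_ERR(t))
		return ERR_CAST(t);	/* was "return NULL": error lost */
	return &t->mnt;
}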
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index af6e806044d7..941246f2b43d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -463,7 +463,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
unlock_new_inode(inode);
} else
nfs_refresh_inode(inode, fattr);
- nfs_setsecurity(inode, fattr, label);
dprintk("NFS: nfs_fhget(%s/%Ld fh_crc=0x%08x ct=%d)\n",
inode->i_sb->s_id,
(long long)NFS_FILEID(inode),
@@ -963,9 +962,15 @@ EXPORT_SYMBOL_GPL(nfs_revalidate_inode);
static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
{
struct nfs_inode *nfsi = NFS_I(inode);
-
+ int ret;
+
if (mapping->nrpages != 0) {
- int ret = invalidate_inode_pages2(mapping);
+ if (S_ISREG(inode->i_mode)) {
+ ret = nfs_sync_mapping(mapping);
+ if (ret < 0)
+ return ret;
+ }
+ ret = invalidate_inode_pages2(mapping);
if (ret < 0)
return ret;
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index cf11799297c4..108a774095f7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3071,15 +3071,13 @@ struct rpc_clnt *
nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
+ struct rpc_clnt *client = NFS_CLIENT(dir);
int status;
- struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
- if (status < 0) {
- rpc_shutdown_client(client);
+ if (status < 0)
return ERR_PTR(status);
- }
- return client;
+ return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
}
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 71fdc0dfa0d2..f6db66d8f647 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2478,6 +2478,10 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
if (server->flags & NFS_MOUNT_NOAC)
sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+ if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL)
+ if (mount_info->cloned->sb->s_flags & MS_SYNCHRONOUS)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
/* Get a superblock - note that we may end up sharing one that already exists */
s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
if (IS_ERR(s)) {
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 0d4c410e4589..419572f33b72 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1524,7 +1524,7 @@ static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
- 1 + 1 + 0 + /* eir_flags, spr_how, SP4_NONE (for now) */\
+ 1 + 1 + 2 + /* eir_flags, spr_how, spo_must_enforce & _allow */\
2 + /*eir_server_owner.so_minor_id */\
/* eir_server_owner.so_major_id<> */\
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 280acef6f0dc..43f42290e5df 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1264,6 +1264,8 @@ static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
struct svc_cred *cr = &rqstp->rq_cred;
u32 service;
+ if (!cr->cr_gss_mech)
+ return false;
service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
return service == RPC_GSS_SVC_INTEGRITY ||
service == RPC_GSS_SVC_PRIVACY;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 0c0f3ea90de5..c2a4701d7286 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3360,7 +3360,8 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
8 /* eir_clientid */ +
4 /* eir_sequenceid */ +
4 /* eir_flags */ +
- 4 /* spr_how (SP4_NONE) */ +
+ 4 /* spr_how */ +
+ 8 /* spo_must_enforce, spo_must_allow */ +
8 /* so_minor_id */ +
4 /* so_major_id.len */ +
(XDR_QUADLEN(major_id_sz) * 4) +
@@ -3372,8 +3373,6 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
WRITE32(exid->seqid);
WRITE32(exid->flags);
- /* state_protect4_r. Currently only support SP4_NONE */
- BUG_ON(exid->spa_how != SP4_NONE);
WRITE32(exid->spa_how);
switch (exid->spa_how) {
case SP4_NONE:
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index dc9a913784ab..2d8be51f90dc 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
- bio_put(bio);
- /* to be detected by submit_seg_bio() */
+ /* to be detected by nilfs_segbuf_submit_bio() */
}
if (!uptodate)
@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
bio->bi_private = segbuf;
bio_get(bio);
submit_bio(mode, bio);
+ segbuf->sb_nbio++;
if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
bio_put(bio);
err = -EOPNOTSUPP;
goto failed;
}
- segbuf->sb_nbio++;
bio_put(bio);
wi->bio = NULL;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 79736a28d84f..2abf97b2a592 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1757,7 +1757,7 @@ try_again:
goto out;
} else if (ret == 1) {
clusters_need = wc->w_clen;
- ret = ocfs2_refcount_cow(inode, filp, di_bh,
+ ret = ocfs2_refcount_cow(inode, di_bh,
wc->w_cpos, wc->w_clen, UINT_MAX);
if (ret) {
mlog_errno(ret);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index eb760d8acd50..30544ce8e9f7 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2153,11 +2153,9 @@ int ocfs2_empty_dir(struct inode *inode)
{
int ret;
struct ocfs2_empty_dir_priv priv = {
- .ctx.actor = ocfs2_empty_dir_filldir
+ .ctx.actor = ocfs2_empty_dir_filldir,
};
- memset(&priv, 0, sizeof(priv));
-
if (ocfs2_dir_indexed(inode)) {
ret = ocfs2_empty_dir_dx(inode, &priv);
if (ret)
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 41000f223ca4..3261d71319ee 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
goto out;
- return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
+ return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
out:
return status;
@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
zero_clusters = last_cpos - zero_cpos;
if (needs_cow) {
- rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
+ rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
zero_clusters, UINT_MAX);
if (rc) {
mlog_errno(rc);
@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
*meta_level = 1;
- ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
+ ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
if (ret)
mlog_errno(ret);
out:
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 96f9ac237e86..0a992737dcaf 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -537,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb,
extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth);
return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks +
- ocfs2_quota_trans_credits(sb) + bits_wanted;
+ ocfs2_quota_trans_credits(sb);
}
static inline int ocfs2_calc_symlink_credits(struct super_block *sb)
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index f1fc172175b6..452068b45749 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
- ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
+ ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
p_cpos, new_p_cpos, len);
if (ret) {
mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 998b17eda09d..a70d604593b6 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -49,7 +49,6 @@
struct ocfs2_cow_context {
struct inode *inode;
- struct file *file;
u32 cow_start;
u32 cow_len;
struct ocfs2_extent_tree data_et;
@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
u32 *num_clusters,
unsigned int *extent_flags);
int (*cow_duplicate_clusters)(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len);
};
@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
}
int ocfs2_duplicate_clusters_by_page(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len)
{
int ret = 0, partial;
- struct inode *inode = file_inode(file);
- struct ocfs2_caching_info *ci = INODE_CACHE(inode);
- struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ struct super_block *sb = inode->i_sb;
u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
struct page *page;
pgoff_t page_index;
@@ -2965,6 +2962,11 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
to = map_end & (PAGE_CACHE_SIZE - 1);
page = find_or_create_page(mapping, page_index, GFP_NOFS);
+ if (!page) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ break;
+ }
/*
* In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, this page

@@ -2973,13 +2975,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
BUG_ON(PageDirty(page));
- if (PageReadahead(page)) {
- page_cache_async_readahead(mapping,
- &file->f_ra, file,
- page, page_index,
- readahead_pages);
- }
-
if (!PageUptodate(page)) {
ret = block_read_full_page(page, ocfs2_get_block);
if (ret) {
@@ -2999,7 +2994,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
}
}
- ocfs2_map_and_dirty_page(inode, handle, from, to,
+ ocfs2_map_and_dirty_page(inode,
+ handle, from, to,
page, 0, &new_block);
mark_page_accessed(page);
unlock:
@@ -3015,12 +3011,11 @@ unlock:
}
int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len)
{
int ret = 0;
- struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct ocfs2_caching_info *ci = INODE_CACHE(inode);
int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
@@ -3145,7 +3140,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
/* If the old clusters are unwritten, no need to duplicate. */

if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
- ret = context->cow_duplicate_clusters(handle, context->file,
+ ret = context->cow_duplicate_clusters(handle, context->inode,
cpos, old, new, len);
if (ret) {
mlog_errno(ret);
@@ -3423,35 +3418,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
return ret;
}
-static void ocfs2_readahead_for_cow(struct inode *inode,
- struct file *file,
- u32 start, u32 len)
-{
- struct address_space *mapping;
- pgoff_t index;
- unsigned long num_pages;
- int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
-
- if (!file)
- return;
-
- mapping = file->f_mapping;
- num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
- if (!num_pages)
- num_pages = 1;
-
- index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
- page_cache_sync_readahead(mapping, &file->f_ra, file,
- index, num_pages);
-}
-
/*
* Starting at cpos, try to CoW write_len clusters. Don't CoW
* past max_cpos. This will stop when it runs into a hole or an
* unrefcounted extent.
*/
static int ocfs2_refcount_cow_hunk(struct inode *inode,
- struct file *file,
struct buffer_head *di_bh,
u32 cpos, u32 write_len, u32 max_cpos)
{
@@ -3480,8 +3452,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
BUG_ON(cow_len == 0);
- ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
-
context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
if (!context) {
ret = -ENOMEM;
@@ -3503,7 +3473,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
context->ref_root_bh = ref_root_bh;
context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
context->get_clusters = ocfs2_di_get_clusters;
- context->file = file;
ocfs2_init_dinode_extent_tree(&context->data_et,
INODE_CACHE(inode), di_bh);
@@ -3532,7 +3501,6 @@ out:
* clusters between cpos and cpos+write_len are safe to modify.
*/
int ocfs2_refcount_cow(struct inode *inode,
- struct file *file,
struct buffer_head *di_bh,
u32 cpos, u32 write_len, u32 max_cpos)
{
@@ -3552,7 +3520,7 @@ int ocfs2_refcount_cow(struct inode *inode,
num_clusters = write_len;
if (ext_flags & OCFS2_EXT_REFCOUNTED) {
- ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
+ ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
num_clusters, max_cpos);
if (ret) {
mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
index 7754608c83a4..6422bbcdb525 100644
--- a/fs/ocfs2/refcounttree.h
+++ b/fs/ocfs2/refcounttree.h
@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
int *credits,
int *ref_blocks);
int ocfs2_refcount_cow(struct inode *inode,
- struct file *filep, struct buffer_head *di_bh,
+ struct buffer_head *di_bh,
u32 cpos, u32 write_len, u32 max_cpos);
typedef int (ocfs2_post_refcount_func)(struct inode *inode,
@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
u32 cpos, u32 write_len,
struct ocfs2_post_refcount *post);
int ocfs2_duplicate_clusters_by_page(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len);
int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len);
int ocfs2_cow_sync_writeback(struct super_block *sb,
diff --git a/fs/open.c b/fs/open.c
index d53e29895082..7931f76acc2b 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -823,7 +823,7 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
int lookup_flags = 0;
int acc_mode;
- if (flags & O_CREAT)
+ if (flags & (O_CREAT | __O_TMPFILE))
op->mode = (mode & S_IALLUGO) | S_IFREG;
else
op->mode = 0;
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 75f2890abbd8..0ff80f9b930f 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -230,8 +230,6 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
if (!dir_emit_dots(file, ctx))
goto out;
- if (!dir_emit_dots(file, ctx))
- goto out;
files = get_files_struct(p);
if (!files)
goto out;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 94441a407337..737e15615b04 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -271,7 +271,7 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *file,
de = next;
} while (de);
spin_unlock(&proc_subdir_lock);
- return 0;
+ return 1;
}
int proc_readdir(struct file *file, struct dir_context *ctx)
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 229e366598da..e0a790da726d 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -205,7 +205,9 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr
static int proc_root_readdir(struct file *file, struct dir_context *ctx)
{
if (ctx->pos < FIRST_PROCESS_ENTRY) {
- proc_readdir(file, ctx);
+ int error = proc_readdir(file, ctx);
+ if (unlikely(error <= 0))
+ return error;
ctx->pos = FIRST_PROCESS_ENTRY;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dbf61f6174f0..107d026f5d6e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -730,8 +730,16 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
* of how soft-dirty works.
*/
pte_t ptent = *pte;
- ptent = pte_wrprotect(ptent);
- ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+
+ if (pte_present(ptent)) {
+ ptent = pte_wrprotect(ptent);
+ ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+ } else if (is_swap_pte(ptent)) {
+ ptent = pte_swp_clear_soft_dirty(ptent);
+ } else if (pte_file(ptent)) {
+ ptent = pte_file_clear_soft_dirty(ptent);
+ }
+
set_pte_at(vma->vm_mm, addr, pte, ptent);
#endif
}
@@ -752,14 +760,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
ptent = *pte;
- if (!pte_present(ptent))
- continue;
if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
clear_soft_dirty(vma, addr, pte);
continue;
}
+ if (!pte_present(ptent))
+ continue;
+
page = vm_normal_page(vma, addr, ptent);
if (!page)
continue;
@@ -859,7 +868,7 @@ typedef struct {
} pagemap_entry_t;
struct pagemapread {
- int pos, len;
+ int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
pagemap_entry_t *buffer;
bool v2;
};
@@ -867,7 +876,7 @@ struct pagemapread {
#define PAGEMAP_WALK_SIZE (PMD_SIZE)
#define PAGEMAP_WALK_MASK (PMD_MASK)
-#define PM_ENTRY_BYTES sizeof(u64)
+#define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
#define PM_STATUS_BITS 3
#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
@@ -930,8 +939,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
flags = PM_PRESENT;
page = vm_normal_page(vma, addr, pte);
} else if (is_swap_pte(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
-
+ swp_entry_t entry;
+ if (pte_swp_soft_dirty(pte))
+ flags2 |= __PM_SOFT_DIRTY;
+ entry = pte_to_swp_entry(pte);
frame = swp_type(entry) |
(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
flags = PM_SWAP;
@@ -1116,8 +1127,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
goto out_task;
pm.v2 = soft_dirty_cleared;
- pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
- pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
+ pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+ pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
ret = -ENOMEM;
if (!pm.buffer)
goto out_task;
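The pagemap fix is a units bug: pm.len was in bytes while every consumer treated it as a count of entries, which only went unnoticed while an entry happened to be sizeof(u64). Keeping the field in entries and converting to bytes once, at the allocation, makes the two units impossible to mix up again. A minimal model of the corrected bookkeeping:

#include <stdint.h>
#include <stdlib.h>

typedef struct { uint64_t val; } pagemap_entry;
#define PM_ENTRY_BYTES	sizeof(pagemap_entry)

struct reader {
	int pos, len;		/* units: entries, not bytes */
	pagemap_entry *buffer;
};

static int reader_init(struct reader *r, int nr_entries)
{
	r->pos = 0;
	r->len = nr_entries;
	r->buffer = malloc((size_t)r->len * PM_ENTRY_BYTES);
	return r->buffer ? 0 : -1;
}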
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 33532f79b4f7..a958444a75fc 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -19,12 +19,13 @@
/*
* LOCKING:
*
- * We rely on new Alexander Viro's super-block locking.
+ * These guys are evicted from procfs as the very first step in ->kill_sb().
*
*/
-static int show_version(struct seq_file *m, struct super_block *sb)
+static int show_version(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
char *format;
if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) {
@@ -66,8 +67,9 @@ static int show_version(struct seq_file *m, struct super_block *sb)
#define DJP( x ) le32_to_cpu( jp -> x )
#define JF( x ) ( r -> s_journal -> x )
-static int show_super(struct seq_file *m, struct super_block *sb)
+static int show_super(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *r = REISERFS_SB(sb);
seq_printf(m, "state: \t%s\n"
@@ -128,8 +130,9 @@ static int show_super(struct seq_file *m, struct super_block *sb)
return 0;
}
-static int show_per_level(struct seq_file *m, struct super_block *sb)
+static int show_per_level(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *r = REISERFS_SB(sb);
int level;
@@ -186,8 +189,9 @@ static int show_per_level(struct seq_file *m, struct super_block *sb)
return 0;
}
-static int show_bitmap(struct seq_file *m, struct super_block *sb)
+static int show_bitmap(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *r = REISERFS_SB(sb);
seq_printf(m, "free_block: %lu\n"
@@ -218,8 +222,9 @@ static int show_bitmap(struct seq_file *m, struct super_block *sb)
return 0;
}
-static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
+static int show_on_disk_super(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
struct reiserfs_super_block *rs = sb_info->s_rs;
int hash_code = DFL(s_hash_function_code);
@@ -261,8 +266,9 @@ static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
return 0;
}
-static int show_oidmap(struct seq_file *m, struct super_block *sb)
+static int show_oidmap(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
struct reiserfs_super_block *rs = sb_info->s_rs;
unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize);
@@ -291,8 +297,9 @@ static int show_oidmap(struct seq_file *m, struct super_block *sb)
return 0;
}
-static int show_journal(struct seq_file *m, struct super_block *sb)
+static int show_journal(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *r = REISERFS_SB(sb);
struct reiserfs_super_block *rs = r->s_rs;
struct journal_params *jp = &rs->s_v1.s_journal;
@@ -383,92 +390,24 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
return 0;
}
-/* iterator */
-static int test_sb(struct super_block *sb, void *data)
-{
- return data == sb;
-}
-
-static int set_sb(struct super_block *sb, void *data)
-{
- return -ENOENT;
-}
-
-struct reiserfs_seq_private {
- struct super_block *sb;
- int (*show) (struct seq_file *, struct super_block *);
-};
-
-static void *r_start(struct seq_file *m, loff_t * pos)
-{
- struct reiserfs_seq_private *priv = m->private;
- loff_t l = *pos;
-
- if (l)
- return NULL;
-
- if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, 0, priv->sb)))
- return NULL;
-
- up_write(&priv->sb->s_umount);
- return priv->sb;
-}
-
-static void *r_next(struct seq_file *m, void *v, loff_t * pos)
-{
- ++*pos;
- if (v)
- deactivate_super(v);
- return NULL;
-}
-
-static void r_stop(struct seq_file *m, void *v)
-{
- if (v)
- deactivate_super(v);
-}
-
-static int r_show(struct seq_file *m, void *v)
-{
- struct reiserfs_seq_private *priv = m->private;
- return priv->show(m, v);
-}
-
-static const struct seq_operations r_ops = {
- .start = r_start,
- .next = r_next,
- .stop = r_stop,
- .show = r_show,
-};
-
static int r_open(struct inode *inode, struct file *file)
{
- struct reiserfs_seq_private *priv;
- int ret = seq_open_private(file, &r_ops,
- sizeof(struct reiserfs_seq_private));
-
- if (!ret) {
- struct seq_file *m = file->private_data;
- priv = m->private;
- priv->sb = proc_get_parent_data(inode);
- priv->show = PDE_DATA(inode);
- }
- return ret;
+ return single_open(file, PDE_DATA(inode),
+ proc_get_parent_data(inode));
}
static const struct file_operations r_file_operations = {
.open = r_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
- .owner = THIS_MODULE,
+ .release = single_release,
};
static struct proc_dir_entry *proc_info_root = NULL;
static const char proc_info_root_name[] = "fs/reiserfs";
static void add_file(struct super_block *sb, char *name,
- int (*func) (struct seq_file *, struct super_block *))
+ int (*func) (struct seq_file *, void *))
{
proc_create_data(name, 0, REISERFS_SB(sb)->procdir,
&r_file_operations, func);
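The net effect of this procfs.c hunk: each show routine now reads its superblock from m->private, and the bespoke seq_operations iterator disappears entirely. single_open() binds the show callback (stored as the proc entry's data) to the superblock (stored as the parent directory's data). A minimal, self-contained sketch of the same single_open() pattern, with hypothetical names:

```c
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static unsigned long demo_counter;

static int demo_show(struct seq_file *m, void *unused)
{
	/* single_open() stored the proc entry's data in m->private. */
	unsigned long *counter = m->private;

	seq_printf(m, "counter: %lu\n", *counter);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* PDE_DATA() returns what proc_create_data() registered. */
	return single_open(file, demo_show, PDE_DATA(inode));
}

static const struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_init(void)
{
	proc_create_data("demo_counter", 0, NULL, &demo_fops, &demo_counter);
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo_counter", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```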
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f8a23c3078f8..e2e202a07b31 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -499,6 +499,7 @@ int remove_save_link(struct inode *inode, int truncate)
static void reiserfs_kill_sb(struct super_block *s)
{
if (REISERFS_SB(s)) {
+ reiserfs_proc_info_done(s);
/*
* Force any pending inode evictions to occur now. Any
* inodes to be removed that have extended attributes
@@ -554,8 +555,6 @@ static void reiserfs_put_super(struct super_block *s)
REISERFS_SB(s)->reserved_blocks);
}
- reiserfs_proc_info_done(s);
-
reiserfs_write_unlock(s);
mutex_destroy(&REISERFS_SB(s)->lock);
kfree(s->s_fs_info);
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 56e6b68c8d2f..94383a70c1a3 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -274,15 +274,12 @@ struct acpi_device_wakeup {
};
struct acpi_device_physical_node {
- u8 node_id;
+ unsigned int node_id;
struct list_head node;
struct device *dev;
bool put_online:1;
};
-/* set maximum of physical nodes to 32 for expansibility */
-#define ACPI_MAX_PHYSICAL_NODE 32
-
/* Device */
struct acpi_device {
int device_type;
@@ -302,10 +299,9 @@ struct acpi_device {
struct acpi_driver *driver;
void *driver_data;
struct device dev;
- u8 physical_node_count;
+ unsigned int physical_node_count;
struct list_head physical_node_list;
struct mutex physical_node_lock;
- DECLARE_BITMAP(physical_node_id_bitmap, ACPI_MAX_PHYSICAL_NODE);
struct list_head power_dependent;
void (*remove)(struct acpi_device *);
};
@@ -445,7 +441,11 @@ struct acpi_pci_root {
};
/* helper */
-acpi_handle acpi_get_child(acpi_handle, u64);
+acpi_handle acpi_find_child(acpi_handle, u64, bool);
+static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
+{
+ return acpi_find_child(handle, addr, false);
+}
int acpi_is_root_bridge(acpi_handle);
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
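acpi_get_child() keeps its signature by becoming a static inline over the new three-argument acpi_find_child(), so existing callers compile unchanged while new code can opt into the extra behaviour. The same wrapper idiom in miniature (hypothetical names):

```c
#include <stdbool.h>

/* New, more general helper. */
long find_item(int id, bool deep);

/* Legacy entry point: same signature and behaviour as before,
 * so no caller has to change. */
static inline long get_item(int id)
{
	return find_item(id, false);
}
```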
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 2f47ade1b567..0807ddf97b05 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -417,6 +417,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
return pmd;
}
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+ return 0;
+}
#endif
#ifndef __HAVE_PFNMAP_TRACKING
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 13821c339a41..5672d7ea1fa0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,7 +112,7 @@ struct mmu_gather {
#define HAVE_GENERIC_MMU_GATHER
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
unsigned long end);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 7b75621fda4c..a1441c5ac63d 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -260,6 +260,7 @@ struct hdmi_vendor_infoframe;
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
+int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index f5e1168c7647..d639049a613d 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -84,12 +84,12 @@ static inline int drm_fixp2int(int64_t a)
return ((s64)a) >> DRM_FIXED_POINT;
}
-static inline s64 drm_fixp_msbset(int64_t a)
+static inline unsigned drm_fixp_msbset(int64_t a)
{
unsigned shift, sign = (a >> 63) & 1;
for (shift = 62; shift > 0; --shift)
- if ((a >> shift) != sign)
+ if (((a >> shift) & 1) != sign)
return shift;
return 0;
@@ -100,9 +100,9 @@ static inline s64 drm_fixp_mul(s64 a, s64 b)
unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b);
s64 result;
- if (shift > 63) {
- shift = shift - 63;
- a >>= shift >> 1;
+ if (shift > 61) {
+ shift = shift - 61;
+ a >>= (shift >> 1) + (shift & 1);
b >>= shift >> 1;
} else
shift = 0;
@@ -120,7 +120,7 @@ static inline s64 drm_fixp_mul(s64 a, s64 b)
static inline s64 drm_fixp_div(s64 a, s64 b)
{
- unsigned shift = 63 - drm_fixp_msbset(a);
+ unsigned shift = 62 - drm_fixp_msbset(a);
s64 result;
a <<= shift;
@@ -154,7 +154,7 @@ static inline s64 drm_fixp_exp(s64 x)
}
if (x < 0)
- sum = drm_fixp_div(1, sum);
+ sum = drm_fixp_div(DRM_FIXED_ONE, sum);
return sum;
}
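The drm_fixed.h hunks tighten the sign/magnitude scan (compare a single bit against the sign bit, rather than the whole shifted value) and widen the pre-shift overflow guard so the 64-bit product cannot wrap. A user-space sketch of the corrected logic, assuming the kernel's 32.32 format (DRM_FIXED_POINT == 32):

```c
#include <stdint.h>
#include <stdio.h>

#define FP_SHIFT 32			/* assume 32.32 fixed point */
#define FP_ONE   ((int64_t)1 << FP_SHIFT)

/* Highest bit position where the value differs from its sign bit,
 * i.e. the magnitude of the number in bits (0 for 0 and -1). */
static unsigned msbset(int64_t a)
{
	unsigned shift, sign = (a >> 63) & 1;

	for (shift = 62; shift > 0; --shift)
		if (((a >> shift) & 1) != sign)
			return shift;
	return 0;
}

/* 32.32 multiply with the pre-shift guard from the hunk above:
 * if the combined magnitude exceeds 61 bits, drop just enough low
 * bits from the operands that the s64 product cannot overflow. */
static int64_t fp_mul(int64_t a, int64_t b)
{
	unsigned shift = msbset(a) + msbset(b);
	int64_t result;

	if (shift > 61) {
		shift = shift - 61;
		a >>= (shift >> 1) + (shift & 1);
		b >>= shift >> 1;
	} else {
		shift = 0;
	}

	result = a * b;

	if (shift > FP_SHIFT)
		result <<= shift - FP_SHIFT;
	else
		result >>= FP_SHIFT - shift;

	return result;
}

int main(void)
{
	int64_t x = 3 * FP_ONE / 2;	/* 1.5 */

	/* 1.5 * 1.5 = 2.25 */
	printf("%f\n", fp_mul(x, x) / (double)FP_ONE);
	return 0;
}
```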
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 0a85e5c5d61b..fd54a14a7c2a 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -1,4 +1,22 @@
#define radeon_PCI_IDS \
+ {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b90337c9d468..4a12532da8c4 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -336,6 +336,7 @@ extern int d_validate(struct dentry *, struct dentry *);
* helper function for dentry_operations.d_dname() members
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern char *simple_dname(struct dentry *, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 3b0e820375ab..5d7782e42b8f 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -436,6 +436,7 @@ struct fw_iso_context {
int type;
int channel;
int speed;
+ bool drop_overflow_headers;
size_t header_size;
union {
fw_iso_callback_t sc;
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 4372658c73ae..120d57a1c3a5 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -78,6 +78,11 @@ struct trace_iterator {
/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
+ cpumask_var_t started;
+
+ /* it's true when current open file is snapshot */
+ bool snapshot;
+
/* The below is zeroed out in pipe_read */
struct trace_seq seq;
struct trace_entry *ent;
@@ -90,10 +95,7 @@ struct trace_iterator {
loff_t pos;
long idx;
- cpumask_var_t started;
-
- /* it's true when current open file is snapshot */
- bool snapshot;
+ /* All new fields here will be zeroed out in pipe_read */
};
enum trace_iter_flags {
@@ -332,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
-extern void trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_remove_event_call(struct ftrace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)1)
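Moving started and snapshot above seq matters because, per the new trailing comment, everything from seq to the end of trace_iterator is zeroed out in pipe_read; fields that must survive across reads have to sit above that boundary. A stand-alone model of the layout-dependent reset (illustrative only, not the tracing code):

```c
#include <stddef.h>
#include <string.h>

struct iter {
	void *persistent_state;		/* kept across reads */
	int   snapshot;			/* kept across reads */
	/* everything from here down is cleared before each read */
	char  seq[128];
	long  pos;
};

static void reset_for_read(struct iter *it)
{
	/* Zero from 'seq' to the end of the struct in one memset;
	 * any field added below 'seq' is reset automatically. */
	memset((char *)it + offsetof(struct iter, seq), 0,
	       sizeof(*it) - offsetof(struct iter, seq));
}
```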
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 3869c525b052..369cf2cd5144 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -8,6 +8,7 @@
*/
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/atomic.h>
#ifndef _IIO_TRIGGER_H_
#define _IIO_TRIGGER_H_
@@ -61,7 +62,7 @@ struct iio_trigger {
struct list_head list;
struct list_head alloc_list;
- int use_count;
+ atomic_t use_count;
struct irq_chip subirq_chip;
int subirq_base;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index b99cd23f3474..79640e015a86 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -5,45 +5,13 @@
#include <linux/bitmap.h>
#include <linux/if.h>
+#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/sysctl.h>
#include <linux/rtnetlink.h>
-enum
-{
- IPV4_DEVCONF_FORWARDING=1,
- IPV4_DEVCONF_MC_FORWARDING,
- IPV4_DEVCONF_PROXY_ARP,
- IPV4_DEVCONF_ACCEPT_REDIRECTS,
- IPV4_DEVCONF_SECURE_REDIRECTS,
- IPV4_DEVCONF_SEND_REDIRECTS,
- IPV4_DEVCONF_SHARED_MEDIA,
- IPV4_DEVCONF_RP_FILTER,
- IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
- IPV4_DEVCONF_BOOTP_RELAY,
- IPV4_DEVCONF_LOG_MARTIANS,
- IPV4_DEVCONF_TAG,
- IPV4_DEVCONF_ARPFILTER,
- IPV4_DEVCONF_MEDIUM_ID,
- IPV4_DEVCONF_NOXFRM,
- IPV4_DEVCONF_NOPOLICY,
- IPV4_DEVCONF_FORCE_IGMP_VERSION,
- IPV4_DEVCONF_ARP_ANNOUNCE,
- IPV4_DEVCONF_ARP_IGNORE,
- IPV4_DEVCONF_PROMOTE_SECONDARIES,
- IPV4_DEVCONF_ARP_ACCEPT,
- IPV4_DEVCONF_ARP_NOTIFY,
- IPV4_DEVCONF_ACCEPT_LOCAL,
- IPV4_DEVCONF_SRC_VMARK,
- IPV4_DEVCONF_PROXY_ARP_PVLAN,
- IPV4_DEVCONF_ROUTE_LOCALNET,
- __IPV4_DEVCONF_MAX
-};
-
-#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
-
struct ipv4_devconf {
void *sysctl;
int data[IPV4_DEVCONF_MAX];
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 850e95bc766c..b8b7dc755752 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -101,6 +101,7 @@ struct inet6_skb_parm {
#define IP6SKB_FORWARDED 2
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
+#define IP6SKB_FRAGMENTED 16
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3bef14c6586b..482ad2d84a32 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -629,7 +629,7 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
-static inline void trace_dump_stack(void) { }
+static inline void trace_dump_stack(int skip) { }
static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 8d73fe29796a..db1791bb997a 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -113,11 +113,27 @@
#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
#define CNTRLREG_TSCENB BIT(7)
+/* FIFO READ Register */
+#define FIFOREAD_DATA_MASK (0xfff << 0)
+#define FIFOREAD_CHNLID_MASK (0xf << 16)
+
+/* Sequencer Status */
+#define SEQ_STATUS BIT(5)
+
#define ADC_CLK 3000000
#define MAX_CLK_DIV 7
#define TOTAL_STEPS 16
#define TOTAL_CHANNELS 8
+/*
+ * The ADC runs at 3 MHz and takes 15 cycles to latch one data
+ * output, so the idle time for the ADC to process one sample
+ * is around 5 microseconds.
+ */
+#define IDLE_TIMEOUT 5 /* microsec */
+
#define TSCADC_CELLS 2
struct ti_tscadc_dev {
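The IDLE_TIMEOUT comment is simple arithmetic: 15 ADC clock cycles at the 3 MHz ADC_CLK come to 5 microseconds. As a quick check:

```c
#include <stdio.h>

int main(void)
{
	const double adc_clk_hz = 3000000.0;	/* ADC_CLK */
	const double cycles_per_sample = 15.0;

	/* 15 / 3 MHz = 5e-6 s, i.e. the 5 us IDLE_TIMEOUT. */
	printf("%.1f us\n", cycles_per_sample / adc_clk_hz * 1e6);
	return 0;
}
```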
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8de8d8f22384..68029b30c3dc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
__be16 max_desc_sz_rq;
u8 rsvd21[2];
__be16 max_desc_sz_sq_dc;
- u8 rsvd22[4];
- __be16 max_qp_mcg;
- u8 rsvd23;
+ __be32 max_qp_mcg;
+ u8 rsvd22[3];
u8 log_max_mcg;
- u8 rsvd24;
+ u8 rsvd23;
u8 log_max_pd;
- u8 rsvd25;
+ u8 rsvd24;
u8 log_max_xrcd;
- u8 rsvd26[42];
+ u8 rsvd25[42];
__be16 log_uar_page_sz;
- u8 rsvd27[28];
+ u8 rsvd26[28];
u8 log_msx_atomic_size_qp;
- u8 rsvd28[2];
+ u8 rsvd27[2];
u8 log_msx_atomic_size_dc;
- u8 rsvd29[76];
+ u8 rsvd28[76];
};
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
struct mlx5_eqe_page_req {
u8 rsvd0[2];
__be16 func_id;
- u8 rsvd1[2];
- __be16 num_pages;
- __be32 rsvd2[5];
+ __be32 num_pages;
+ __be32 rsvd1[5];
};
union ev_data {
@@ -690,6 +688,26 @@ struct mlx5_query_cq_mbox_out {
__be64 pas[0];
};
+struct mlx5_enable_hca_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_enable_hca_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_disable_hca_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_disable_hca_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
struct mlx5_eq_context {
u8 status;
u8 ec_oi;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index f22e4419839b..8888381fc150 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -101,6 +101,8 @@ enum {
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
MLX5_CMD_OP_INIT_HCA = 0x102,
MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
+ MLX5_CMD_OP_ENABLE_HCA = 0x104,
+ MLX5_CMD_OP_DISABLE_HCA = 0x105,
MLX5_CMD_OP_QUERY_PAGES = 0x107,
MLX5_CMD_OP_MANAGE_PAGES = 0x108,
MLX5_CMD_OP_SET_HCA_CAP = 0x109,
@@ -356,7 +358,7 @@ struct mlx5_caps {
u32 reserved_lkey;
u8 local_ca_ack_delay;
u8 log_max_mcg;
- u16 max_qp_mcg;
+ u32 max_qp_mcg;
int min_page_sz;
};
@@ -689,8 +691,8 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s16 npages);
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev);
+ s32 npages);
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
@@ -729,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
-typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
-int mlx5_register_health_report_handler(health_handler_t handler);
-void mlx5_unregister_health_report_handler(void);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
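The page-request event's num_pages field grows from __be16 to __be32, and the handler now takes an s32 count. Presumably (this is my reading of the widening, not stated in the hunk itself) the sign distinguishes firmware asking for more pages from firmware handing pages back, which is why a signed type is needed. A sketch of that assumed decoding:

```c
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl() as a user-space stand-in for be32_to_cpu() */

/* Assumed semantics: positive count = supply pages to the device,
 * negative count = reclaim pages from it. */
static void handle_page_request(uint32_t be_num_pages)
{
	int32_t npages = (int32_t)ntohl(be_num_pages);

	if (npages >= 0)
		printf("firmware wants %d more pages\n", npages);
	else
		printf("firmware is returning %d pages\n", -npages);
}
```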
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fb425aa16c01..faf4b7c1ad12 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -332,6 +332,7 @@ struct mm_struct {
unsigned long pgoff, unsigned long flags);
#endif
unsigned long mmap_base; /* base of mmap area */
+ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
unsigned long task_size; /* size of task vm space */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index b62d4af6c667..45e921401b06 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -361,7 +361,8 @@ struct ssb_device_id {
__u16 vendor;
__u16 coreid;
__u8 revision;
-};
+ __u8 __pad;
+} __attribute__((packed, aligned(2)));
#define SSB_DEVICE(_vendor, _coreid, _revision) \
{ .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
#define SSB_DEVTABLE_END \
@@ -377,7 +378,7 @@ struct bcma_device_id {
__u16 id;
__u8 rev;
__u8 class;
-};
+} __attribute__((packed,aligned(2)));
#define BCMA_CORE(_manuf, _id, _rev, _class) \
{ .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, }
#define BCMA_CORETABLE_END \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0741a1e919a5..9a4156845e93 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -973,7 +973,7 @@ struct net_device_ops {
gfp_t gfp);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
int (*ndo_busy_poll)(struct napi_struct *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 75981d0b57dc..580a5320cc96 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <linux/err.h>
struct module;
struct device;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 50d04b92ceda..078066daffd4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1532,6 +1532,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
* Test if a process is not yet dead (at most zombie state)
* If pid_alive fails, then pointers within the task structure
* can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
*/
static inline int pid_alive(struct task_struct *p)
{
@@ -1543,6 +1545,8 @@ static inline int pid_alive(struct task_struct *p)
* @tsk: Task structure to be checked.
*
* Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
*/
static inline int is_global_init(struct task_struct *tsk)
{
@@ -1628,6 +1632,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
+#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
/*
* Only the _current_ task can read/write to tsk->flags, but other
@@ -1893,6 +1898,8 @@ extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
* @p: the task in question.
+ *
+ * Return: 1 if @p is an idle task. 0 otherwise.
*/
static inline bool is_idle_task(const struct task_struct *p)
{
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5afefa01a13c..3b71a4e83642 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -501,7 +501,7 @@ struct sk_buff {
/* 7/9 bit hole (depending on ndisc_nodetype presence) */
kmemcheck_bitfield_end(flags2);
-#if defined CONFIG_NET_DMA || defined CONFIG_NET_LL_RX_POLL
+#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
union {
unsigned int napi_id;
dma_cookie_t dma_cookie;
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ced949a..75f34949d9ab 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@ do { \
#endif /*arch_spin_is_contended*/
#endif
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name, this does not necessarily have to be a full barrier.
+ * It only needs to guarantee that a STORE before the critical section
+ * cannot be reordered with a LOAD inside that section.
+ * spin_lock() is a one-way barrier, so the LOAD cannot escape out
+ * of the region; the default implementation therefore simply ensures
+ * that a STORE cannot move into the critical section, and smp_wmb()
+ * serializes it with the STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock() smp_wmb()
#endif
/**
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 6d870353674a..1821445708d6 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -121,6 +121,7 @@ struct rpc_task_setup {
#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_TASK_SENT 0x0800 /* message was sent */
#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
+#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c5fd30d2a415..8d4fa82bfb91 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
swp_entry_t arch_entry;
BUG_ON(pte_file(pte));
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
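This swapops.h hunk can call pte_swp_soft_dirty()/pte_swp_clear_soft_dirty() unconditionally because the asm-generic/pgtable.h hunk above supplies no-op fallbacks for architectures without soft-dirty tracking. The idiom in miniature (illustrative names and bit layout):

```c
/* Generic code calls the hooks unconditionally; the header decides
 * whether they do anything on this configuration. */
#ifdef HAVE_SOFT_DIRTY
static inline int pte_soft_dirty(unsigned long pte)
{
	return pte & 0x1;			/* illustrative bit */
}
static inline unsigned long pte_clear_soft_dirty(unsigned long pte)
{
	return pte & ~0x1UL;
}
#else
static inline int pte_soft_dirty(unsigned long pte)
{
	return 0;				/* no-op stub */
}
static inline unsigned long pte_clear_soft_dirty(unsigned long pte)
{
	return pte;				/* no-op stub */
}
#endif

static unsigned long encode_swap_entry(unsigned long pte)
{
	/* Safe everywhere: the #else stubs make this a no-op. */
	if (pte_soft_dirty(pte))
		pte = pte_clear_soft_dirty(pte);
	return pte;
}
```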
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 4147d700a293..84662ecc7b51 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
int __user *);
#else
+#ifdef CONFIG_CLONE_BACKWARDS3
+asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
+ int __user *, int);
+#else
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
int __user *, int);
#endif
+#endif
asmlinkage long sys_execve(const char __user *filename,
const char __user *const __user *argv,
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 9180f4b85e6d..62bd8b72873c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -174,10 +174,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk) { }
#endif
-# ifdef CONFIG_CPU_IDLE_GOV_MENU
-extern void menu_hrtimer_cancel(void);
-# else
-static inline void menu_hrtimer_cancel(void) {}
-# endif /* CONFIG_CPU_IDLE_GOV_MENU */
-
#endif
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index b6b215f13b45..14105c26a836 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -23,6 +23,7 @@ struct user_namespace {
struct uid_gid_map projid_map;
atomic_t count;
struct user_namespace *parent;
+ int level;
kuid_t owner;
kgid_t group;
unsigned int proc_inum;
diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h
index 76be077340ea..7dc17e2456de 100644
--- a/include/linux/vmpressure.h
+++ b/include/linux/vmpressure.h
@@ -12,7 +12,7 @@ struct vmpressure {
unsigned long scanned;
unsigned long reclaimed;
/* The lock is used to keep the scanned/reclaimed above in sync. */
- struct mutex sr_lock;
+ struct spinlock sr_lock;
/* The list of vmpressure_event structs. */
struct list_head events;
@@ -30,6 +30,7 @@ extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
extern void vmpressure_init(struct vmpressure *vmpr);
+extern void vmpressure_cleanup(struct vmpressure *vmpr);
extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr);
extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index f487a4750b7f..a67fc1635592 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -811,6 +811,63 @@ do { \
__ret; \
})
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
+ lock, ret) \
+do { \
+ DEFINE_WAIT(__wait); \
+ \
+ for (;;) { \
+ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (signal_pending(current)) { \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ spin_unlock_irq(&lock); \
+ ret = schedule_timeout(ret); \
+ spin_lock_irq(&lock); \
+ if (!ret) \
+ break; \
+ } \
+ finish_wait(&wq, &__wait); \
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition becomes true or a timeout elapses.
+ * The condition is checked under the lock. This is expected
+ * to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ * and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies if the
+ * condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
+ timeout) \
+({ \
+ int __ret = timeout; \
+ \
+ if (!(condition)) \
+ __wait_event_interruptible_lock_irq_timeout( \
+ wq, condition, lock, __ret); \
+ __ret; \
+})
+
/*
* These are the old interfaces to sleep waiting for an event.
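A typical caller of the new macro takes the spinlock, waits, and drops it; the macro itself releases the lock around schedule_timeout() and reacquires it before rechecking the condition. A hypothetical driver-style usage (demo_dev and its fields are illustrative):

```c
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_dev {
	spinlock_t lock;
	wait_queue_head_t wq;
	bool reply_ready;
};

static int wait_for_reply(struct demo_dev *dev)
{
	int ret;

	/* Must hold the lock, as the kernel-doc above requires. */
	spin_lock_irq(&dev->lock);
	ret = wait_event_interruptible_lock_irq_timeout(dev->wq,
							dev->reply_ready,
							dev->lock, 10 * HZ);
	spin_unlock_irq(&dev->lock);

	if (ret == 0)
		return -ETIMEDOUT;	/* timeout elapsed */
	if (ret < 0)
		return ret;		/* interrupted: -ERESTARTSYS */
	return 0;			/* condition became true in time */
}
```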
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 7343a27fe819..47ada23345a1 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -22,6 +22,7 @@
#define _V4L2_CTRLS_H
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/videodev2.h>
/* forward references */
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index a14339c2985f..8a358a2c97e6 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -27,7 +27,7 @@
#include <linux/netdevice.h>
#include <net/ip.h>
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
@@ -122,7 +122,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
if (rc > 0)
/* local bh are disabled so it is ok to use _BH */
NET_ADD_STATS_BH(sock_net(sk),
- LINUX_MIB_LOWLATENCYRXPACKETS, rc);
+ LINUX_MIB_BUSYPOLLRXPACKETS, rc);
} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
!need_resched() && !busy_loop_timeout(end_time));
@@ -146,7 +146,7 @@ static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
sk->sk_napi_id = skb->napi_id;
}
-#else /* CONFIG_NET_LL_RX_POLL */
+#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
{
return 0;
@@ -162,11 +162,6 @@ static inline bool sk_can_busy_loop(struct sock *sk)
return false;
}
-static inline bool sk_busy_poll(struct sock *sk, int nonblock)
-{
- return false;
-}
-
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
{
@@ -181,5 +176,10 @@ static inline bool busy_loop_timeout(unsigned long end_time)
return true;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+static inline bool sk_busy_loop(struct sock *sk, int nonblock)
+{
+ return false;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#endif /* _LINUX_NET_BUSY_POLL_H */
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 2a601e7da1bf..48ec25a7fcb6 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -300,7 +300,7 @@ extern void inet6_rt_notify(int event, struct rt6_info *rt,
struct nl_info *info);
extern void fib6_run_gc(unsigned long expires,
- struct net *net);
+ struct net *net, bool force);
extern void fib6_gc_cleanup(void);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 260f83f16bcf..f667248202b6 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -135,6 +135,8 @@ extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
__be32 mtu);
extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+ u32 mark);
extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
struct netlink_callback;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 781b3cf86a2f..a354db5b7662 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -145,20 +145,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
return INET_ECN_encapsulate(tos, inner);
}
-static inline void tunnel_ip_select_ident(struct sk_buff *skb,
- const struct iphdr *old_iph,
- struct dst_entry *dst)
-{
- struct iphdr *iph = ip_hdr(skb);
-
- /* Use inner packet iph-id if possible. */
- if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
- iph->id = old_iph->id;
- else
- __ip_select_ident(iph, dst,
- (skb_shinfo(skb)->gso_segs ?: 1) - 1);
-}
-
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
int iptunnel_xmit(struct net *net, struct rtable *rt,
struct sk_buff *skb,
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 949d77528f2f..6fea32340ae8 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -119,7 +119,7 @@ extern struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
* if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
* also need a pad of 2.
*/
-static int ndisc_addr_option_pad(unsigned short type)
+static inline int ndisc_addr_option_pad(unsigned short type)
{
switch (type) {
case ARPHRD_INFINIBAND: return 2;
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 0af851c3b038..b64b7bce4b94 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -59,7 +59,7 @@ struct nfc_hci_ops {
struct nfc_target *target);
int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event,
struct sk_buff *skb);
- int (*fw_upload)(struct nfc_hci_dev *hdev, const char *firmware_name);
+ int (*fw_download)(struct nfc_hci_dev *hdev, const char *firmware_name);
int (*discover_se)(struct nfc_hci_dev *dev);
int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx);
int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx);
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 0e353f1658bb..5f286b726bb6 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -68,7 +68,7 @@ struct nfc_ops {
void *cb_context);
int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb);
int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target);
- int (*fw_upload)(struct nfc_dev *dev, const char *firmware_name);
+ int (*fw_download)(struct nfc_dev *dev, const char *firmware_name);
/* Secure Element API */
int (*discover_se)(struct nfc_dev *dev);
@@ -127,7 +127,7 @@ struct nfc_dev {
int targets_generation;
struct device dev;
bool dev_up;
- bool fw_upload_in_progress;
+ bool fw_download_in_progress;
u8 rf_mode;
bool polling;
struct nfc_target *active_target;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6eab63363e59..e5ae0c50fa9c 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -683,13 +683,19 @@ struct psched_ratecfg {
u64 rate_bytes_ps; /* bytes per second */
u32 mult;
u16 overhead;
+ u8 linklayer;
u8 shift;
};
static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
unsigned int len)
{
- return ((u64)(len + r->overhead) * r->mult) >> r->shift;
+ len += r->overhead;
+
+ if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
+ return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;
+
+ return ((u64)len * r->mult) >> r->shift;
}
extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
@@ -700,6 +706,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
memset(res, 0, sizeof(*res));
res->rate = r->rate_bytes_ps;
res->overhead = r->overhead;
+ res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
#endif
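The ATM branch accounts for cell framing: every 53-byte ATM cell carries only 48 bytes of payload, so the on-wire length rounds the packet up to whole cells before the rate math. A small check of the formula:

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* On-the-wire size of a packet on ATM: round up to whole
 * 53-byte cells, each carrying 48 bytes of payload. */
static unsigned atm_wire_len(unsigned len)
{
	return DIV_ROUND_UP(len, 48) * 53;
}

int main(void)
{
	/* A 1500-byte frame needs 32 cells: 1696 bytes on the wire. */
	printf("%u\n", atm_wire_len(1500));
	return 0;
}
```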
diff --git a/include/net/sock.h b/include/net/sock.h
index 95a5a2c6925a..31d5cfbb51ec 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -327,7 +327,7 @@ struct sock {
#ifdef CONFIG_RPS
__u32 sk_rxhash;
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_napi_id;
unsigned int sk_ll_usec;
#endif
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 321d4ac5c512..fa8b3adf9ffb 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -979,6 +979,8 @@ struct drm_radeon_cs {
#define RADEON_INFO_RING_WORKING 0x15
/* SI tile mode array */
#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
+/* query if CP DMA is supported on the compute ring */
+#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
struct drm_radeon_info {
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index d50036953497..1db453e4b550 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -215,8 +215,8 @@ struct fw_cdev_event_request2 {
* with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with
* %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets
* without the interrupt bit set that the kernel's internal buffer for @header
- * is about to overflow. (In the last case, kernels with ABI version < 5 drop
- * header data up to the next interrupt packet.)
+ * is about to overflow. (In the last case, ABI versions < 5 drop header data
+ * up to the next interrupt packet.)
*
* Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
*
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 6cf06bfd841b..2fee45bdec0a 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -133,4 +133,38 @@ struct ip_beet_phdr {
__u8 reserved;
};
+/* index values for the variables in ipv4_devconf */
+enum
+{
+ IPV4_DEVCONF_FORWARDING=1,
+ IPV4_DEVCONF_MC_FORWARDING,
+ IPV4_DEVCONF_PROXY_ARP,
+ IPV4_DEVCONF_ACCEPT_REDIRECTS,
+ IPV4_DEVCONF_SECURE_REDIRECTS,
+ IPV4_DEVCONF_SEND_REDIRECTS,
+ IPV4_DEVCONF_SHARED_MEDIA,
+ IPV4_DEVCONF_RP_FILTER,
+ IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
+ IPV4_DEVCONF_BOOTP_RELAY,
+ IPV4_DEVCONF_LOG_MARTIANS,
+ IPV4_DEVCONF_TAG,
+ IPV4_DEVCONF_ARPFILTER,
+ IPV4_DEVCONF_MEDIUM_ID,
+ IPV4_DEVCONF_NOXFRM,
+ IPV4_DEVCONF_NOPOLICY,
+ IPV4_DEVCONF_FORCE_IGMP_VERSION,
+ IPV4_DEVCONF_ARP_ANNOUNCE,
+ IPV4_DEVCONF_ARP_IGNORE,
+ IPV4_DEVCONF_PROMOTE_SECONDARIES,
+ IPV4_DEVCONF_ARP_ACCEPT,
+ IPV4_DEVCONF_ARP_NOTIFY,
+ IPV4_DEVCONF_ACCEPT_LOCAL,
+ IPV4_DEVCONF_SRC_VMARK,
+ IPV4_DEVCONF_PROXY_ARP_PVLAN,
+ IPV4_DEVCONF_ROUTE_LOCALNET,
+ __IPV4_DEVCONF_MAX
+};
+
+#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
+
#endif /* _UAPI_LINUX_IP_H */
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index caed0f324d5f..8137dd8d2adf 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -69,8 +69,8 @@
* starting a poll from a device which has a secure element enabled means
* we want to do SE based card emulation.
* @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element.
- * @NFC_CMD_FW_UPLOAD: Request to Load/flash firmware, or event to inform that
- * some firmware was loaded
+ * @NFC_CMD_FW_DOWNLOAD: Request to Load/flash firmware, or event to inform
+ * that some firmware was loaded
*/
enum nfc_commands {
NFC_CMD_UNSPEC,
@@ -94,7 +94,7 @@ enum nfc_commands {
NFC_CMD_DISABLE_SE,
NFC_CMD_LLC_SDREQ,
NFC_EVENT_LLC_SDRES,
- NFC_CMD_FW_UPLOAD,
+ NFC_CMD_FW_DOWNLOAD,
NFC_EVENT_SE_ADDED,
NFC_EVENT_SE_REMOVED,
/* private: internal use only */
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index dbd71b0c7d8c..09d62b9228ff 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -73,9 +73,17 @@ struct tc_estimator {
#define TC_H_ROOT (0xFFFFFFFFU)
#define TC_H_INGRESS (0xFFFFFFF1U)
+/* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */
+enum tc_link_layer {
+ TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
+ TC_LINKLAYER_ETHERNET,
+ TC_LINKLAYER_ATM,
+};
+#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
+
struct tc_ratespec {
unsigned char cell_log;
- unsigned char __reserved;
+ __u8 linklayer; /* lower 4 bits */
unsigned short overhead;
short cell_align;
unsigned short mpu;
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index af0a674cc677..a1356d3b54df 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -253,7 +253,7 @@ enum
LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */
LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */
LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
- LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */
+ LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */
__LINUX_MIB_MAX
};
diff --git a/init/Kconfig b/init/Kconfig
index 247084be0590..fed81b576f29 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -955,7 +955,7 @@ config MEMCG_SWAP_ENABLED
Memory Resource Controller Swap Extension comes with its price in
a bigger memory consumption. General purpose distribution kernels
which want to enable the feature but keep it disabled by default
- and let the user enable it by swapaccount boot command line
+ and let the user enable it by swapaccount=1 boot command line
parameter should have this option unselected.
For those who want to have the feature enabled by default should
select this option (if, for some reason, they need to disable it
diff --git a/kernel/Makefile b/kernel/Makefile
index 470839d1a30e..35ef1185e359 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
-obj-y = fork.o exec_domain.o panic.o printk.o \
+obj-y = fork.o exec_domain.o panic.o \
cpu.o exit.o itimer.o time.o softirq.o resource.o \
sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
@@ -24,6 +24,7 @@ endif
obj-y += sched/
obj-y += power/
+obj-y += printk/
obj-y += cpu/
obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 789ec4683db3..781845a013ab 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4335,8 +4335,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
}
err = percpu_ref_init(&css->refcnt, css_release);
- if (err)
+ if (err) {
+ ss->css_free(cgrp);
goto err_free_all;
+ }
init_cgroup_css(css, ss, cgrp);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e5657788fedd..ea1966db34f2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -475,13 +475,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
/*
* Cpusets with tasks - existing or newly being attached - can't
- * have empty cpus_allowed or mems_allowed.
+ * be changed to have empty cpus_allowed or mems_allowed.
*/
ret = -ENOSPC;
- if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
- (cpumask_empty(trial->cpus_allowed) &&
- nodes_empty(trial->mems_allowed)))
- goto out;
+ if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
+ if (!cpumask_empty(cur->cpus_allowed) &&
+ cpumask_empty(trial->cpus_allowed))
+ goto out;
+ if (!nodes_empty(cur->mems_allowed) &&
+ nodes_empty(trial->mems_allowed))
+ goto out;
+ }
ret = 0;
out:
@@ -1608,11 +1612,13 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
struct cpuset *cs = cgroup_cs(cgrp);
cpuset_filetype_t type = cft->private;
- int retval = -ENODEV;
+ int retval = 0;
mutex_lock(&cpuset_mutex);
- if (!is_cpuset_online(cs))
+ if (!is_cpuset_online(cs)) {
+ retval = -ENODEV;
goto out_unlock;
+ }
switch (type) {
case FILE_CPU_EXCLUSIVE:
diff --git a/kernel/fork.c b/kernel/fork.c
index 403d2bb8a968..e23bb19e2a3e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
int __user *, parent_tidptr,
int __user *, child_tidptr,
int, tls_val)
+#elif defined(CONFIG_CLONE_BACKWARDS3)
+SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
+ int, stack_size,
+ int __user *, parent_tidptr,
+ int __user *, child_tidptr,
+ int, tls_val)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
int __user *, parent_tidptr,
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 8b2afc1c9df0..b462fa197517 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -33,7 +33,7 @@ static DEFINE_SPINLOCK(freezer_lock);
*/
bool freezing_slow_path(struct task_struct *p)
{
- if (p->flags & PF_NOFREEZE)
+ if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
return false;
if (pm_nosig_freezing || cgroup_freezing(p))
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff05f4bd86eb..a52ee7bb830d 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
might_sleep();
ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
0, &ctx->dep_map, _RET_IP_, ctx);
- if (!ret && ctx->acquired > 0)
+ if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
return ret;
@@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
0, &ctx->dep_map, _RET_IP_, ctx);
- if (!ret && ctx->acquired > 0)
+ if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
return ret;
diff --git a/kernel/power/process.c b/kernel/power/process.c
index fc0df8486449..06ec8869dbf1 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -109,6 +109,8 @@ static int try_to_freeze_tasks(bool user_only)
/**
* freeze_processes - Signal user space processes to enter the refrigerator.
+ * The current thread will not be frozen. The same process that calls
+ * freeze_processes must later call thaw_processes.
*
* On success, returns 0. On failure, -errno and system is fully thawed.
*/
@@ -120,6 +122,9 @@ int freeze_processes(void)
if (error)
return error;
+ /* Make sure this task doesn't get frozen */
+ current->flags |= PF_SUSPEND_TASK;
+
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
@@ -168,6 +173,7 @@ int freeze_kernel_threads(void)
void thaw_processes(void)
{
struct task_struct *g, *p;
+ struct task_struct *curr = current;
if (pm_freezing)
atomic_dec(&system_freezing_cnt);
@@ -182,10 +188,15 @@ void thaw_processes(void)
read_lock(&tasklist_lock);
do_each_thread(g, p) {
+ /* No other threads should have PF_SUSPEND_TASK set */
+ WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
__thaw_task(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
+ WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
+ curr->flags &= ~PF_SUSPEND_TASK;
+
usermodehelper_enable();
schedule();
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 06fe28589e9c..a394297f8b2f 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req)
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
+static void __pm_qos_update_request(struct pm_qos_request *req,
+ s32 new_value)
+{
+ trace_pm_qos_update_request(req->pm_qos_class, new_value);
+
+ if (new_value != req->node.prio)
+ pm_qos_update_target(
+ pm_qos_array[req->pm_qos_class]->constraints,
+ &req->node, PM_QOS_UPDATE_REQ, new_value);
+}
+
/**
* pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
* @work: work struct for the delayed work (timeout)
@@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work)
struct pm_qos_request,
work);
- pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+ __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
/**
@@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
}
cancel_delayed_work_sync(&req->work);
-
- trace_pm_qos_update_request(req->pm_qos_class, new_value);
- if (new_value != req->node.prio)
- pm_qos_update_target(
- pm_qos_array[req->pm_qos_class]->constraints,
- &req->node, PM_QOS_UPDATE_REQ, new_value);
+ __pm_qos_update_request(req, new_value);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
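pm_qos_update_request() starts with cancel_delayed_work_sync(); calling it from pm_qos_work_fn() would make the work item wait for its own completion. The hunk therefore extracts __pm_qos_update_request() so the timeout handler can apply the update without the self-cancel. The shape of the fix, with illustrative names:

```c
#include <linux/workqueue.h>

#define DEFAULT_VALUE -1

struct req {
	struct delayed_work work;
	int value;
};

static void apply_update(struct req *r, int value)
{
	r->value = value;	/* stands in for pm_qos_update_target() */
}

/* Bare helper: no cancellation, safe from any context. */
static void __update(struct req *r, int value)
{
	apply_update(r, value);
}

/* Public entry point: safe only when not running on the workqueue. */
void update_request(struct req *r, int value)
{
	cancel_delayed_work_sync(&r->work);
	__update(r, value);
}

static void work_fn(struct work_struct *work)
{
	struct req *r = container_of(to_delayed_work(work),
				     struct req, work);

	/* Must not call update_request(): cancel_delayed_work_sync()
	 * would wait for this very work item to finish. */
	__update(r, DEFAULT_VALUE);
}
```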
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
new file mode 100644
index 000000000000..85405bdcf2b3
--- /dev/null
+++ b/kernel/printk/Makefile
@@ -0,0 +1,2 @@
+obj-y = printk.o
+obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
new file mode 100644
index 000000000000..276762f3a460
--- /dev/null
+++ b/kernel/printk/braille.c
@@ -0,0 +1,49 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/string.h>
+
+#include "console_cmdline.h"
+#include "braille.h"
+
+char *_braille_console_setup(char **str, char **brl_options)
+{
+ if (!memcmp(*str, "brl,", 4)) {
+ *brl_options = "";
+ *str += 4;
+ } else if (!memcmp(*str, "brl=", 4)) {
+ *brl_options = *str + 4;
+ *str = strchr(*brl_options, ',');
+ if (!*str)
+ pr_err("need port name after brl=\n");
+ else
+ *((*str)++) = 0;
+ } else
+ return NULL;
+
+ return *str;
+}
+
+int
+_braille_register_console(struct console *console, struct console_cmdline *c)
+{
+ int rtn = 0;
+
+ if (c->brl_options) {
+ console->flags |= CON_BRL;
+ rtn = braille_register_console(console, c->index, c->options,
+ c->brl_options);
+ }
+
+ return rtn;
+}
+
+int
+_braille_unregister_console(struct console *console)
+{
+ if (console->flags & CON_BRL)
+ return braille_unregister_console(console);
+
+ return 0;
+}
diff --git a/kernel/printk/braille.h b/kernel/printk/braille.h
new file mode 100644
index 000000000000..769d771145c8
--- /dev/null
+++ b/kernel/printk/braille.h
@@ -0,0 +1,48 @@
+#ifndef _PRINTK_BRAILLE_H
+#define _PRINTK_BRAILLE_H
+
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+
+static inline void
+braille_set_options(struct console_cmdline *c, char *brl_options)
+{
+ c->brl_options = brl_options;
+}
+
+char *
+_braille_console_setup(char **str, char **brl_options);
+
+int
+_braille_register_console(struct console *console, struct console_cmdline *c);
+
+int
+_braille_unregister_console(struct console *console);
+
+#else
+
+static inline void
+braille_set_options(struct console_cmdline *c, char *brl_options)
+{
+}
+
+static inline char *
+_braille_console_setup(char **str, char **brl_options)
+{
+ return NULL;
+}
+
+static inline int
+_braille_register_console(struct console *console, struct console_cmdline *c)
+{
+ return 0;
+}
+
+static inline int
+_braille_unregister_console(struct console *console)
+{
+ return 0;
+}
+
+#endif
+
+#endif
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
new file mode 100644
index 000000000000..cbd69d842341
--- /dev/null
+++ b/kernel/printk/console_cmdline.h
@@ -0,0 +1,14 @@
+#ifndef _CONSOLE_CMDLINE_H
+#define _CONSOLE_CMDLINE_H
+
+struct console_cmdline
+{
+ char name[8]; /* Name of the driver */
+ int index; /* Minor dev. to use */
+ char *options; /* Options for the driver */
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+ char *brl_options; /* Options for braille driver */
+#endif
+};
+
+#endif
diff --git a/kernel/printk.c b/kernel/printk/printk.c
index 69b0890ed7e5..5b5a7080e2a5 100644
--- a/kernel/printk.c
+++ b/kernel/printk/printk.c
@@ -51,6 +51,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
+#include "console_cmdline.h"
+#include "braille.h"
+
/* printk's without a loglevel use this.. */
#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
@@ -105,19 +108,11 @@ static struct console *exclusive_console;
/*
* Array of consoles built from command line options (console=)
*/
-struct console_cmdline
-{
- char name[8]; /* Name of the driver */
- int index; /* Minor dev. to use */
- char *options; /* Options for the driver */
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- char *brl_options; /* Options for braille driver */
-#endif
-};
#define MAX_CMDLINECONSOLES 8
static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
+
static int selected_console = -1;
static int preferred_console = -1;
int console_set_on_cmdline;
@@ -178,7 +173,7 @@ static int console_may_schedule;
* 67 "g"
* 0032 00 00 00 padding to next message header
*
- * The 'struct log' buffer header must never be directly exported to
+ * The 'struct printk_log' buffer header must never be directly exported to
* userspace, it is a kernel-private implementation detail that might
* need to be changed in the future, when the requirements change.
*
@@ -200,7 +195,7 @@ enum log_flags {
LOG_CONT = 8, /* text is a fragment of a continuation line */
};
-struct log {
+struct printk_log {
u64 ts_nsec; /* timestamp in nanoseconds */
u16 len; /* length of entire record */
u16 text_len; /* length of text buffer */
@@ -248,7 +243,7 @@ static u32 clear_idx;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define LOG_ALIGN 4
#else
-#define LOG_ALIGN __alignof__(struct log)
+#define LOG_ALIGN __alignof__(struct printk_log)
#endif
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
@@ -259,35 +254,35 @@ static u32 log_buf_len = __LOG_BUF_LEN;
static volatile unsigned int logbuf_cpu = UINT_MAX;
/* human readable text of the record */
-static char *log_text(const struct log *msg)
+static char *log_text(const struct printk_log *msg)
{
- return (char *)msg + sizeof(struct log);
+ return (char *)msg + sizeof(struct printk_log);
}
/* optional key/value pair dictionary attached to the record */
-static char *log_dict(const struct log *msg)
+static char *log_dict(const struct printk_log *msg)
{
- return (char *)msg + sizeof(struct log) + msg->text_len;
+ return (char *)msg + sizeof(struct printk_log) + msg->text_len;
}
/* get record by index; idx must point to valid msg */
-static struct log *log_from_idx(u32 idx)
+static struct printk_log *log_from_idx(u32 idx)
{
- struct log *msg = (struct log *)(log_buf + idx);
+ struct printk_log *msg = (struct printk_log *)(log_buf + idx);
/*
* A length == 0 record is the end of buffer marker. Wrap around and
* read the message at the start of the buffer.
*/
if (!msg->len)
- return (struct log *)log_buf;
+ return (struct printk_log *)log_buf;
return msg;
}
/* get next record; idx must point to valid msg */
static u32 log_next(u32 idx)
{
- struct log *msg = (struct log *)(log_buf + idx);
+ struct printk_log *msg = (struct printk_log *)(log_buf + idx);
/* length == 0 indicates the end of the buffer; wrap */
/*
@@ -296,7 +291,7 @@ static u32 log_next(u32 idx)
* return the one after that.
*/
if (!msg->len) {
- msg = (struct log *)log_buf;
+ msg = (struct printk_log *)log_buf;
return msg->len;
}
return idx + msg->len;
@@ -308,11 +303,11 @@ static void log_store(int facility, int level,
const char *dict, u16 dict_len,
const char *text, u16 text_len)
{
- struct log *msg;
+ struct printk_log *msg;
u32 size, pad_len;
/* number of '\0' padding bytes to next message */
- size = sizeof(struct log) + text_len + dict_len;
+ size = sizeof(struct printk_log) + text_len + dict_len;
pad_len = (-size) & (LOG_ALIGN - 1);
size += pad_len;
@@ -324,7 +319,7 @@ static void log_store(int facility, int level,
else
free = log_first_idx - log_next_idx;
- if (free > size + sizeof(struct log))
+ if (free > size + sizeof(struct printk_log))
break;
/* drop old messages until we have enough contiguous space */
@@ -332,18 +327,18 @@ static void log_store(int facility, int level,
log_first_seq++;
}
- if (log_next_idx + size + sizeof(struct log) >= log_buf_len) {
+ if (log_next_idx + size + sizeof(struct printk_log) >= log_buf_len) {
/*
* This message + an additional empty header does not fit
* at the end of the buffer. Add an empty header with len == 0
* to signify a wrap around.
*/
- memset(log_buf + log_next_idx, 0, sizeof(struct log));
+ memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
log_next_idx = 0;
}
/* fill message */
- msg = (struct log *)(log_buf + log_next_idx);
+ msg = (struct printk_log *)(log_buf + log_next_idx);
memcpy(log_text(msg), text, text_len);
msg->text_len = text_len;
memcpy(log_dict(msg), dict, dict_len);
@@ -356,7 +351,7 @@ static void log_store(int facility, int level,
else
msg->ts_nsec = local_clock();
memset(log_dict(msg) + dict_len, 0, pad_len);
- msg->len = sizeof(struct log) + text_len + dict_len + pad_len;
+ msg->len = sizeof(struct printk_log) + text_len + dict_len + pad_len;
/* insert message */
log_next_idx += msg->len;
@@ -479,7 +474,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct devkmsg_user *user = file->private_data;
- struct log *msg;
+ struct printk_log *msg;
u64 ts_usec;
size_t i;
char cont = '-';
@@ -724,14 +719,14 @@ void log_buf_kexec_setup(void)
VMCOREINFO_SYMBOL(log_first_idx);
VMCOREINFO_SYMBOL(log_next_idx);
/*
- * Export struct log size and field offsets. User space tools can
+ * Export struct printk_log size and field offsets. User space tools can
* parse it and detect any changes to structure down the line.
*/
- VMCOREINFO_STRUCT_SIZE(log);
- VMCOREINFO_OFFSET(log, ts_nsec);
- VMCOREINFO_OFFSET(log, len);
- VMCOREINFO_OFFSET(log, text_len);
- VMCOREINFO_OFFSET(log, dict_len);
+ VMCOREINFO_STRUCT_SIZE(printk_log);
+ VMCOREINFO_OFFSET(printk_log, ts_nsec);
+ VMCOREINFO_OFFSET(printk_log, len);
+ VMCOREINFO_OFFSET(printk_log, text_len);
+ VMCOREINFO_OFFSET(printk_log, dict_len);
}
#endif
@@ -884,7 +879,7 @@ static size_t print_time(u64 ts, char *buf)
(unsigned long)ts, rem_nsec / 1000);
}
-static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
+static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
{
size_t len = 0;
unsigned int prefix = (msg->facility << 3) | msg->level;
@@ -907,7 +902,7 @@ static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
return len;
}
-static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
bool syslog, char *buf, size_t size)
{
const char *text = log_text(msg);
@@ -969,7 +964,7 @@ static size_t msg_print_text(const struct log *msg, enum log_flags prev,
static int syslog_print(char __user *buf, int size)
{
char *text;
- struct log *msg;
+ struct printk_log *msg;
int len = 0;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
@@ -1060,7 +1055,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
idx = clear_idx;
prev = 0;
while (seq < log_next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
len += msg_print_text(msg, prev, true, NULL, 0);
prev = msg->flags;
@@ -1073,7 +1068,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
idx = clear_idx;
prev = 0;
while (len > size && seq < log_next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
len -= msg_print_text(msg, prev, true, NULL, 0);
prev = msg->flags;
@@ -1087,7 +1082,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
len = 0;
prev = 0;
while (len >= 0 && seq < next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
int textlen;
textlen = msg_print_text(msg, prev, true, text,
@@ -1233,7 +1228,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
error = 0;
while (seq < log_next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
error += msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
@@ -1719,10 +1714,10 @@ static struct cont {
u8 level;
bool flushed:1;
} cont;
-static struct log *log_from_idx(u32 idx) { return NULL; }
+static struct printk_log *log_from_idx(u32 idx) { return NULL; }
static u32 log_next(u32 idx) { return 0; }
static void call_console_drivers(int level, const char *text, size_t len) {}
-static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
bool syslog, char *buf, size_t size) { return 0; }
static size_t cont_print_text(char *text, size_t size) { return 0; }
@@ -1761,23 +1756,23 @@ static int __add_preferred_console(char *name, int idx, char *options,
* See if this tty is not yet registered, and
* if we have a slot free.
*/
- for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
- if (strcmp(console_cmdline[i].name, name) == 0 &&
- console_cmdline[i].index == idx) {
- if (!brl_options)
- selected_console = i;
- return 0;
+ for (i = 0, c = console_cmdline;
+ i < MAX_CMDLINECONSOLES && c->name[0];
+ i++, c++) {
+ if (strcmp(c->name, name) == 0 && c->index == idx) {
+ if (!brl_options)
+ selected_console = i;
+ return 0;
}
+ }
if (i == MAX_CMDLINECONSOLES)
return -E2BIG;
if (!brl_options)
selected_console = i;
- c = &console_cmdline[i];
strlcpy(c->name, name, sizeof(c->name));
c->options = options;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- c->brl_options = brl_options;
-#endif
+ braille_set_options(c, brl_options);
+
c->index = idx;
return 0;
}
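The hunk above replaces repeated console_cmdline[i] indexing with a cursor pointer that advances alongside the loop counter. A standalone sketch of the same idiom (table contents and names are hypothetical):

#include <stdio.h>
#include <string.h>

#define MAX_SLOTS 8

struct slot {
	char name[16];
	int index;
};

static struct slot table[MAX_SLOTS] = {
	{ "ttyS", 0 },
	{ "tty",  1 },
};

/* Advance a cursor pointer alongside the counter instead of writing
 * table[i].field on every access, as the refactored loops do. */
static int find_slot(const char *name, int idx)
{
	struct slot *c;
	int i;

	for (i = 0, c = table; i < MAX_SLOTS && c->name[0]; i++, c++)
		if (strcmp(c->name, name) == 0 && c->index == idx)
			return i;
	return -1;
}

int main(void)
{
	printf("%d\n", find_slot("tty", 1));  /* 1 */
	printf("%d\n", find_slot("ttyS", 3)); /* -1 */
	return 0;
}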
@@ -1790,20 +1785,8 @@ static int __init console_setup(char *str)
char *s, *options, *brl_options = NULL;
int idx;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- if (!memcmp(str, "brl,", 4)) {
- brl_options = "";
- str += 4;
- } else if (!memcmp(str, "brl=", 4)) {
- brl_options = str + 4;
- str = strchr(brl_options, ',');
- if (!str) {
- printk(KERN_ERR "need port name after brl=\n");
- return 1;
- }
- *(str++) = 0;
- }
-#endif
+ if (_braille_console_setup(&str, &brl_options))
+ return 1;
/*
* Decode str into name, index, options.
@@ -1858,15 +1841,15 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
struct console_cmdline *c;
int i;
- for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
- if (strcmp(console_cmdline[i].name, name) == 0 &&
- console_cmdline[i].index == idx) {
- c = &console_cmdline[i];
- strlcpy(c->name, name_new, sizeof(c->name));
- c->name[sizeof(c->name) - 1] = 0;
- c->options = options;
- c->index = idx_new;
- return i;
+ for (i = 0, c = console_cmdline;
+ i < MAX_CMDLINECONSOLES && c->name[0];
+ i++, c++)
+ if (strcmp(c->name, name) == 0 && c->index == idx) {
+ strlcpy(c->name, name_new, sizeof(c->name));
+ c->name[sizeof(c->name) - 1] = 0;
+ c->options = options;
+ c->index = idx_new;
+ return i;
}
/* not found */
return -1;
@@ -2046,7 +2029,7 @@ void console_unlock(void)
console_cont_flush(text, sizeof(text));
again:
for (;;) {
- struct log *msg;
+ struct printk_log *msg;
size_t len;
int level;
@@ -2241,6 +2224,7 @@ void register_console(struct console *newcon)
int i;
unsigned long flags;
struct console *bcon = NULL;
+ struct console_cmdline *c;
/*
* before we register a new CON_BOOT console, make sure we don't
@@ -2288,30 +2272,25 @@ void register_console(struct console *newcon)
* See if this console matches one we selected on
* the command line.
*/
- for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
- i++) {
- if (strcmp(console_cmdline[i].name, newcon->name) != 0)
+ for (i = 0, c = console_cmdline;
+ i < MAX_CMDLINECONSOLES && c->name[0];
+ i++, c++) {
+ if (strcmp(c->name, newcon->name) != 0)
continue;
if (newcon->index >= 0 &&
- newcon->index != console_cmdline[i].index)
+ newcon->index != c->index)
continue;
if (newcon->index < 0)
- newcon->index = console_cmdline[i].index;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- if (console_cmdline[i].brl_options) {
- newcon->flags |= CON_BRL;
- braille_register_console(newcon,
- console_cmdline[i].index,
- console_cmdline[i].options,
- console_cmdline[i].brl_options);
+ newcon->index = c->index;
+
+ if (_braille_register_console(newcon, c))
return;
- }
-#endif
+
if (newcon->setup &&
newcon->setup(newcon, console_cmdline[i].options) != 0)
break;
newcon->flags |= CON_ENABLED;
- newcon->index = console_cmdline[i].index;
+ newcon->index = c->index;
if (i == selected_console) {
newcon->flags |= CON_CONSDEV;
preferred_console = selected_console;
@@ -2394,13 +2373,13 @@ EXPORT_SYMBOL(register_console);
int unregister_console(struct console *console)
{
struct console *a, *b;
- int res = 1;
+ int res;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- if (console->flags & CON_BRL)
- return braille_unregister_console(console);
-#endif
+ res = _braille_unregister_console(console);
+ if (res)
+ return res;
+ res = 1;
console_lock();
if (console_drivers == console) {
console_drivers=console->next;
@@ -2666,7 +2645,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
char *line, size_t size, size_t *len)
{
- struct log *msg;
+ struct printk_log *msg;
size_t l = 0;
bool ret = false;
@@ -2778,7 +2757,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
idx = dumper->cur_idx;
prev = 0;
while (seq < dumper->next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
l += msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
@@ -2791,7 +2770,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
idx = dumper->cur_idx;
prev = 0;
while (l > size && seq < dumper->next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
l -= msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
@@ -2806,7 +2785,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
l = 0;
prev = 0;
while (seq < dumper->next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
l += msg_print_text(msg, prev, syslog, buf + l, size - l);
idx = log_next(idx);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 4041f5747e73..a146ee327f6a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -469,7 +469,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
/* Architecture-specific hardware disable .. */
ptrace_disable(child);
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- flush_ptrace_hw_breakpoint(child);
write_lock_irq(&tasklist_lock);
/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7c32cb7bfeb..05c39f030314 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p)
/**
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
*/
inline int task_curr(const struct task_struct *p)
{
@@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
* the simpler "current->state = TASK_RUNNING" to mark yourself
* runnable without the overhead of this.
*
- * Returns %true if @p was woken up, %false if it was already running
+ * Return: %true if @p was woken up, %false if it was already running
* or @state didn't match @p's state.
*/
static int
@@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
unsigned long flags;
int cpu, success = 0;
- smp_wmb();
+ /*
+ * If we are going to wake up a thread waiting for CONDITION we
+ * need to ensure that CONDITION=1 done by the caller can not be
+ * reordered with p->state check below. This pairs with mb() in
+ * set_current_state() the waiting thread does.
+ */
+ smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
if (!(p->state & state))
goto out;
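The comment added above describes a classic store-buffering pairing: the waker's CONDITION store must be ordered before its read of the sleeper's state, mirroring the mb() in set_current_state(). A user-space analogue with C11 fences (names are hypothetical; seq_cst fences stand in for smp_mb()):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int condition;    /* the "CONDITION" the sleeper waits for */
static atomic_int task_state;   /* 0 = running, 1 = sleeping */

static void *waker(void *arg)
{
	(void)arg;
	atomic_store_explicit(&condition, 1, memory_order_relaxed);
	/* plays the role of smp_mb__before_spinlock() in try_to_wake_up() */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&task_state, memory_order_relaxed))
		puts("waker: saw the sleeper, would wake it");
	return NULL;
}

static void *sleeper(void *arg)
{
	(void)arg;
	atomic_store_explicit(&task_state, 1, memory_order_relaxed);
	/* plays the role of the mb() in set_current_state() */
	atomic_thread_fence(memory_order_seq_cst);
	if (!atomic_load_explicit(&condition, memory_order_relaxed))
		puts("sleeper: condition clear, would really sleep");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, waker, NULL);
	pthread_create(&b, NULL, sleeper, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* With both fences in place, at least one thread observes the
	 * other's store, so a wakeup can never be lost. */
	return 0;
}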
@@ -1577,8 +1585,9 @@ out:
* @p: The process to be woken up.
*
* Attempt to wake up the nominated process and move it to the set of runnable
- * processes. Returns 1 if the process was woken up, 0 if it was already
- * running.
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
@@ -2191,6 +2200,8 @@ void scheduler_tick(void)
* This makes sure that uptime, CFS vruntime, load
* balancing, etc... continue to move forward, even
* with a very low granularity.
+ *
+ * Return: Maximum deferment in nanoseconds.
*/
u64 scheduler_tick_max_deferment(void)
{
@@ -2394,6 +2405,12 @@ need_resched:
if (sched_feat(HRTICK))
hrtick_clear(rq);
+ /*
+ * Make sure that signal_pending_state()->signal_pending() below
+ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+ * done by the caller to avoid the race with signal_wake_up().
+ */
+ smp_mb__before_spinlock();
raw_spin_lock_irq(&rq->lock);
switch_count = &prev->nivcsw;
@@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion);
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible.
*
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io);
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible. The caller is accounted as waiting for IO.
*
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
@@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout);
* This waits for completion of a specific task to be signaled. It is
* interruptible.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_interruptible(struct completion *x)
{
@@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. It is interruptible. The timeout is in jiffies.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
@@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
* This waits to be signaled for completion of a specific task. It can be
* interrupted by a kill signal.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_killable(struct completion *x)
{
@@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable);
* signaled or for a specified timeout to expire. It can be
* interrupted by a kill signal. The timeout is in jiffies.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_killable_timeout(struct completion *x,
@@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout);
* try_wait_for_completion - try to decrement a completion without blocking
* @x: completion structure
*
- * Returns: 0 if a decrement cannot be done without blocking
+ * Return: 0 if a decrement cannot be done without blocking
* 1 if a decrement succeeded.
*
* If a completion is being used as a counting completion,
@@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
* completion_done - Test to see if a completion has any waiters
* @x: completion structure
*
- * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ * Return: 0 if there are waiters (wait_for_completion() in progress)
* 1 if there are no waiters.
*
*/
@@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment)
* task_prio - return the priority value of a given task.
* @p: the task in question.
*
- * This is the priority value as seen by users in /proc.
+ * Return: The priority value as seen by users in /proc.
* RT tasks are offset by -200. Normal tasks are centered
* around 0, value goes from -16 to +15.
*/
@@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p)
/**
* task_nice - return the nice value of a given task.
* @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
*/
int task_nice(const struct task_struct *p)
{
@@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice);
/**
* idle_cpu - is a given cpu idle currently?
* @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
*/
int idle_cpu(int cpu)
{
@@ -3226,6 +3247,8 @@ int idle_cpu(int cpu)
/**
* idle_task - return the idle task for a given cpu.
* @cpu: the processor in question.
+ *
+ * Return: The idle task for the cpu @cpu.
*/
struct task_struct *idle_task(int cpu)
{
@@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu)
/**
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
+ *
+ * Return: The task of @pid, if found. %NULL otherwise.
*/
static struct task_struct *find_process_by_pid(pid_t pid)
{
@@ -3432,6 +3457,8 @@ recheck:
* @policy: new policy.
* @param: structure containing the new RT priority.
*
+ * Return: 0 on success. An error code otherwise.
+ *
* NOTE that the task may be already dead.
*/
int sched_setscheduler(struct task_struct *p, int policy,
@@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
* current context has permission. For example, this is needed in
* stop_machine(): we create temporary high priority worker threads,
* but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
const struct sched_param *param)
@@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
* @pid: the pid in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
struct sched_param __user *, param)
@@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
* sys_sched_setparam - set/change the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
@@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
/**
* sys_sched_getscheduler - get the policy (scheduling class) of a thread
* @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
*/
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
@@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
* sys_sched_getparam - get the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
*/
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
@@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to the new cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
@@ -3710,6 +3751,8 @@ out_unlock:
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to hold the current cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
@@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
*
* This function yields the current CPU to other tasks. If there are no
* other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
*/
SYSCALL_DEFINE0(sched_yield)
{
@@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield);
* It's the caller's job to ensure that the target task struct
* can't go away on us before we can do any checks.
*
- * Returns:
+ * Return:
* true (>0) if we indeed boosted the target task.
* false (0) if we failed to boost the target.
* -ESRCH if there's no task to yield to.
@@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout)
* sys_sched_get_priority_max - return maximum RT priority.
* @policy: scheduling class.
*
- * this syscall returns the maximum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
*/
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
@@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
* sys_sched_get_priority_min - return minimum RT priority.
* @policy: scheduling class.
*
- * this syscall returns the minimum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
*/
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
@@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
*
* this syscall writes the default timeslice value of a given process
* into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
*/
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
struct timespec __user *, interval)
@@ -6632,6 +6682,8 @@ void normalize_rt_tasks(void)
* @cpu: the processor in question.
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
*/
struct task_struct *curr_task(int cpu)
{
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 1095e878a46f..8b836b376d91 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -62,7 +62,7 @@ static int convert_prio(int prio)
* any discrepancies created by racing against the uncertainty of the current
* priority configuration.
*
- * Returns: (int)bool - CPUs were found
+ * Return: (int)bool - CPUs were found
*/
int cpupri_find(struct cpupri *cp, struct task_struct *p,
struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
* cpupri_init - initialize the cpupri structure
* @cp: The cpupri context
*
- * Returns: -ENOMEM if memory fails.
+ * Return: -ENOMEM on memory allocation failure.
*/
int cpupri_init(struct cpupri *cp)
{
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bb456f44b7b1..68f1609ca149 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -851,7 +851,7 @@ void task_numa_fault(int node, int pages, bool migrated)
{
struct task_struct *p = current;
- if (!sched_feat_numa(NUMA))
+ if (!numabalancing_enabled)
return;
/* FIXME: Allocate task-specific structure for placement policy here */
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
*/
update_entity_load_avg(curr, 1);
update_cfs_rq_blocked_load(cfs_rq, 1);
+ update_cfs_shares(cfs_rq);
#ifdef CONFIG_SCHED_HRTICK
/*
@@ -4280,6 +4281,8 @@ struct sg_lb_stats {
* get_sd_load_idx - Obtain the load index for a given sched domain.
* @sd: The sched_domain whose load_idx is to be obtained.
* @idle: The idle status of the CPU whose sd load_idx is obtained.
+ *
+ * Return: The load index.
*/
static inline int get_sd_load_idx(struct sched_domain *sd,
enum cpu_idle_type idle)
@@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
*
* Determine if @sg is a busier group than the previously selected
* busiest group.
+ *
+ * Return: %true if @sg is a busier group than the previously selected
+ * busiest group. %false otherwise.
*/
static bool update_sd_pick_busiest(struct lb_env *env,
struct sd_lb_stats *sds,
@@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
* assuming lower CPU number will be equivalent to lower a SMT thread
* number.
*
- * Returns 1 when packing is required and a task should be moved to
+ * Return: 1 when packing is required and a task should be moved to
* this CPU. The amount of the imbalance is returned in *imbalance.
*
* @env: The load balancing environment.
@@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* @balance: Pointer to a variable indicating if this_cpu
* is the appropriate cpu to perform load balancing at this_level.
*
- * Returns: - the busiest group if imbalance exists.
+ * Return: - The busiest group if imbalance exists.
* - If no imbalance and user has opted for power-savings balance,
* return the least loaded group whose CPUs can be
* put to idle by rebalancing its tasks onto our group.
@@ -5786,7 +5792,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
entity_tick(cfs_rq, se, queued);
}
- if (sched_feat_numa(NUMA))
+ if (numabalancing_enabled)
task_tick_numa(rq, curr);
update_rq_runnable_avg(rq, 1);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ac09d98490aa..07f6fc468e17 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2346,7 +2346,11 @@ static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp,
int write, void *data)
{
if (write) {
- *valp = msecs_to_jiffies(*negp ? -*lvalp : *lvalp);
+ unsigned long jif = msecs_to_jiffies(*negp ? -*lvalp : *lvalp);
+
+ if (jif > INT_MAX)
+ return 1;
+ *valp = (int)jif;
} else {
int val = *valp;
unsigned long lval;
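The guard added above rejects millisecond values whose jiffies conversion no longer fits the int-sized sysctl slot, rather than silently truncating. A simplified sketch (HZ and the conversion are stand-ins for the kernel helpers):

#include <limits.h>
#include <stdio.h>

#define HZ 1000ULL   /* assumption for illustration */

/* Simplified stand-in for the kernel helper (no rounding). */
static unsigned long long msecs_to_jiffies(unsigned long long ms)
{
	return ms * HZ / 1000;
}

/* Mirrors the added check: refuse values that overflow the int slot. */
static int store_ms_as_jiffies(unsigned long long ms, int *valp)
{
	unsigned long long jif = msecs_to_jiffies(ms);

	if (jif > INT_MAX)
		return 1;
	*valp = (int)jif;
	return 0;
}

int main(void)
{
	int v = 0;

	printf("%d\n", store_ms_as_jiffies(5000, &v));           /* 0, v == 5000 */
	printf("%d\n", store_ms_as_jiffies(3000000000ULL, &v));  /* 1, rejected */
	return 0;
}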
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a326f27d7f09..0b479a6a22bb 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
BUG_ON(bits > 32);
WARN_ON(!irqs_disabled());
read_sched_clock = read;
- sched_clock_mask = (1 << bits) - 1;
+ sched_clock_mask = (1ULL << bits) - 1;
cd.rate = rate;
/* calculate the mult/shift to convert counter ticks to ns. */
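The one-liner above matters because shifting a 32-bit 1 by 32 bits is undefined behaviour in C; promoting to 64 bits first yields the intended all-ones mask. A small demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int bits = 32;

	/* Old form: (1 << bits) shifts a 32-bit int by its full width,
	 * which is undefined behaviour for bits == 32. */

	/* Fixed form: promote to 64 bits before shifting. */
	uint64_t mask = (1ULL << bits) - 1;

	printf("mask = 0x%llx\n", (unsigned long long)mask); /* 0xffffffff */
	return 0;
}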
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e80183f4a6c4..e8a1516cc0a3 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -182,7 +182,8 @@ static bool can_stop_full_tick(void)
* Don't allow the user to think they can get
* full NO_HZ with this machine.
*/
- WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
+ WARN_ONCE(have_nohz_full_mask,
+ "NO_HZ FULL will not work with unstable sched clock");
return false;
}
#endif
@@ -343,8 +344,6 @@ static int tick_nohz_init_all(void)
void __init tick_nohz_init(void)
{
- int cpu;
-
if (!have_nohz_full_mask) {
if (tick_nohz_init_all() < 0)
return;
@@ -827,13 +826,10 @@ void tick_nohz_irq_exit(void)
{
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
- if (ts->inidle) {
- /* Cancel the timer because CPU already waken up from the C-states*/
- menu_hrtimer_cancel();
+ if (ts->inidle)
__tick_nohz_idle_enter(ts);
- } else {
+ else
tick_nohz_full_stop_tick(ts);
- }
}
/**
@@ -931,8 +927,6 @@ void tick_nohz_idle_exit(void)
ts->inidle = 0;
- /* Cancel the timer because CPU already waken up from the C-states*/
- menu_hrtimer_cancel();
if (ts->idle_active || ts->tick_stopped)
now = ktime_get();
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8ce9eefc5bb4..a6d098c6df3f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2169,12 +2169,57 @@ static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
{
- struct ftrace_hash *hash;
+ /*
+ * An empty filter_hash defaults to tracing the whole module.
+ * But the notrace hash requires testing individual module functions.
+ */
+ return ftrace_hash_empty(ops->filter_hash) &&
+ ftrace_hash_empty(ops->notrace_hash);
+}
+
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+ /* If ops isn't enabled, ignore it */
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return 0;
+
+ /* If ops traces all mods, we already accounted for it */
+ if (ops_traces_mod(ops))
+ return 0;
+
+ /* The function must be in the filter */
+ if (!ftrace_hash_empty(ops->filter_hash) &&
+ !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+ return 0;
+
+ /* If in notrace hash, we ignore it too */
+ if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+ return 0;
+
+ return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+ struct ftrace_ops *ops;
+ int cnt = 0;
- hash = ops->filter_hash;
- return ftrace_hash_empty(hash);
+ for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+ if (ops_references_rec(ops, rec))
+ cnt++;
+ }
+
+ return cnt;
}
static int ftrace_update_code(struct module *mod)
@@ -2183,6 +2228,7 @@ static int ftrace_update_code(struct module *mod)
struct dyn_ftrace *p;
cycle_t start, stop;
unsigned long ref = 0;
+ bool test = false;
int i;
/*
@@ -2196,9 +2242,12 @@ static int ftrace_update_code(struct module *mod)
for (ops = ftrace_ops_list;
ops != &ftrace_list_end; ops = ops->next) {
- if (ops->flags & FTRACE_OPS_FL_ENABLED &&
- ops_traces_mod(ops))
- ref++;
+ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+ if (ops_traces_mod(ops))
+ ref++;
+ else
+ test = true;
+ }
}
}
@@ -2208,12 +2257,16 @@ static int ftrace_update_code(struct module *mod)
for (pg = ftrace_new_pgs; pg; pg = pg->next) {
for (i = 0; i < pg->index; i++) {
+ int cnt = ref;
+
/* If something went wrong, bail without enabling anything */
if (unlikely(ftrace_disabled))
return -1;
p = &pg->records[i];
- p->flags = ref;
+ if (test)
+ cnt += referenced_filters(p);
+ p->flags = cnt;
/*
* Do the initial record conversion from mcount jump
@@ -2233,7 +2286,7 @@ static int ftrace_update_code(struct module *mod)
* conversion puts the module to the correct state, thus
* passing the ftrace_make_call check.
*/
- if (ftrace_start_up && ref) {
+ if (ftrace_start_up && cnt) {
int failed = __ftrace_replace_code(p, 1);
if (failed)
ftrace_bug(failed, p->ip);
@@ -3384,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
return add_hash_entry(hash, ip);
}
+static void ftrace_ops_update_code(struct ftrace_ops *ops)
+{
+ if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+}
+
static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
unsigned long ip, int remove, int reset, int enable)
@@ -3426,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
- if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
- && ftrace_enabled)
- ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ if (!ret)
+ ftrace_ops_update_code(ops);
mutex_unlock(&ftrace_lock);
@@ -3655,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(iter->ops, filter_hash,
orig_hash, iter->hash);
- if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
- && ftrace_enabled)
- ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ if (!ret)
+ ftrace_ops_update_code(iter->ops);
mutex_unlock(&ftrace_lock);
}
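ops_references_rec() above boils down to: the ops references a record if its filter hash is empty or contains the record's ip, unless the notrace hash contains it. A toy model of that test with hashes reduced to arrays (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct hash {
	const unsigned long *ips;
	int n;
};

static bool hash_empty(const struct hash *h) { return h->n == 0; }

static bool hash_contains(const struct hash *h, unsigned long ip)
{
	for (int i = 0; i < h->n; i++)
		if (h->ips[i] == ip)
			return true;
	return false;
}

/* Same shape as ops_references_rec(): filter must match (or be empty),
 * and the notrace hash must not. */
static bool references_rec(const struct hash *filter,
			   const struct hash *notrace, unsigned long ip)
{
	if (!hash_empty(filter) && !hash_contains(filter, ip))
		return false;
	if (hash_contains(notrace, ip))
		return false;
	return true;
}

int main(void)
{
	unsigned long f[] = { 0x1000 }, n[] = { 0x2000 };
	struct hash filter = { f, 1 }, notrace = { n, 1 }, empty = { 0, 0 };

	printf("%d\n", references_rec(&filter, &notrace, 0x1000)); /* 1 */
	printf("%d\n", references_rec(&filter, &notrace, 0x3000)); /* 0 */
	printf("%d\n", references_rec(&empty,  &notrace, 0x2000)); /* 0 */
	return 0;
}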
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 882ec1dd1515..496f94d57698 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -243,20 +243,25 @@ int filter_current_check_discard(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);
-cycle_t ftrace_now(int cpu)
+cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
u64 ts;
/* Early boot up does not have a buffer yet */
- if (!global_trace.trace_buffer.buffer)
+ if (!buf->buffer)
return trace_clock_local();
- ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
- ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
+ ts = ring_buffer_time_stamp(buf->buffer, cpu);
+ ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
return ts;
}
+cycle_t ftrace_now(int cpu)
+{
+ return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+}
+
/**
* tracing_is_enabled - Show if global_trace has been disabled
*
@@ -1211,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
/* Make sure all commits have finished */
synchronize_sched();
- buf->time_start = ftrace_now(buf->cpu);
+ buf->time_start = buffer_ftrace_now(buf, buf->cpu);
for_each_online_cpu(cpu)
ring_buffer_reset_cpu(buffer, cpu);
@@ -1219,11 +1224,6 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
ring_buffer_record_enable(buffer);
}
-void tracing_reset_current(int cpu)
-{
- tracing_reset(&global_trace.trace_buffer, cpu);
-}
-
/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
@@ -4151,6 +4151,7 @@ waitagain:
memset(&iter->seq, 0,
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
+ cpumask_clear(iter->started);
iter->pos = -1;
trace_event_read_lock();
@@ -4468,7 +4469,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
/* disable tracing ? */
if (trace_flags & TRACE_ITER_STOP_ON_FREE)
- tracing_off();
+ tracer_tracing_off(tr);
/* resize the ring buffer to 0 */
tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
@@ -4633,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
* New clock may not be consistent with the previous clock.
* Reset the buffer so that it doesn't have incomparable timestamps.
*/
- tracing_reset_online_cpus(&global_trace.trace_buffer);
+ tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
- tracing_reset_online_cpus(&global_trace.max_buffer);
+ tracing_reset_online_cpus(&tr->max_buffer);
#endif
mutex_unlock(&trace_types_lock);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 898f868833f2..29a7ebcfb426 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -409,33 +409,42 @@ static void put_system(struct ftrace_subsystem_dir *dir)
mutex_unlock(&event_mutex);
}
-/*
- * Open and update trace_array ref count.
- * Must have the current trace_array passed to it.
- */
-static int tracing_open_generic_file(struct inode *inode, struct file *filp)
+static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
- struct ftrace_event_file *file = inode->i_private;
- struct trace_array *tr = file->tr;
- int ret;
+ if (!dir)
+ return;
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ if (!--dir->nr_events) {
+ debugfs_remove_recursive(dir->entry);
+ list_del(&dir->list);
+ __put_system_dir(dir);
+ }
+}
- ret = tracing_open_generic(inode, filp);
- if (ret < 0)
- trace_array_put(tr);
- return ret;
+static void *event_file_data(struct file *filp)
+{
+ return ACCESS_ONCE(file_inode(filp)->i_private);
}
-static int tracing_release_generic_file(struct inode *inode, struct file *filp)
+static void remove_event_file_dir(struct ftrace_event_file *file)
{
- struct ftrace_event_file *file = inode->i_private;
- struct trace_array *tr = file->tr;
+ struct dentry *dir = file->dir;
+ struct dentry *child;
- trace_array_put(tr);
+ if (dir) {
+ spin_lock(&dir->d_lock); /* probably unneeded */
+ list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+ if (child->d_inode) /* probably unneeded */
+ child->d_inode->i_private = NULL;
+ }
+ spin_unlock(&dir->d_lock);
- return 0;
+ debugfs_remove_recursive(dir);
+ }
+
+ list_del(&file->list);
+ remove_subsystem(file->system);
+ kmem_cache_free(file_cachep, file);
}
/*
@@ -679,15 +688,25 @@ static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_file *file = filp->private_data;
+ struct ftrace_event_file *file;
+ unsigned long flags;
char buf[4] = "0";
- if (file->flags & FTRACE_EVENT_FL_ENABLED &&
- !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+ mutex_lock(&event_mutex);
+ file = event_file_data(filp);
+ if (likely(file))
+ flags = file->flags;
+ mutex_unlock(&event_mutex);
+
+ if (!file)
+ return -ENODEV;
+
+ if (flags & FTRACE_EVENT_FL_ENABLED &&
+ !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
strcpy(buf, "1");
- if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
- file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+ if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
+ flags & FTRACE_EVENT_FL_SOFT_MODE)
strcat(buf, "*");
strcat(buf, "\n");
@@ -699,13 +718,10 @@ static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_file *file = filp->private_data;
+ struct ftrace_event_file *file;
unsigned long val;
int ret;
- if (!file)
- return -EINVAL;
-
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
@@ -717,8 +733,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
switch (val) {
case 0:
case 1:
+ ret = -ENODEV;
mutex_lock(&event_mutex);
- ret = ftrace_event_enable_disable(file, val);
+ file = event_file_data(filp);
+ if (likely(file))
+ ret = ftrace_event_enable_disable(file, val);
mutex_unlock(&event_mutex);
break;
@@ -825,7 +844,7 @@ enum {
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct ftrace_event_call *call = m->private;
+ struct ftrace_event_call *call = event_file_data(m->private);
struct list_head *common_head = &ftrace_common_fields;
struct list_head *head = trace_get_fields(call);
struct list_head *node = v;
@@ -857,7 +876,7 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
static int f_show(struct seq_file *m, void *v)
{
- struct ftrace_event_call *call = m->private;
+ struct ftrace_event_call *call = event_file_data(m->private);
struct ftrace_event_field *field;
const char *array_descriptor;
@@ -910,6 +929,11 @@ static void *f_start(struct seq_file *m, loff_t *pos)
void *p = (void *)FORMAT_HEADER;
loff_t l = 0;
+ /* ->stop() is called even if ->start() fails */
+ mutex_lock(&event_mutex);
+ if (!event_file_data(m->private))
+ return ERR_PTR(-ENODEV);
+
while (l < *pos && p)
p = f_next(m, p, &l);
@@ -918,6 +942,7 @@ static void *f_start(struct seq_file *m, loff_t *pos)
static void f_stop(struct seq_file *m, void *p)
{
+ mutex_unlock(&event_mutex);
}
static const struct seq_operations trace_format_seq_ops = {
@@ -929,7 +954,6 @@ static const struct seq_operations trace_format_seq_ops = {
static int trace_format_open(struct inode *inode, struct file *file)
{
- struct ftrace_event_call *call = inode->i_private;
struct seq_file *m;
int ret;
@@ -938,7 +962,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
return ret;
m = file->private_data;
- m->private = call;
+ m->private = file;
return 0;
}
@@ -946,14 +970,18 @@ static int trace_format_open(struct inode *inode, struct file *file)
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ int id = (long)event_file_data(filp);
char buf[32];
int len;
if (*ppos)
return 0;
- len = sprintf(buf, "%d\n", call->event.type);
+ if (unlikely(!id))
+ return -ENODEV;
+
+ len = sprintf(buf, "%d\n", id);
+
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
@@ -961,21 +989,28 @@ static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ struct ftrace_event_call *call;
struct trace_seq *s;
- int r;
+ int r = -ENODEV;
if (*ppos)
return 0;
s = kmalloc(sizeof(*s), GFP_KERNEL);
+
if (!s)
return -ENOMEM;
trace_seq_init(s);
- print_event_filter(call, s);
- r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+ mutex_lock(&event_mutex);
+ call = event_file_data(filp);
+ if (call)
+ print_event_filter(call, s);
+ mutex_unlock(&event_mutex);
+
+ if (call)
+ r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
kfree(s);
@@ -986,9 +1021,9 @@ static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ struct ftrace_event_call *call;
char *buf;
- int err;
+ int err = -ENODEV;
if (cnt >= PAGE_SIZE)
return -EINVAL;
@@ -1003,7 +1038,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
}
buf[cnt] = '\0';
- err = apply_event_filter(call, buf);
+ mutex_lock(&event_mutex);
+ call = event_file_data(filp);
+ if (call)
+ err = apply_event_filter(call, buf);
+ mutex_unlock(&event_mutex);
+
free_page((unsigned long) buf);
if (err < 0)
return err;
@@ -1225,10 +1265,9 @@ static const struct file_operations ftrace_set_event_fops = {
};
static const struct file_operations ftrace_enable_fops = {
- .open = tracing_open_generic_file,
+ .open = tracing_open_generic,
.read = event_enable_read,
.write = event_enable_write,
- .release = tracing_release_generic_file,
.llseek = default_llseek,
};
@@ -1240,7 +1279,6 @@ static const struct file_operations ftrace_event_format_fops = {
};
static const struct file_operations ftrace_event_id_fops = {
- .open = tracing_open_generic,
.read = event_id_read,
.llseek = default_llseek,
};
@@ -1488,8 +1526,8 @@ event_create_dir(struct dentry *parent,
#ifdef CONFIG_PERF_EVENTS
if (call->event.type && call->class->reg)
- trace_create_file("id", 0444, file->dir, call,
- id);
+ trace_create_file("id", 0444, file->dir,
+ (void *)(long)call->event.type, id);
#endif
/*
@@ -1514,33 +1552,16 @@ event_create_dir(struct dentry *parent,
return 0;
}
-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
-{
- if (!dir)
- return;
-
- if (!--dir->nr_events) {
- debugfs_remove_recursive(dir->entry);
- list_del(&dir->list);
- __put_system_dir(dir);
- }
-}
-
static void remove_event_from_tracers(struct ftrace_event_call *call)
{
struct ftrace_event_file *file;
struct trace_array *tr;
do_for_each_event_file_safe(tr, file) {
-
if (file->event_call != call)
continue;
- list_del(&file->list);
- debugfs_remove_recursive(file->dir);
- remove_subsystem(file->system);
- kmem_cache_free(file_cachep, file);
-
+ remove_event_file_dir(file);
/*
* The do_for_each_event_file_safe() is
* a double loop. After finding the call for this
@@ -1692,16 +1713,53 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
destroy_preds(call);
}
+static int probe_remove_event_call(struct ftrace_event_call *call)
+{
+ struct trace_array *tr;
+ struct ftrace_event_file *file;
+
+#ifdef CONFIG_PERF_EVENTS
+ if (call->perf_refcount)
+ return -EBUSY;
+#endif
+ do_for_each_event_file(tr, file) {
+ if (file->event_call != call)
+ continue;
+ /*
+ * We can't rely on ftrace_event_enable_disable(enable => 0)
+ * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
+ * TRACE_REG_UNREGISTER.
+ */
+ if (file->flags & FTRACE_EVENT_FL_ENABLED)
+ return -EBUSY;
+ /*
+ * The do_for_each_event_file_safe() is
+ * a double loop. After finding the call for this
+ * trace_array, we use break to jump to the next
+ * trace_array.
+ */
+ break;
+ } while_for_each_event_file();
+
+ __trace_remove_event_call(call);
+
+ return 0;
+}
+
/* Remove an event_call */
-void trace_remove_event_call(struct ftrace_event_call *call)
+int trace_remove_event_call(struct ftrace_event_call *call)
{
+ int ret;
+
mutex_lock(&trace_types_lock);
mutex_lock(&event_mutex);
down_write(&trace_event_sem);
- __trace_remove_event_call(call);
+ ret = probe_remove_event_call(call);
up_write(&trace_event_sem);
mutex_unlock(&event_mutex);
mutex_unlock(&trace_types_lock);
+
+ return ret;
}
#define for_each_event(event, start, end) \
@@ -2270,12 +2328,8 @@ __trace_remove_event_dirs(struct trace_array *tr)
{
struct ftrace_event_file *file, *next;
- list_for_each_entry_safe(file, next, &tr->events, list) {
- list_del(&file->list);
- debugfs_remove_recursive(file->dir);
- remove_subsystem(file->system);
- kmem_cache_free(file_cachep, file);
- }
+ list_for_each_entry_safe(file, next, &tr->events, list)
+ remove_event_file_dir(file);
}
static void
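The trace_events.c changes above hang the live object off inode->i_private, NULL it on directory removal, and make every file operation revalidate under event_mutex before touching it. A toy version of that check-under-lock pattern (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *slot;   /* stands in for inode->i_private */

struct event_file {
	int flags;
};

static void *event_file_data(void) { return slot; }

/* Every reader revalidates the slot under the mutex; a NULL means the
 * backing object was torn down and the caller reports -ENODEV. */
static int read_flags(int *out)
{
	struct event_file *file;
	int ret = -1;   /* -ENODEV stand-in */

	pthread_mutex_lock(&event_mutex);
	file = event_file_data();
	if (file) {
		*out = file->flags;
		ret = 0;
	}
	pthread_mutex_unlock(&event_mutex);
	return ret;
}

int main(void)
{
	struct event_file f = { .flags = 3 };
	int v;

	slot = &f;
	printf("%d\n", read_flags(&v));  /* 0, v == 3 */
	slot = NULL;                     /* directory removed */
	printf("%d\n", read_flags(&v));  /* -1 */
	return 0;
}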
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0c7b75a8acc8..97daa8cf958d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -637,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps,
free_page((unsigned long) buf);
}
+/* caller must hold event_mutex */
void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
{
- struct event_filter *filter;
+ struct event_filter *filter = call->filter;
- mutex_lock(&event_mutex);
- filter = call->filter;
if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
trace_seq_puts(s, "none\n");
- mutex_unlock(&event_mutex);
}
void print_subsystem_event_filter(struct event_subsystem *system,
@@ -1841,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system,
return err;
}
+/* caller must hold event_mutex */
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{
struct event_filter *filter;
- int err = 0;
-
- mutex_lock(&event_mutex);
+ int err;
if (!strcmp(strstrip(filter_string), "0")) {
filter_disable(call);
filter = call->filter;
if (!filter)
- goto out_unlock;
+ return 0;
RCU_INIT_POINTER(call->filter, NULL);
/* Make sure the filter is not being used */
synchronize_sched();
__free_filter(filter);
- goto out_unlock;
+ return 0;
}
err = create_filter(call, filter_string, true, &filter);
@@ -1884,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
__free_filter(tmp);
}
}
-out_unlock:
- mutex_unlock(&event_mutex);
return err;
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3811487e7a7a..243f6834d026 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -95,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
}
static int register_probe_event(struct trace_probe *tp);
-static void unregister_probe_event(struct trace_probe *tp);
+static int unregister_probe_event(struct trace_probe *tp);
static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);
@@ -351,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp)
if (trace_probe_is_enabled(tp))
return -EBUSY;
+ /* Will fail if probe is being used by ftrace or perf */
+ if (unregister_probe_event(tp))
+ return -EBUSY;
+
__unregister_trace_probe(tp);
list_del(&tp->list);
- unregister_probe_event(tp);
return 0;
}
@@ -632,7 +635,9 @@ static int release_all_trace_probes(void)
/* TODO: Use batch unregistration */
while (!list_empty(&probe_list)) {
tp = list_entry(probe_list.next, struct trace_probe, list);
- unregister_trace_probe(tp);
+ ret = unregister_trace_probe(tp);
+ if (ret)
+ goto end;
free_trace_probe(tp);
}
@@ -1247,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp)
return ret;
}
-static void unregister_probe_event(struct trace_probe *tp)
+static int unregister_probe_event(struct trace_probe *tp)
{
+ int ret;
+
/* tp->event is unregistered in trace_remove_event_call() */
- trace_remove_event_call(&tp->call);
- kfree(tp->call.print_fmt);
+ ret = trace_remove_event_call(&tp->call);
+ if (!ret)
+ kfree(tp->call.print_fmt);
+ return ret;
}
/* Make a debugfs interface for controlling probe points */
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index a23d2d71188e..272261b5f94f 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -70,7 +70,7 @@ struct trace_uprobe {
(sizeof(struct probe_arg) * (n)))
static int register_uprobe_event(struct trace_uprobe *tu);
-static void unregister_uprobe_event(struct trace_uprobe *tu);
+static int unregister_uprobe_event(struct trace_uprobe *tu);
static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);
@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
}
/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
-static void unregister_trace_uprobe(struct trace_uprobe *tu)
+static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
+ int ret;
+
+ ret = unregister_uprobe_event(tu);
+ if (ret)
+ return ret;
+
list_del(&tu->list);
- unregister_uprobe_event(tu);
free_trace_uprobe(tu);
+ return 0;
}
/* Register a trace_uprobe and probe_event */
@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
/* register as an event */
old_tp = find_probe_event(tu->call.name, tu->call.class->system);
- if (old_tp)
+ if (old_tp) {
/* delete old event */
- unregister_trace_uprobe(old_tp);
+ ret = unregister_trace_uprobe(old_tp);
+ if (ret)
+ goto end;
+ }
ret = register_uprobe_event(tu);
if (ret) {
@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv)
group = UPROBE_EVENT_SYSTEM;
if (is_delete) {
+ int ret;
+
if (!event) {
pr_info("Delete command needs an event name.\n");
return -EINVAL;
@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv)
return -ENOENT;
}
/* delete an event */
- unregister_trace_uprobe(tu);
+ ret = unregister_trace_uprobe(tu);
mutex_unlock(&uprobe_lock);
- return 0;
+ return ret;
}
if (argc < 2) {
@@ -408,16 +419,20 @@ fail_address_parse:
return ret;
}
-static void cleanup_all_probes(void)
+static int cleanup_all_probes(void)
{
struct trace_uprobe *tu;
+ int ret = 0;
mutex_lock(&uprobe_lock);
while (!list_empty(&uprobe_list)) {
tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
- unregister_trace_uprobe(tu);
+ ret = unregister_trace_uprobe(tu);
+ if (ret)
+ break;
}
mutex_unlock(&uprobe_lock);
+ return ret;
}
/* Probes listing interfaces */
@@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = {
static int probes_open(struct inode *inode, struct file *file)
{
- if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
- cleanup_all_probes();
+ int ret;
+
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ ret = cleanup_all_probes();
+ if (ret)
+ return ret;
+ }
return seq_open(file, &probes_seq_op);
}
@@ -968,12 +988,17 @@ static int register_uprobe_event(struct trace_uprobe *tu)
return ret;
}
-static void unregister_uprobe_event(struct trace_uprobe *tu)
+static int unregister_uprobe_event(struct trace_uprobe *tu)
{
+ int ret;
+
/* tu->event is unregistered in trace_remove_event_call() */
- trace_remove_event_call(&tu->call);
+ ret = trace_remove_event_call(&tu->call);
+ if (ret)
+ return ret;
kfree(tu->call.print_fmt);
tu->call.print_fmt = NULL;
+ return 0;
}
/* Make a trace interface for controlling probe points */
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index d8c30db06c5b..9064b919a406 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -62,6 +62,9 @@ int create_user_ns(struct cred *new)
kgid_t group = new->egid;
int ret;
+ if (parent_ns->level > 32)
+ return -EUSERS;
+
/*
* Verify that we can not violate the policy of which files
* may be accessed that is specified by the root directory,
@@ -92,6 +95,7 @@ int create_user_ns(struct cred *new)
atomic_set(&ns->count, 1);
/* Leave the new->user_ns reference with the new user namespace. */
ns->parent = parent_ns;
+ ns->level = parent_ns->level + 1;
ns->owner = owner;
ns->group = group;
@@ -105,16 +109,21 @@ int create_user_ns(struct cred *new)
int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
struct cred *cred;
+ int err = -ENOMEM;
if (!(unshare_flags & CLONE_NEWUSER))
return 0;
cred = prepare_creds();
- if (!cred)
- return -ENOMEM;
+ if (cred) {
+ err = create_user_ns(cred);
+ if (err)
+ put_cred(cred);
+ else
+ *new_cred = cred;
+ }
- *new_cred = cred;
- return create_user_ns(cred);
+ return err;
}
void free_user_ns(struct user_namespace *ns)
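The level field added above caps user-namespace nesting: each child records parent->level + 1 and creation fails with -EUSERS once the chain is 32 deep. A toy sketch of the cap (the constant mirrors the "> 32" check; names are hypothetical):

#include <stdio.h>

#define EUSERS        87
#define MAX_NS_LEVEL  32   /* mirrors the "> 32" check in the hunk above */

struct user_ns {
	int level;
	struct user_ns *parent;
};

/* Each child records parent->level + 1; creation fails once the chain
 * gets too deep, bounding any walk or teardown over the parent links. */
static int create_child(struct user_ns *parent, struct user_ns *child)
{
	if (parent->level > MAX_NS_LEVEL)
		return -EUSERS;
	child->parent = parent;
	child->level = parent->level + 1;
	return 0;
}

int main(void)
{
	struct user_ns chain[40] = { { 0, 0 } };
	int i;

	for (i = 1; i < 40; i++) {
		if (create_child(&chain[i - 1], &chain[i])) {
			printf("creation failed at depth %d\n", i);
			break;
		}
	}
	return 0;
}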
diff --git a/kernel/wait.c b/kernel/wait.c
index dec68bd4e9d8..d550920e040c 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -363,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
/**
* wake_up_atomic_t - Wake up a waiter on a atomic_t
- * @word: The word being waited on, a kernel virtual address
- * @bit: The bit of the word being waited on
+ * @p: The atomic_t being waited on, a kernel virtual address
*
* Wake up anyone waiting for the atomic_t to go to zero.
*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0b72e816b8d0..7f5d4be22034 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2817,6 +2817,19 @@ already_gone:
return false;
}
+static bool __flush_work(struct work_struct *work)
+{
+ struct wq_barrier barr;
+
+ if (start_flush_work(work, &barr)) {
+ wait_for_completion(&barr.done);
+ destroy_work_on_stack(&barr.work);
+ return true;
+ } else {
+ return false;
+ }
+}
+
/**
* flush_work - wait for a work to finish executing the last queueing instance
* @work: the work to flush
@@ -2830,18 +2843,10 @@ already_gone:
*/
bool flush_work(struct work_struct *work)
{
- struct wq_barrier barr;
-
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
- if (start_flush_work(work, &barr)) {
- wait_for_completion(&barr.done);
- destroy_work_on_stack(&barr.work);
- return true;
- } else {
- return false;
- }
+ return __flush_work(work);
}
EXPORT_SYMBOL_GPL(flush_work);
@@ -3411,6 +3416,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
{
to->nice = from->nice;
cpumask_copy(to->cpumask, from->cpumask);
+ /*
+ * Unlike hash and equality test, this function doesn't ignore
+ * ->no_numa as it is used for both pool and wq attrs. Instead,
+ * get_unbound_pool() explicitly clears ->no_numa after copying.
+ */
+ to->no_numa = from->no_numa;
}
/* hash value of the content of @attr */
@@ -3578,6 +3589,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
copy_workqueue_attrs(pool->attrs, attrs);
+ /*
+ * no_numa isn't a worker_pool attribute, always clear it. See
+ * 'struct workqueue_attrs' comments for detail.
+ */
+ pool->attrs->no_numa = false;
+
/* if cpumask is contained inside a NUMA node, we belong to that node */
if (wq_numa_enabled) {
for_each_node(node) {
@@ -4756,7 +4773,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
schedule_work_on(cpu, &wfc.work);
- flush_work(&wfc.work);
+
+ /*
+ * The work item is on-stack and can't lead to deadlock through
+ * flushing. Use __flush_work() to avoid spurious lockdep warnings
+ * when work_on_cpu()s are nested.
+ */
+ __flush_work(&wfc.work);
+
return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index fd94058bd7f9..28321d8f75ef 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -437,7 +437,7 @@ int lz4_compress(const unsigned char *src, size_t src_len,
exit:
return ret;
}
-EXPORT_SYMBOL_GPL(lz4_compress);
+EXPORT_SYMBOL(lz4_compress);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 compressor");
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index d3414eae73a1..411be80ddb46 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -299,7 +299,7 @@ exit_0:
return ret;
}
#ifndef STATIC
-EXPORT_SYMBOL_GPL(lz4_decompress);
+EXPORT_SYMBOL(lz4_decompress);
#endif
int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
@@ -319,8 +319,8 @@ exit_0:
return ret;
}
#ifndef STATIC
-EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize);
+EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 Decompressor");
#endif
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index eb1a74f5e368..f344f76b6559 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -533,7 +533,7 @@ int lz4hc_compress(const unsigned char *src, size_t src_len,
exit:
return ret;
}
-EXPORT_SYMBOL_GPL(lz4hc_compress);
+EXPORT_SYMBOL(lz4hc_compress);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4HC compressor");
diff --git a/mm/fremap.c b/mm/fremap.c
index 87da3590c61e..5bff08147768 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
int err = -ENOMEM;
- pte_t *pte;
+ pte_t *pte, ptfile;
spinlock_t *ptl;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
- if (!pte_none(*pte))
+ ptfile = pgoff_to_pte(pgoff);
+
+ if (!pte_none(*pte)) {
+ if (pte_present(*pte) && pte_soft_dirty(*pte))
+ pte_file_mksoft_dirty(ptfile);
zap_pte(mm, vma, addr, pte);
+ }
- set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
+ set_pte_at(mm, addr, pte, ptfile);
/*
* We don't need to run update_mmu_cache() here because the "file pte"
* being installed by install_file_pte() is not a real pte - it's a
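The fremap change above carries the soft-dirty bit from a present pte into the file pte that replaces it, so dirty tracking survives the conversion. A toy model with a hypothetical bit layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical pte bit layout, for illustration only. */
#define PTE_PRESENT    (1u << 0)
#define PTE_SOFT_DIRTY (1u << 1)

typedef uint32_t pte_t;

static int pte_present(pte_t p)             { return p & PTE_PRESENT; }
static int pte_soft_dirty(pte_t p)          { return p & PTE_SOFT_DIRTY; }
static pte_t pte_file_mksoft_dirty(pte_t p) { return p | PTE_SOFT_DIRTY; }

/* Mirrors the hunk: when replacing a present, soft-dirty pte with a
 * file pte, propagate the soft-dirty bit into the replacement. */
static pte_t make_file_pte(pte_t old, pte_t ptfile)
{
	if (pte_present(old) && pte_soft_dirty(old))
		ptfile = pte_file_mksoft_dirty(ptfile);
	return ptfile;
}

int main(void)
{
	pte_t old = PTE_PRESENT | PTE_SOFT_DIRTY;

	printf("0x%x\n", make_file_pte(old, 0));         /* 0x2: bit kept */
	printf("0x%x\n", make_file_pte(PTE_PRESENT, 0)); /* 0x0 */
	return 0;
}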
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 243e710c6039..a92012a71702 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1620,7 +1620,9 @@ static void __split_huge_page_refcount(struct page *page,
((1L << PG_referenced) |
(1L << PG_swapbacked) |
(1L << PG_mlocked) |
- (1L << PG_uptodate)));
+ (1L << PG_uptodate) |
+ (1L << PG_active) |
+ (1L << PG_unevictable)));
page_tail->flags |= (1L << PG_dirty);
/* clear PageTail before overwriting first_page */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83aff0a4d093..b60f33080a28 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
mm = vma->vm_mm;
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, start, end);
__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
tlb_finish_mmu(&tlb, start, end);
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 00a7a664b9c1..0878ff7c26a9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
if (!s->memcg_params)
return -ENOMEM;
- INIT_WORK(&s->memcg_params->destroy,
- kmem_cache_destroy_work_func);
if (memcg) {
s->memcg_params->memcg = memcg;
s->memcg_params->root_cache = root_cache;
+ INIT_WORK(&s->memcg_params->destroy,
+ kmem_cache_destroy_work_func);
} else
s->memcg_params->is_root_cache = true;
@@ -6335,6 +6335,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
mem_cgroup_invalidate_reclaim_iterators(memcg);
mem_cgroup_reparent_charges(memcg);
mem_cgroup_destroy_all_caches(memcg);
+ vmpressure_cleanup(&memcg->vmpressure);
}
static void mem_cgroup_css_free(struct cgroup *cont)
@@ -6968,7 +6969,6 @@ struct cgroup_subsys mem_cgroup_subsys = {
#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
- /* consider enabled if no parameter or 1 is given */
if (!strcmp(s, "1"))
really_do_swap_account = 1;
else if (!strcmp(s, "0"))
diff --git a/mm/memory.c b/mm/memory.c
index 1ce2e2a734fc..af84bc0ec17c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
* tear-down from @mm. A teardown of the entire address space (exit/execve)
* is requested by passing @start == 0 and @end == ~0UL.
*/
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ /* Is it from 0 to ~0? */
+ tlb->fullmm = !(start | (end+1));
tlb->need_flush_all = 0;
- tlb->start = -1UL;
- tlb->end = 0;
+ tlb->start = start;
+ tlb->end = end;
tlb->need_flush = 0;
tlb->local.next = NULL;
tlb->local.nr = 0;
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
{
struct mmu_gather_batch *batch, *next;
- tlb->start = start;
- tlb->end = end;
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
spinlock_t *ptl;
pte_t *start_pte;
pte_t *pte;
- unsigned long range_start = addr;
again:
init_rss_vec(rss);
@@ -1141,9 +1139,12 @@ again:
continue;
if (unlikely(details) && details->nonlinear_vma
&& linear_page_index(details->nonlinear_vma,
- addr) != page->index)
- set_pte_at(mm, addr, pte,
- pgoff_to_pte(page->index));
+ addr) != page->index) {
+ pte_t ptfile = pgoff_to_pte(page->index);
+ if (pte_soft_dirty(ptent))
+ pte_file_mksoft_dirty(ptfile);
+ set_pte_at(mm, addr, pte, ptfile);
+ }
if (PageAnon(page))
rss[MM_ANONPAGES]--;
else {
@@ -1202,17 +1203,25 @@ again:
* and page-free while holding it.
*/
if (force_flush) {
+ unsigned long old_end;
+
force_flush = 0;
-#ifdef HAVE_GENERIC_MMU_GATHER
- tlb->start = range_start;
+ /*
+ * Flush the TLB just for the previous segment,
+ * then update the range to be the remaining
+ * TLB range.
+ */
+ old_end = tlb->end;
tlb->end = addr;
-#endif
+
tlb_flush_mmu(tlb);
- if (addr != end) {
- range_start = addr;
+
+ tlb->start = addr;
+ tlb->end = old_end;
+
+ if (addr != end)
goto again;
- }
}
return addr;
@@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end = start + size;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, start, end);
for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
unsigned long end = address + size;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, address, end);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, address, end);
unmap_single_vma(&tlb, vma, address, end, details);
@@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
exclusive = 1;
}
flush_icache_page(vma, page);
+ if (pte_swp_soft_dirty(orig_pte))
+ pte = pte_mksoft_dirty(pte);
set_pte_at(mm, address, page_table, pte);
if (page == swapcache)
do_page_add_anon_rmap(page, vma, address, exclusive);
@@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
+ pte_mksoft_dirty(entry);
if (anon) {
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
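
The force_flush rework above flushes the TLB only for the prefix of the range that has actually been unmapped, then makes the remainder the live range and resumes. A compact userspace sketch of that resume pattern, with made-up names (gather, flush_range, walk) standing in for the mmu_gather machinery:

#include <stdio.h>

struct gather {
	unsigned long start, end;
};

static void flush_range(struct gather *g)
{
	printf("flush [%#lx, %#lx)\n", g->start, g->end);
}

static void walk(struct gather *g, unsigned long step, unsigned long batch)
{
	unsigned long addr = g->start, end = g->end, done = 0;

	while (addr < end) {
		addr += step;
		if (++done == batch && addr != end) {
			unsigned long old_end = g->end;

			g->end = addr;		/* flush just the walked prefix */
			flush_range(g);
			g->start = addr;	/* remainder becomes the range */
			g->end = old_end;
			done = 0;
		}
	}
	flush_range(g);				/* final flush of what is left */
}

int main(void)
{
	struct gather g = { 0x1000, 0x9000 };

	walk(&g, 0x1000, 3);	/* forces a mid-walk flush after 3 pages */
	return 0;
}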
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 74310017296e..4baf12e534d1 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -732,7 +732,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (prev) {
vma = prev;
next = vma->vm_next;
- continue;
+ if (mpol_equal(vma_policy(vma), new_pol))
+ continue;
+ /* vma_merge() joined vma && vma->next, case 8 */
+ goto replace;
}
if (vma->vm_start != vmstart) {
err = split_vma(vma->vm_mm, vma, vmstart, 1);
@@ -744,6 +747,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (err)
goto out;
}
+ replace:
err = vma_replace_policy(vma, new_pol);
if (err)
goto out;
diff --git a/mm/mmap.c b/mm/mmap.c
index fbad7b091090..f9c97d10b873 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -865,7 +865,7 @@ again: remove_next = 1 + (end > next->vm_end);
if (next->anon_vma)
anon_vma_merge(vma, next);
mm->map_count--;
- vma_set_policy(vma, vma_policy(next));
+ mpol_put(vma_policy(next));
kmem_cache_free(vm_area_cachep, next);
/*
* In mprotect's case 6 (see comments on vma_merge),
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm,
struct mmu_gather tlb;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
unmap_vmas(&tlb, vma, start, end);
free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm)
lru_add_drain();
flush_cache_mm(mm);
- tlb_gather_mmu(&tlb, mm, 1);
+ tlb_gather_mmu(&tlb, mm, 0, -1);
/* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use -1 here to ensure all VMAs in the mm are unmapped */
unmap_vmas(&tlb, vma, 0, -1);
diff --git a/mm/rmap.c b/mm/rmap.c
index cd356df4f71a..b2e29acd7e3d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
swp_entry_to_pte(make_hwpoison_entry(page)));
} else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(page) };
+ pte_t swp_pte;
if (PageSwapCache(page)) {
/*
@@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
entry = make_migration_entry(page, pte_write(pteval));
}
- set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+ swp_pte = swp_entry_to_pte(entry);
+ if (pte_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ set_pte_at(mm, address, pte, swp_pte);
BUG_ON(pte_file(*pte));
} else if (IS_ENABLED(CONFIG_MIGRATION) &&
(TTU_ACTION(flags) == TTU_MIGRATION)) {
@@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
pteval = ptep_clear_flush(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
- if (page->index != linear_page_index(vma, address))
- set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
+ if (page->index != linear_page_index(vma, address)) {
+ pte_t ptfile = pgoff_to_pte(page->index);
+ if (pte_soft_dirty(pteval))
+ pte_file_mksoft_dirty(ptfile);
+ set_pte_at(mm, address, pte, ptfile);
+ }
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
diff --git a/mm/shmem.c b/mm/shmem.c
index a87990cf9f94..e43dc555069d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1798,7 +1798,8 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
}
}
- offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
+ if (offset >= 0)
+ offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
mutex_unlock(&inode->i_mutex);
return offset;
}
@@ -2908,14 +2909,8 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
/* common code */
-static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
-{
- return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
- dentry->d_name.name);
-}
-
static struct dentry_operations anon_ops = {
- .d_dname = shmem_dname
+ .d_dname = simple_dname
};
/**
diff --git a/mm/slub.c b/mm/slub.c
index 2b02d666bf63..e3ba1f2cf60c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1968,9 +1968,6 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
int pages;
int pobjects;
- if (!s->cpu_partial)
- return;
-
do {
pages = 0;
pobjects = 0;
diff --git a/mm/swap.c b/mm/swap.c
index 4a1d0d2c52fa..62b78a6e224f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -512,12 +512,7 @@ EXPORT_SYMBOL(__lru_cache_add);
*/
void lru_cache_add(struct page *page)
{
- if (PageActive(page)) {
- VM_BUG_ON(PageUnevictable(page));
- } else if (PageUnevictable(page)) {
- VM_BUG_ON(PageActive(page));
- }
-
+ VM_BUG_ON(PageActive(page) && PageUnevictable(page));
VM_BUG_ON(PageLRU(page));
__lru_cache_add(page);
}
@@ -539,6 +534,7 @@ void add_page_to_unevictable_list(struct page *page)
spin_lock_irq(&zone->lru_lock);
lruvec = mem_cgroup_page_lruvec(page, zone);
+ ClearPageActive(page);
SetPageUnevictable(page);
SetPageLRU(page);
add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
@@ -774,8 +770,6 @@ EXPORT_SYMBOL(__pagevec_release);
void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec, struct list_head *list)
{
- int uninitialized_var(active);
- enum lru_list lru;
const int file = 0;
VM_BUG_ON(!PageHead(page));
@@ -787,20 +781,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
if (!list)
SetPageLRU(page_tail);
- if (page_evictable(page_tail)) {
- if (PageActive(page)) {
- SetPageActive(page_tail);
- active = 1;
- lru = LRU_ACTIVE_ANON;
- } else {
- active = 0;
- lru = LRU_INACTIVE_ANON;
- }
- } else {
- SetPageUnevictable(page_tail);
- lru = LRU_UNEVICTABLE;
- }
-
if (likely(PageLRU(page)))
list_add_tail(&page_tail->lru, &page->lru);
else if (list) {
@@ -816,13 +796,13 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
* Use the standard add function to put page_tail on the list,
* but then correct its position so they all end up in order.
*/
- add_page_to_lru_list(page_tail, lruvec, lru);
+ add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
list_head = page_tail->lru.prev;
list_move_tail(&page_tail->lru, list_head);
}
if (!PageUnevictable(page))
- update_page_reclaim_stat(lruvec, file, active);
+ update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -833,7 +813,6 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
int active = PageActive(page);
enum lru_list lru = page_lru(page);
- VM_BUG_ON(PageUnevictable(page));
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
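
With PG_active and PG_unevictable now copied to tail pages at split time (see the mm/huge_memory.c hunk earlier), lru_add_page_tail() can derive the target list from the tail page's own flags via page_lru(). A much-simplified model of that derivation; the real page_lru() also distinguishes file from anon pages:

#include <stdio.h>

enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON, LRU_UNEVICTABLE };

struct page { int active, unevictable; };

static enum lru_list page_lru(const struct page *p)
{
	if (p->unevictable)
		return LRU_UNEVICTABLE;
	return p->active ? LRU_ACTIVE_ANON : LRU_INACTIVE_ANON;
}

int main(void)
{
	struct page head = { .active = 1, .unevictable = 0 };
	struct page tail = head;	/* flags copied when the huge page splits */

	printf("tail goes to list %d (1 == active anon)\n", page_lru(&tail));
	return 0;
}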
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 36af6eeaa67e..6cf2e60983b7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free)
}
#endif /* CONFIG_HIBERNATION */
+static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+ /*
+	 * When the pte holds the soft dirty bit, the pte generated
+	 * from the swap entry does not have it, but it is still the
+	 * same pte from a logical point of view.
+ */
+ pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
+ return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
+#else
+ return pte_same(pte, swp_pte);
+#endif
+}
+
/*
* No need to decide whether this PTE shares the swap entry with others,
* just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
}
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
+ if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
mem_cgroup_cancel_charge_swapin(memcg);
ret = 0;
goto out;
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
* swapoff spends a _lot_ of time in this loop!
* Test inline before going to call unuse_pte.
*/
- if (unlikely(pte_same(*pte, swp_pte))) {
+ if (unlikely(maybe_same_pte(*pte, swp_pte))) {
pte_unmap(pte);
ret = unuse_pte(vma, pmd, addr, entry, page);
if (ret)
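
maybe_same_pte() above treats two ptes as equal modulo the soft-dirty bit, since the pte regenerated from a swap entry never carries it. A userspace model with an invented bit layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_SWP_SOFT_DIRTY (1ull << 1)

typedef uint64_t pte_t;

static int   pte_same(pte_t a, pte_t b)    { return a == b; }
static pte_t pte_swp_mksoft_dirty(pte_t p) { return p | PTE_SWP_SOFT_DIRTY; }

static int maybe_same_pte(pte_t pte, pte_t swp_pte)
{
	/* equal either exactly or up to the soft-dirty bit */
	return pte_same(pte, swp_pte) ||
	       pte_same(pte, pte_swp_mksoft_dirty(swp_pte));
}

int main(void)
{
	pte_t swp = 0xabc0;				/* freshly generated swap pte */
	pte_t in_table = swp | PTE_SWP_SOFT_DIRTY;	/* as stored after a write */

	assert(!pte_same(in_table, swp));	/* the old check would miss it */
	assert(maybe_same_pte(in_table, swp));	/* the new check matches */
	puts("soft-dirty swap pte matched");
	return 0;
}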
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 736a6011c2c8..0c1e37d829fa 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -180,12 +180,12 @@ static void vmpressure_work_fn(struct work_struct *work)
if (!vmpr->scanned)
return;
- mutex_lock(&vmpr->sr_lock);
+ spin_lock(&vmpr->sr_lock);
scanned = vmpr->scanned;
reclaimed = vmpr->reclaimed;
vmpr->scanned = 0;
vmpr->reclaimed = 0;
- mutex_unlock(&vmpr->sr_lock);
+ spin_unlock(&vmpr->sr_lock);
do {
if (vmpressure_event(vmpr, scanned, reclaimed))
@@ -240,13 +240,13 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
if (!scanned)
return;
- mutex_lock(&vmpr->sr_lock);
+ spin_lock(&vmpr->sr_lock);
vmpr->scanned += scanned;
vmpr->reclaimed += reclaimed;
scanned = vmpr->scanned;
- mutex_unlock(&vmpr->sr_lock);
+ spin_unlock(&vmpr->sr_lock);
- if (scanned < vmpressure_win || work_pending(&vmpr->work))
+ if (scanned < vmpressure_win)
return;
schedule_work(&vmpr->work);
}
@@ -367,8 +367,24 @@ void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
*/
void vmpressure_init(struct vmpressure *vmpr)
{
- mutex_init(&vmpr->sr_lock);
+ spin_lock_init(&vmpr->sr_lock);
mutex_init(&vmpr->events_lock);
INIT_LIST_HEAD(&vmpr->events);
INIT_WORK(&vmpr->work, vmpressure_work_fn);
}
+
+/**
+ * vmpressure_cleanup() - shuts down vmpressure control structure
+ * @vmpr: Structure to be cleaned up
+ *
+ * This function should be called before the structure in which it is
+ * embedded is cleaned up.
+ */
+void vmpressure_cleanup(struct vmpressure *vmpr)
+{
+ /*
+ * Make sure there is no pending work before eventfd infrastructure
+ * goes away.
+ */
+ flush_work(&vmpr->work);
+}
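
vmpressure() is called from reclaim paths that must not sleep, hence the mutex-to-spinlock switch; the work function snapshots and resets the counters inside the short critical section and delivers events outside it. A pthread analogue of that snapshot-and-reset pattern (names mirror the patch, but the code is purely illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t sr_lock;
static unsigned long scanned, reclaimed;

static void account(unsigned long s, unsigned long r)
{
	pthread_spin_lock(&sr_lock);	/* short, non-sleeping section */
	scanned += s;
	reclaimed += r;
	pthread_spin_unlock(&sr_lock);
}

static void work_fn(void)
{
	unsigned long s, r;

	pthread_spin_lock(&sr_lock);
	s = scanned;  r = reclaimed;	/* snapshot ... */
	scanned = reclaimed = 0;	/* ... and reset atomically */
	pthread_spin_unlock(&sr_lock);

	if (s)				/* heavy work happens unlocked */
		printf("pressure: %lu%%\n", 100 - 100 * r / s);
}

int main(void)
{
	pthread_spin_init(&sr_lock, PTHREAD_PROCESS_PRIVATE);
	account(512, 128);
	work_fn();
	pthread_spin_destroy(&sr_lock);
	return 0;
}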
diff --git a/mm/zbud.c b/mm/zbud.c
index 9bb4710e3589..ad1e781284fd 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -257,7 +257,7 @@ int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
if (size <= 0 || gfp & __GFP_HIGHMEM)
return -EINVAL;
- if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED)
+ if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
return -ENOSPC;
chunks = size_to_chunks(size);
spin_lock(&pool->lock);
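
One way to read the tightened zbud bound: reserving an extra CHUNK_SIZE keeps the rounded-up chunk count of a maximal buddy strictly below the page's chunk total, so a single allocation can never consume every chunk in the page. A small arithmetic sketch with illustrative constants (the kernel derives the real ones from PAGE_SIZE):

#include <stdio.h>

#define PAGE_SIZE   4096
#define CHUNK_SHIFT 6
#define CHUNK_SIZE  (1 << CHUNK_SHIFT)		/* 64 bytes */
#define NCHUNKS     (PAGE_SIZE >> CHUNK_SHIFT)	/* 64 chunks per page */

/* Round a byte size up to whole chunks, like size_to_chunks(). */
static int size_to_chunks(int size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

int main(void)
{
	int hdr = CHUNK_SIZE;				/* stand-in for ZHDR_SIZE_ALIGNED */
	int old_max = PAGE_SIZE - hdr;			/* old bound */
	int new_max = PAGE_SIZE - hdr - CHUNK_SIZE;	/* bound after the fix */

	printf("old max needs %d of %d chunks (plus header)\n",
	       size_to_chunks(old_max), NCHUNKS);
	printf("new max needs %d of %d chunks (plus header)\n",
	       size_to_chunks(new_max), NCHUNKS);
	return 0;
}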
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4a78c4de9f20..6ee48aac776f 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
- return vlan_dev_priv(dev)->real_dev;
+ struct net_device *ret = vlan_dev_priv(dev)->real_dev;
+
+ while (is_vlan_dev(ret))
+ ret = vlan_dev_priv(ret)->real_dev;
+
+ return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);
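
vlan_dev_real_dev() now descends through stacked (Q-in-Q) vlan devices until it reaches a non-vlan lower device. A toy model of that walk:

#include <stdio.h>

struct net_device {
	const char *name;
	struct net_device *real_dev;	/* NULL for a physical device */
};

static int is_vlan_dev(const struct net_device *dev)
{
	return dev->real_dev != NULL;
}

static struct net_device *vlan_dev_real_dev(struct net_device *dev)
{
	struct net_device *ret = dev->real_dev;

	while (is_vlan_dev(ret))	/* descend through vlan-on-vlan */
		ret = ret->real_dev;
	return ret;
}

int main(void)
{
	struct net_device eth0  = { "eth0", NULL };
	struct net_device vlan1 = { "eth0.100", &eth0 };
	struct net_device vlan2 = { "eth0.100.200", &vlan1 };	/* Q-in-Q */

	printf("real dev of %s is %s\n",
	       vlan2.name, vlan_dev_real_dev(&vlan2)->name);
	return 0;
}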
diff --git a/net/Kconfig b/net/Kconfig
index 37702491abe9..2b406608a1a4 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -244,7 +244,7 @@ config NETPRIO_CGROUP
Cgroup subsystem for use in assigning processes to network priorities on
a per-interface basis
-config NET_LL_RX_POLL
+config NET_RX_BUSY_POLL
boolean
default y
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index e14531f1ce1c..264de88db320 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1529,6 +1529,8 @@ out:
* in these cases, the skb is further handled by this function and
* returns 1, otherwise it returns 0 and the caller shall further
* process the skb.
+ *
+ * This call might reallocate skb data.
*/
int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid)
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index f105219f4a4b..7614af31daff 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -508,6 +508,7 @@ out:
return 0;
}
+/* this call might reallocate skb data */
static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
{
int ret = false;
@@ -568,6 +569,7 @@ out:
return ret;
}
+/* this call might reallocate skb data */
bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
{
struct ethhdr *ethhdr;
@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
return false;
+
+ /* skb->data might have been reallocated by pskb_may_pull() */
+ ethhdr = (struct ethhdr *)skb->data;
+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
+ ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
udphdr = (struct udphdr *)(skb->data + *header_len);
*header_len += sizeof(*udphdr);
@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
return true;
}
+/* this call might reallocate skb data */
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
- struct sk_buff *skb, struct ethhdr *ethhdr)
+ struct sk_buff *skb)
{
struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
struct batadv_orig_node *orig_dst_node = NULL;
struct batadv_gw_node *curr_gw = NULL;
+ struct ethhdr *ethhdr;
bool ret, out_of_range = false;
unsigned int header_len = 0;
uint8_t curr_tq_avg;
@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
if (!ret)
goto out;
+ ethhdr = (struct ethhdr *)skb->data;
orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
ethhdr->h_dest);
if (!orig_dst_node)
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 039902dca4a6..1037d75da51f 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
void batadv_gw_node_purge(struct batadv_priv *bat_priv);
int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
- struct sk_buff *skb, struct ethhdr *ethhdr);
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 700d0b49742d..0f04e1c302b4 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
if (batadv_bla_tx(bat_priv, skb, vid))
goto dropped;
+ /* skb->data might have been reallocated by batadv_bla_tx() */
+ ethhdr = (struct ethhdr *)skb->data;
+
/* Register the client MAC in the transtable */
if (!is_multicast_ether_addr(ethhdr->h_source))
batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
default:
break;
}
+
+ /* reminder: ethhdr might have become unusable from here on
+ * (batadv_gw_is_dhcp_target() might have reallocated skb data)
+ */
}
/* ethernet packet should be broadcasted */
@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
/* unicast packet */
} else {
if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
- ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
+ ret = batadv_gw_out_of_range(bat_priv, skb);
if (ret)
goto dropped;
}
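
The batman-adv fixes above all follow one rule: any call that may reallocate skb data (pskb_may_pull(), batadv_bla_tx(), ...) invalidates pointers previously cached into it, so ethhdr must be re-derived from skb->data afterwards. A userspace analogue of the same hazard using realloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	unsigned char *data;
	size_t len;
};

/* May move buf->data, just like pskb_may_pull() may move skb->data. */
static int may_pull(struct buf *b, size_t need)
{
	unsigned char *p;

	if (need <= b->len)
		return 1;
	p = realloc(b->data, need);
	if (!p)
		return 0;
	memset(p + b->len, 0, need - b->len);
	b->data = p;
	b->len = need;
	return 1;
}

int main(void)
{
	struct buf b = { .data = calloc(14, 1), .len = 14 };
	unsigned char *ethhdr;

	if (!b.data || !may_pull(&b, 1500))
		return 1;
	ethhdr = b.data;	/* re-derive: any earlier pointer is stale */
	printf("header at %p, buffer len %zu\n", (void *)ethhdr, b.len);
	free(b.data);
	return 0;
}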
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc8b5d4dd636..857e1b8349ee 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
* @skb: the skb containing the payload to encapsulate
* @orig_node: the destination node
*
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
*/
static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
struct batadv_orig_node *orig_node)
@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
* @orig_node: the destination node
* @packet_subtype: the batman 4addr packet subtype to use
*
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
*/
bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
struct sk_buff *skb,
@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
struct batadv_neigh_node *neigh_node;
int data_len = skb->len;
int ret = NET_RX_DROP;
- unsigned int dev_mtu;
+ unsigned int dev_mtu, header_len;
/* get routing information */
if (is_multicast_ether_addr(ethhdr->h_dest)) {
@@ -428,11 +432,17 @@ find_router:
switch (packet_type) {
case BATADV_UNICAST:
- batadv_unicast_prepare_skb(skb, orig_node);
+ if (!batadv_unicast_prepare_skb(skb, orig_node))
+ goto out;
+
+ header_len = sizeof(struct batadv_unicast_packet);
break;
case BATADV_UNICAST_4ADDR:
- batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
- packet_subtype);
+ if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
+ packet_subtype))
+ goto out;
+
+ header_len = sizeof(struct batadv_unicast_4addr_packet);
break;
default:
/* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -441,6 +451,7 @@ find_router:
goto out;
}
+ ethhdr = (struct ethhdr *)(skb->data + header_len);
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* inform the destination node that we are still missing a correct route
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index e3a349977595..cc27297da5a9 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -513,7 +513,10 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
hci_setup_event_mask(req);
- if (hdev->hci_ver > BLUETOOTH_VER_1_1)
+ /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
+ * local supported commands HCI command.
+ */
+ if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (lmp_ssp_capable(hdev)) {
@@ -2165,10 +2168,6 @@ int hci_register_dev(struct hci_dev *hdev)
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- write_lock(&hci_dev_list_lock);
- list_add(&hdev->list, &hci_dev_list);
- write_unlock(&hci_dev_list_lock);
-
hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
WQ_MEM_RECLAIM, 1, hdev->name);
if (!hdev->workqueue) {
@@ -2203,6 +2202,10 @@ int hci_register_dev(struct hci_dev *hdev)
if (hdev->dev_type != HCI_AMP)
set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+ write_lock(&hci_dev_list_lock);
+ list_add(&hdev->list, &hci_dev_list);
+ write_unlock(&hci_dev_list_lock);
+
hci_notify(hdev, HCI_DEV_REG);
hci_dev_hold(hdev);
@@ -2215,9 +2218,6 @@ err_wqueue:
destroy_workqueue(hdev->req_workqueue);
err:
ida_simple_remove(&hci_index_ida, hdev->id);
- write_lock(&hci_dev_list_lock);
- list_del(&hdev->list);
- write_unlock(&hci_dev_list_lock);
return error;
}
@@ -3399,8 +3399,16 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
*/
if (hdev->sent_cmd) {
req_complete = bt_cb(hdev->sent_cmd)->req.complete;
- if (req_complete)
+
+ if (req_complete) {
+ /* We must set the complete callback to NULL to
+ * avoid calling the callback more than once if
+ * this function gets called again.
+ */
+ bt_cb(hdev->sent_cmd)->req.complete = NULL;
+
goto call_complete;
+ }
}
/* Remove all pending commands belonging to this request */
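
The hci_req_cmd_complete() fix clears the completion callback before invoking it, so a second call into the function cannot deliver the same completion twice. A minimal sketch of that call-once pattern:

#include <stdio.h>

struct cmd {
	void (*complete)(int status);
};

static void on_complete(int status)
{
	printf("completed, status %d\n", status);
}

static void cmd_complete(struct cmd *c, int status)
{
	void (*cb)(int) = c->complete;

	if (!cb)
		return;		/* already delivered once */
	c->complete = NULL;	/* disarm before calling */
	cb(status);
}

int main(void)
{
	struct cmd c = { .complete = on_complete };

	cmd_complete(&c, 0);	/* fires */
	cmd_complete(&c, 0);	/* silently ignored */
	return 0;
}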
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 2ef66781fedb..69363bd37f64 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -70,7 +70,8 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
}
mdst = br_mdb_get(br, skb, vid);
- if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
+ if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
+ br_multicast_querier_exists(br))
br_multicast_deliver(mdst, skb);
else
br_flood_deliver(br, skb, false);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 60aca9109a50..ffd5874f2592 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -161,7 +161,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
if (!pv)
return;
- for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
f = __br_fdb_get(br, br->dev->dev_addr, vid);
if (f && f->is_local && !f->dst)
fdb_delete(br, f);
@@ -730,7 +730,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/* VID was specified, so use it. */
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
} else {
- if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+ if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
goto out;
}
@@ -739,7 +739,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* specify a VLAN. To be nice, add/update entry for every
* vlan on this port.
*/
- for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
if (err)
goto out;
@@ -817,7 +817,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
err = __br_fdb_delete(p, addr, vid);
} else {
- if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+ if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
err = __br_fdb_delete(p, addr, 0);
goto out;
}
@@ -827,7 +827,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
* vlan on this port.
*/
err = -ENOENT;
- for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err &= __br_fdb_delete(p, addr, vid);
}
}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 1b8b8b824cd7..8c561c0aa636 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -101,7 +101,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
unicast = false;
} else if (is_multicast_ether_addr(dest)) {
mdst = br_mdb_get(br, skb, vid);
- if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
+ if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
+ br_multicast_querier_exists(br)) {
if ((mdst && mdst->mglist) ||
br_multicast_is_router(br))
skb2 = skb;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 69af490cce44..08e576ada0b2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -619,6 +619,9 @@ rehash:
mp->br = br;
mp->addr = *group;
+ setup_timer(&mp->timer, br_multicast_group_expired,
+ (unsigned long)mp);
+
hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
mdb->size++;
@@ -1011,6 +1014,16 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
}
#endif
+static void br_multicast_update_querier_timer(struct net_bridge *br,
+ unsigned long max_delay)
+{
+ if (!timer_pending(&br->multicast_querier_timer))
+ br->multicast_querier_delay_time = jiffies + max_delay;
+
+ mod_timer(&br->multicast_querier_timer,
+ jiffies + br->multicast_querier_interval);
+}
+
/*
* Add port to router_list
* list is maintained ordered by pointer value
@@ -1061,11 +1074,11 @@ timer:
static void br_multicast_query_received(struct net_bridge *br,
struct net_bridge_port *port,
- int saddr)
+ int saddr,
+ unsigned long max_delay)
{
if (saddr)
- mod_timer(&br->multicast_querier_timer,
- jiffies + br->multicast_querier_interval);
+ br_multicast_update_querier_timer(br, max_delay);
else if (timer_pending(&br->multicast_querier_timer))
return;
@@ -1093,8 +1106,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- br_multicast_query_received(br, port, !!iph->saddr);
-
group = ih->group;
if (skb->len == sizeof(*ih)) {
@@ -1118,6 +1129,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
}
+ br_multicast_query_received(br, port, !!iph->saddr, max_delay);
+
if (!group)
goto out;
@@ -1126,7 +1139,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
if (!mp)
goto out;
- setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
mod_timer(&mp->timer, now + br->multicast_membership_interval);
mp->timer_armed = true;
@@ -1174,8 +1186,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));
-
if (skb->len == sizeof(*mld)) {
if (!pskb_may_pull(skb, sizeof(*mld))) {
err = -EINVAL;
@@ -1185,7 +1195,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
if (max_delay)
group = &mld->mld_mca;
- } else if (skb->len >= sizeof(*mld2q)) {
+ } else {
if (!pskb_may_pull(skb, sizeof(*mld2q))) {
err = -EINVAL;
goto out;
@@ -1196,6 +1206,9 @@ static int br_ip6_multicast_query(struct net_bridge *br,
max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
}
+ br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr),
+ max_delay);
+
if (!group)
goto out;
@@ -1204,7 +1217,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
if (!mp)
goto out;
- setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
mod_timer(&mp->timer, now + br->multicast_membership_interval);
mp->timer_armed = true;
@@ -1642,6 +1654,8 @@ void br_multicast_init(struct net_bridge *br)
br->multicast_querier_interval = 255 * HZ;
br->multicast_membership_interval = 260 * HZ;
+ br->multicast_querier_delay_time = 0;
+
spin_lock_init(&br->multicast_lock);
setup_timer(&br->multicast_router_timer,
br_multicast_local_router_expired, 0);
@@ -1830,6 +1844,8 @@ unlock:
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
+ unsigned long max_delay;
+
val = !!val;
spin_lock_bh(&br->multicast_lock);
@@ -1837,8 +1853,14 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
goto unlock;
br->multicast_querier = val;
- if (val)
- br_multicast_start_querier(br);
+ if (!val)
+ goto unlock;
+
+ max_delay = br->multicast_query_response_interval;
+ if (!timer_pending(&br->multicast_querier_timer))
+ br->multicast_querier_delay_time = jiffies + max_delay;
+
+ br_multicast_start_querier(br);
unlock:
spin_unlock_bh(&br->multicast_lock);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 1fc30abd3a52..b9259efa636e 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -132,7 +132,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
else
pv = br_get_vlan_info(br);
- if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
+ if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
goto done;
af = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -140,7 +140,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
goto nla_put_failure;
pvid = br_get_pvid(pv);
- for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
vinfo.vid = vid;
vinfo.flags = 0;
if (vid == pvid)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 3be89b3ce17b..2f7da41851bf 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -267,6 +267,7 @@ struct net_bridge
unsigned long multicast_query_interval;
unsigned long multicast_query_response_interval;
unsigned long multicast_startup_query_interval;
+ unsigned long multicast_querier_delay_time;
spinlock_t multicast_lock;
struct net_bridge_mdb_htable __rcu *mdb;
@@ -501,6 +502,13 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
(br->multicast_router == 1 &&
timer_pending(&br->multicast_router_timer));
}
+
+static inline bool br_multicast_querier_exists(struct net_bridge *br)
+{
+ return time_is_before_jiffies(br->multicast_querier_delay_time) &&
+ (br->multicast_querier ||
+ timer_pending(&br->multicast_querier_timer));
+}
#else
static inline int br_multicast_rcv(struct net_bridge *br,
struct net_bridge_port *port,
@@ -557,6 +565,10 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
{
return 0;
}
+static inline bool br_multicast_querier_exists(struct net_bridge *br)
+{
+ return false;
+}
static inline void br_mdb_init(void)
{
}
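
br_multicast_querier_exists() keys off a jiffies deadline; time_is_before_jiffies() stays correct across counter wraparound because it compares via a signed difference rather than a plain >=. A standalone model (time_is_before here takes an explicit now for testability):

#include <assert.h>
#include <stdio.h>

typedef unsigned long jiffies_t;

static int time_is_before(jiffies_t now, jiffies_t t)
{
	return (long)(now - t) >= 0;	/* true once the deadline has passed */
}

int main(void)
{
	jiffies_t near_wrap = ~0UL - 5;

	assert(!time_is_before(near_wrap, near_wrap + 10));	/* deadline ahead */
	assert(time_is_before(near_wrap + 20, near_wrap + 10));	/* passed, across wrap */
	puts("wrap-safe deadline check ok");
	return 0;
}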
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 394bb96b6087..3b9637fb7939 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -1,5 +1,5 @@
/*
- * Sysfs attributes of bridge ports
+ * Sysfs attributes of bridge
* Linux ethernet bridge
*
* Authors:
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index bd58b45f5f90..9a9ffe7e4019 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -108,7 +108,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
clear_bit(vid, v->vlan_bitmap);
v->num_vlans--;
- if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+ if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
if (v->port_idx)
rcu_assign_pointer(v->parent.port->vlan_info, NULL);
else
@@ -122,7 +122,7 @@ static void __vlan_flush(struct net_port_vlans *v)
{
smp_wmb();
v->pvid = 0;
- bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+ bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
if (v->port_idx)
rcu_assign_pointer(v->parent.port->vlan_info, NULL);
else
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 00ee068efc1c..b84a1b155bc1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -65,6 +65,7 @@ ipv6:
nhoff += sizeof(struct ipv6hdr);
break;
}
+ case __constant_htons(ETH_P_8021AD):
case __constant_htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan;
struct vlan_hdr _vlan;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b7de821f98df..60533db8b72d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
atomic_set(&p->refcnt, 1);
p->reachable_time =
neigh_rand_reach_time(p->base_reachable_time);
+ dev_hold(dev);
+ p->dev = dev;
+ write_pnet(&p->net, hold_net(net));
+ p->sysctl_table = NULL;
if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+ release_net(net);
+ dev_put(dev);
kfree(p);
return NULL;
}
- dev_hold(dev);
- p->dev = dev;
- write_pnet(&p->net, hold_net(net));
- p->sysctl_table = NULL;
write_lock_bh(&tbl->lock);
p->next = tbl->parms.next;
tbl->parms.next = p;
@@ -2767,6 +2769,7 @@ EXPORT_SYMBOL(neigh_app_ns);
#ifdef CONFIG_SYSCTL
static int zero;
+static int int_max = INT_MAX;
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
static int proc_unres_qlen(struct ctl_table *ctl, int write,
@@ -2819,19 +2822,25 @@ static struct neigh_sysctl_table {
.procname = "mcast_solicit",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
[NEIGH_VAR_UCAST_PROBE] = {
.procname = "ucast_solicit",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
[NEIGH_VAR_APP_PROBE] = {
.procname = "app_solicit",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
[NEIGH_VAR_RETRANS_TIME] = {
.procname = "retrans_time",
@@ -2874,7 +2883,9 @@ static struct neigh_sysctl_table {
.procname = "proxy_qlen",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
[NEIGH_VAR_ANYCAST_DELAY] = {
.procname = "anycast_delay",
@@ -2916,19 +2927,25 @@ static struct neigh_sysctl_table {
.procname = "gc_thresh1",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
[NEIGH_VAR_GC_THRESH2] = {
.procname = "gc_thresh2",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
[NEIGH_VAR_GC_THRESH3] = {
.procname = "gc_thresh3",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
{},
},
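
Switching these handlers to proc_dointvec_minmax means writes outside the [extra1, extra2] window are rejected instead of stored, closing off negative or overflowing sysctl values. A userspace sketch of the same validation shape:

#include <limits.h>
#include <stdio.h>

struct knob {
	int *data;
	int *min, *max;		/* play the role of .extra1 / .extra2 */
};

static int knob_write(struct knob *k, int val)
{
	if ((k->min && val < *k->min) || (k->max && val > *k->max))
		return -1;	/* -EINVAL in the kernel */
	*k->data = val;
	return 0;
}

int main(void)
{
	int zero = 0, int_max = INT_MAX;
	int gc_thresh1 = 128;
	struct knob k = { &gc_thresh1, &zero, &int_max };

	printf("write 256 -> %d\n", knob_write(&k, 256));	/* accepted */
	printf("write -1  -> %d\n", knob_write(&k, -1));	/* rejected */
	printf("value = %d\n", gc_thresh1);
	return 0;
}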
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 3de740834d1f..ca198c1d1d30 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2156,7 +2156,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
/* If aging addresses are supported device will need to
* implement its own handler for this.
*/
- if (ndm->ndm_state & NUD_PERMANENT) {
+ if (!(ndm->ndm_state & NUD_PERMANENT)) {
pr_info("%s: FDB only supports static addresses\n", dev->name);
return -EINVAL;
}
@@ -2384,7 +2384,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
struct nlattr *extfilt;
u32 filter_mask = 0;
- extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+ extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
IFLA_EXT_MASK);
if (extfilt)
filter_mask = nla_get_u32(extfilt);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 20e02d2605ec..2c3d0f53d198 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -309,7 +309,8 @@ EXPORT_SYMBOL(__alloc_skb);
* @frag_size: size of fragment, or 0 if head was kmalloced
*
* Allocate a new &sk_buff. Caller provides space holding head and
- * skb_shared_info. @data must have been allocated by kmalloc()
+ * skb_shared_info. @data must have been allocated by kmalloc() only if
+ * @frag_size is 0, otherwise data should come from the page allocator.
* The return is the new skb buffer.
* On a failure the return is %NULL, and @data is not freed.
* Notes :
@@ -739,7 +740,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
skb_copy_secmark(new, old);
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
new->napi_id = old->napi_id;
#endif
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 548d716c5f62..2c097c5a35dd 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -900,7 +900,7 @@ set_rcvbuf:
sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
break;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
case SO_BUSY_POLL:
/* allow unprivileged users to decrease the value */
if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
@@ -1170,7 +1170,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
break;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
case SO_BUSY_POLL:
v.val = sk->sk_ll_usec;
break;
@@ -2292,7 +2292,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_stamp = ktime_set(-1L, 0);
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
sk->sk_napi_id = 0;
sk->sk_ll_usec = sysctl_net_busy_read;
#endif
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 660968616637..31107abd2783 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -21,7 +21,9 @@
#include <net/net_ratelimit.h>
#include <net/busy_poll.h>
+static int zero = 0;
static int one = 1;
+static int ushort_max = USHRT_MAX;
#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
@@ -298,7 +300,7 @@ static struct ctl_table net_core_table[] = {
.proc_handler = flow_limit_table_len_sysctl
},
#endif /* CONFIG_NET_FLOW_LIMIT */
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
{
.procname = "busy_poll",
.data = &sysctl_net_busy_poll,
@@ -339,7 +341,9 @@ static struct ctl_table netns_core_table[] = {
.data = &init_net.core.sysctl_somaxconn,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .extra1 = &zero,
+ .extra2 = &ushort_max,
+ .proc_handler = proc_dointvec_minmax
},
{ }
};
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 8d48c392adcc..34ca6d5a3a4b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -772,7 +772,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
ci = nla_data(tb[IFA_CACHEINFO]);
if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
err = -EINVAL;
- goto errout;
+ goto errout_free;
}
*pvalid_lft = ci->ifa_valid;
*pprefered_lft = ci->ifa_prefered;
@@ -780,6 +780,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
return ifa;
+errout_free:
+ inet_free_ifa(ifa);
errout:
return ERR_PTR(err);
}
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index ab3d814bc80a..109ee89f123e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
}
return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
- net_adj) & ~(align - 1)) + (net_adj - 2);
+ net_adj) & ~(align - 1)) + net_adj - 2;
}
static void esp4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 49616fed9340..3df6d3edb2a1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,7 +71,6 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
if (!c)
continue;
- if (IS_LEAF(c)) {
- prefetch(rcu_dereference_rtnl(p->child[idx]));
+ if (IS_LEAF(c))
return (struct leaf *) c;
- }
/* Rescan start scanning in new node */
p = (struct tnode *) c;
@@ -2133,7 +2130,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
max--;
pointers = 0;
- for (i = 1; i <= max; i++)
+ for (i = 1; i < max; i++)
if (stat->nodesizes[i] != 0) {
seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
pointers += (1<<i) * stat->nodesizes[i];
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 1f6eab66f7ce..8d6939eeb492 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
if (daddr)
memcpy(&iph->daddr, daddr, 4);
if (iph->daddr)
- return t->hlen;
+ return t->hlen + sizeof(*iph);
return -(t->hlen + sizeof(*iph));
}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 7167b08977df..850525b34899 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt,
iph->daddr = dst;
iph->saddr = src;
iph->ttl = ttl;
- tunnel_ip_select_ident(skb,
- (const struct iphdr *)skb_inner_network_header(skb),
- &rt->dst);
+ __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
err = ip_local_out(skb);
if (unlikely(net_xmit_eval(err)))
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 6577a1149a47..463bd1273346 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -273,7 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
- SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS),
+ SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b2c123c44d69..610e324348d1 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -36,6 +36,8 @@ static int tcp_adv_win_scale_min = -31;
static int tcp_adv_win_scale_max = 31;
static int ip_ttl_min = 1;
static int ip_ttl_max = 255;
+static int tcp_syn_retries_min = 1;
+static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
@@ -332,7 +334,9 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_syn_retries,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &tcp_syn_retries_min,
+ .extra2 = &tcp_syn_retries_max
},
{
.procname = "tcp_synack_retries",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5423223e93c2..b2f6c74861af 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1121,6 +1121,13 @@ new_segment:
goto wait_for_memory;
/*
+ * All packets are restored as if they have
+ * already been sent.
+ */
+ if (tp->repair)
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
+
+ /*
* Check whether we can use HW checksum.
*/
if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9077f441cb2..b6ae92a51f58 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
*/
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
- u64 offs;
- u32 delta, t, bic_target, max_cnt;
+ u32 delta, bic_target, max_cnt;
+ u64 offs, t;
ca->ack_cnt++; /* count the number of ACKs */
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
* if the cwnd < 1 million packets !!!
*/
+ t = (s32)(tcp_time_stamp - ca->epoch_start);
+ t += msecs_to_jiffies(ca->delay_min >> 3);
/* change the unit from HZ to bictcp_HZ */
- t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
- - ca->epoch_start) << BICTCP_HZ) / HZ;
+ t <<= BICTCP_HZ;
+ do_div(t, HZ);
if (t < ca->bic_K) /* t - K */
offs = ca->bic_K - t;
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
return;
/* Discard delay samples right after fast recovery */
- if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+ if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
return;
delay = (rtt_us << 3) / USEC_PER_MSEC;
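
The bictcp_update() change widens t to u64 and divides with do_div() because (t << BICTCP_HZ) / HZ overflows 32-bit arithmetic once t exceeds roughly 2^22 jiffies. A short demonstration of the difference:

#include <inttypes.h>
#include <stdio.h>

#define BICTCP_HZ 10	/* same scale factor as the patch */
#define HZ        1000

int main(void)
{
	uint32_t t32 = 5u * 1000 * 1000;	/* ~83 minutes of 1 kHz jiffies */
	uint64_t t64 = t32;

	/* 32-bit: the shift wraps (5e6 << 10 exceeds 2^32), giving garbage */
	uint32_t bad  = (t32 << BICTCP_HZ) / HZ;
	/* 64-bit intermediate with a final division, as do_div() does */
	uint64_t good = (t64 << BICTCP_HZ) / HZ;

	printf("32-bit: %" PRIu32 "\n", bad);
	printf("64-bit: %" PRIu64 "\n", good);
	return 0;
}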
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cfdcf7b2daf6..498ea99194af 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -813,8 +813,9 @@ static u32 inet6_addr_hash(const struct in6_addr *addr)
/* On success it returns ifp with increased reference count */
static struct inet6_ifaddr *
-ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
- int scope, u32 flags)
+ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
+ const struct in6_addr *peer_addr, int pfxlen,
+ int scope, u32 flags, u32 valid_lft, u32 prefered_lft)
{
struct inet6_ifaddr *ifa = NULL;
struct rt6_info *rt;
@@ -863,6 +864,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
}
ifa->addr = *addr;
+ if (peer_addr)
+ ifa->peer_addr = *peer_addr;
spin_lock_init(&ifa->lock);
spin_lock_init(&ifa->state_lock);
@@ -872,6 +875,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
ifa->scope = scope;
ifa->prefix_len = pfxlen;
ifa->flags = flags | IFA_F_TENTATIVE;
+ ifa->valid_lft = valid_lft;
+ ifa->prefered_lft = prefered_lft;
ifa->cstamp = ifa->tstamp = jiffies;
ifa->tokenized = false;
@@ -1121,11 +1126,10 @@ retry:
if (ifp->flags & IFA_F_OPTIMISTIC)
addr_flags |= IFA_F_OPTIMISTIC;
- ift = !max_addresses ||
- ipv6_count_addresses(idev) < max_addresses ?
- ipv6_add_addr(idev, &addr, tmp_plen, ipv6_addr_scope(&addr),
- addr_flags) : NULL;
- if (IS_ERR_OR_NULL(ift)) {
+ ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
+ ipv6_addr_scope(&addr), addr_flags,
+ tmp_valid_lft, tmp_prefered_lft);
+ if (IS_ERR(ift)) {
in6_ifa_put(ifp);
in6_dev_put(idev);
pr_info("%s: retry temporary address regeneration\n", __func__);
@@ -1136,8 +1140,6 @@ retry:
spin_lock_bh(&ift->lock);
ift->ifpub = ifp;
- ift->valid_lft = tmp_valid_lft;
- ift->prefered_lft = tmp_prefered_lft;
ift->cstamp = now;
ift->tstamp = tmp_tstamp;
spin_unlock_bh(&ift->lock);
@@ -2179,16 +2181,19 @@ ok:
*/
if (!max_addresses ||
ipv6_count_addresses(in6_dev) < max_addresses)
- ifp = ipv6_add_addr(in6_dev, &addr, pinfo->prefix_len,
+ ifp = ipv6_add_addr(in6_dev, &addr, NULL,
+ pinfo->prefix_len,
addr_type&IPV6_ADDR_SCOPE_MASK,
- addr_flags);
+ addr_flags, valid_lft,
+ prefered_lft);
if (IS_ERR_OR_NULL(ifp)) {
in6_dev_put(in6_dev);
return;
}
- update_lft = create = 1;
+ update_lft = 0;
+ create = 1;
ifp->cstamp = jiffies;
ifp->tokenized = tokenized;
addrconf_dad_start(ifp);
@@ -2209,7 +2214,7 @@ ok:
stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
else
stored_lft = 0;
- if (!update_lft && stored_lft) {
+ if (!update_lft && !create && stored_lft) {
if (valid_lft > MIN_VALID_LIFETIME ||
valid_lft > stored_lft)
update_lft = 1;
@@ -2455,17 +2460,10 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p
prefered_lft = timeout;
}
- ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
+ ifp = ipv6_add_addr(idev, pfx, peer_pfx, plen, scope, ifa_flags,
+ valid_lft, prefered_lft);
if (!IS_ERR(ifp)) {
- spin_lock_bh(&ifp->lock);
- ifp->valid_lft = valid_lft;
- ifp->prefered_lft = prefered_lft;
- ifp->tstamp = jiffies;
- if (peer_pfx)
- ifp->peer_addr = *peer_pfx;
- spin_unlock_bh(&ifp->lock);
-
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
expires, flags);
/*
@@ -2557,7 +2555,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
{
struct inet6_ifaddr *ifp;
- ifp = ipv6_add_addr(idev, addr, plen, scope, IFA_F_PERMANENT);
+ ifp = ipv6_add_addr(idev, addr, NULL, plen,
+ scope, IFA_F_PERMANENT, 0, 0);
if (!IS_ERR(ifp)) {
spin_lock_bh(&ifp->lock);
ifp->flags &= ~IFA_F_TENTATIVE;
@@ -2683,7 +2682,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
#endif
- ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
+ ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
if (!IS_ERR(ifp)) {
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
addrconf_dad_start(ifp);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 40ffd72243a4..aeac0dc3635d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
net_adj = 0;
return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
- net_adj) & ~(align - 1)) + (net_adj - 2);
+ net_adj) & ~(align - 1)) + net_adj - 2;
}
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5fc9c7a68d8d..c4ff5bbb45c4 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
- if (fn->subtree)
- fn = fib6_lookup_1(fn->subtree, args + 1);
+ if (fn->subtree) {
+ struct fib6_node *sfn;
+ sfn = fib6_lookup_1(fn->subtree,
+ args + 1);
+ if (!sfn)
+ goto backtrack;
+ fn = sfn;
+ }
#endif
- if (!fn || fn->fn_flags & RTN_RTINFO)
+ if (fn->fn_flags & RTN_RTINFO)
return fn;
}
}
-
+#ifdef CONFIG_IPV6_SUBTREES
+backtrack:
+#endif
if (fn->fn_flags & RTN_ROOT)
break;
@@ -1632,27 +1640,28 @@ static int fib6_age(struct rt6_info *rt, void *arg)
static DEFINE_SPINLOCK(fib6_gc_lock);
-void fib6_run_gc(unsigned long expires, struct net *net)
+void fib6_run_gc(unsigned long expires, struct net *net, bool force)
{
- if (expires != ~0UL) {
+ unsigned long now;
+
+ if (force) {
spin_lock_bh(&fib6_gc_lock);
- gc_args.timeout = expires ? (int)expires :
- net->ipv6.sysctl.ip6_rt_gc_interval;
- } else {
- if (!spin_trylock_bh(&fib6_gc_lock)) {
- mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
- return;
- }
- gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
+ } else if (!spin_trylock_bh(&fib6_gc_lock)) {
+ mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
+ return;
}
+ gc_args.timeout = expires ? (int)expires :
+ net->ipv6.sysctl.ip6_rt_gc_interval;
gc_args.more = icmp6_dst_gc();
fib6_clean_all(net, fib6_age, 0, NULL);
+ now = jiffies;
+ net->ipv6.ip6_rt_last_gc = now;
if (gc_args.more)
mod_timer(&net->ipv6.ip6_fib_timer,
- round_jiffies(jiffies
+ round_jiffies(now
+ net->ipv6.sysctl.ip6_rt_gc_interval));
else
del_timer(&net->ipv6.ip6_fib_timer);
@@ -1661,7 +1670,7 @@ void fib6_run_gc(unsigned long expires, struct net *net)
static void fib6_gc_timer_cb(unsigned long arg)
{
- fib6_run_gc(0, (struct net *)arg);
+ fib6_run_gc(0, (struct net *)arg, true);
}
static int __net_init fib6_net_init(struct net *net)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 583e8d435f9a..03986d31fa41 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -259,10 +259,12 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
{
struct mr6_table *mrt, *next;
+ rtnl_lock();
list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
list_del(&mrt->list);
ip6mr_free_table(mrt);
}
+ rtnl_unlock();
fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
#else
@@ -289,7 +291,10 @@ static int __net_init ip6mr_rules_init(struct net *net)
static void __net_exit ip6mr_rules_exit(struct net *net)
{
+ rtnl_lock();
ip6mr_free_table(net->ipv6.mrt6);
+ net->ipv6.mrt6 = NULL;
+ rtnl_unlock();
}
#endif
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 24c03396e008..04d31c2fbef1 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1369,8 +1369,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
return;
- if (!ndopts.nd_opts_rh)
+ if (!ndopts.nd_opts_rh) {
+ ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0);
return;
+ }
hdr = (u8 *)ndopts.nd_opts_rh;
hdr += 8;
@@ -1576,7 +1578,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_CHANGEADDR:
neigh_changeaddr(&nd_tbl, dev);
- fib6_run_gc(~0UL, net);
+ fib6_run_gc(0, net, false);
idev = in6_dev_get(dev);
if (!idev)
break;
@@ -1586,7 +1588,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
break;
case NETDEV_DOWN:
neigh_ifdown(&nd_tbl, dev);
- fib6_run_gc(~0UL, net);
+ fib6_run_gc(0, net, false);
break;
case NETDEV_NOTIFY_PEERS:
ndisc_send_unsol_na(dev);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 790d9f4b8b0b..1aeb473b2cc6 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -490,6 +490,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
ipv6_hdr(head)->payload_len = htons(payload_len);
ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
IP6CB(head)->nhoff = nhoff;
+ IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
/* Yes, and fold redundant checksum back. 8) */
if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -524,6 +525,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
struct net *net = dev_net(skb_dst(skb)->dev);
int evicted;
+ if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
+ goto fail_hdr;
+
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
/* Jumbo payload inhibits frag. header */
@@ -544,6 +548,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
+ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
return 1;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a8c891aa2464..8d9a93ed9c59 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1178,6 +1178,27 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
}
EXPORT_SYMBOL_GPL(ip6_redirect);
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+ u32 mark)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+ fl6.flowi6_mark = mark;
+ fl6.flowi6_flags = 0;
+ fl6.daddr = msg->dest;
+ fl6.saddr = iph->daddr;
+
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (!dst->error)
+ rt6_do_redirect(dst, NULL, skb);
+ dst_release(dst);
+}
+
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
@@ -1311,7 +1332,6 @@ static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
static int ip6_dst_gc(struct dst_ops *ops)
{
- unsigned long now = jiffies;
struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
@@ -1321,13 +1341,12 @@ static int ip6_dst_gc(struct dst_ops *ops)
int entries;
entries = dst_entries_get_fast(ops);
- if (time_after(rt_last_gc + rt_min_interval, now) &&
+ if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
entries <= rt_max_size)
goto out;
net->ipv6.ip6_rt_gc_expire++;
- fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
- net->ipv6.ip6_rt_last_gc = now;
+ fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
entries = dst_entries_get_slow(ops);
if (entries < ops->gc_thresh)
net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
@@ -2827,7 +2846,7 @@ int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
net = (struct net *)ctl->extra1;
delay = net->ipv6.sysctl.flush_delay;
proc_dointvec(ctl, write, buffer, lenp, ppos);
- fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+ fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
return 0;
}
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9da862070dd8..ab8bd2cabfa0 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2081,6 +2081,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *
pol->sadb_x_policy_type = IPSEC_POLICY_NONE;
}
pol->sadb_x_policy_dir = dir+1;
+ pol->sadb_x_policy_reserved = 0;
pol->sadb_x_policy_id = xp->index;
pol->sadb_x_policy_priority = xp->priority;
@@ -3137,7 +3138,9 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
pol->sadb_x_policy_dir = XFRM_POLICY_OUT + 1;
+ pol->sadb_x_policy_reserved = 0;
pol->sadb_x_policy_id = xp->index;
+ pol->sadb_x_policy_priority = xp->priority;
/* Set sadb_comb's. */
if (x->id.proto == IPPROTO_AH)
@@ -3525,6 +3528,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
pol->sadb_x_policy_dir = dir + 1;
+ pol->sadb_x_policy_reserved = 0;
pol->sadb_x_policy_id = 0;
pol->sadb_x_policy_priority = 0;
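
[Editor's note] The three sadb_x_policy_reserved = 0 stores (and the added sadb_x_policy_priority assignment) plug an information leak: the PF_KEY message buffer is not pre-zeroed, so any field the code forgot to set would carry stale kernel memory out to userspace. A hedged userspace illustration of the failure mode, with a simplified stand-in struct:

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the PF_KEY policy extension header. */
struct policy_msg {
    unsigned char type;
    unsigned char dir;
    unsigned char reserved;   /* must not leak stale memory to the peer */
    unsigned char priority;
};

int main(void)
{
    struct policy_msg m;
    memset(&m, 0xAA, sizeof(m));  /* simulate a non-zeroed message buffer */

    /* Field-by-field assembly, as af_key does; without the explicit
     * store below, 'reserved' would still hold 0xAA on the wire. */
    m.type = 2;
    m.dir = 1;
    m.priority = 7;
    m.reserved = 0;               /* the fix from the hunks above */

    printf("reserved=0x%02x\n", m.reserved);
    return 0;
}
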
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 8184d121ff09..43dd7525bfcb 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -666,6 +666,8 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
if (sta->sdata->dev != dev)
continue;
+ sinfo.filled = 0;
+ sta_set_sinfo(sta, &sinfo);
i = 0;
ADD_STA_STATS(sta);
}
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 3b7bfc01ee36..22290a929b94 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -229,6 +229,10 @@ void ieee80211_mps_sta_status_update(struct sta_info *sta)
enum nl80211_mesh_power_mode pm;
bool do_buffer;
+ /* For non-assoc STA, prevent buffering or frame transmission */
+ if (sta->sta_state < IEEE80211_STA_ASSOC)
+ return;
+
/*
* use peer-specific power mode if peering is established and the
* peer's power mode is known
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ae31968d42d3..cc9e02d79b55 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -31,10 +31,12 @@
#include "led.h"
#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
+#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2)
#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10)
#define IEEE80211_AUTH_MAX_TRIES 3
#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
+#define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2)
#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10)
#define IEEE80211_ASSOC_MAX_TRIES 3
@@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *channel,
const struct ieee80211_ht_operation *ht_oper,
const struct ieee80211_vht_operation *vht_oper,
- struct cfg80211_chan_def *chandef, bool verbose)
+ struct cfg80211_chan_def *chandef, bool tracking)
{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct cfg80211_chan_def vht_chandef;
u32 ht_cfreq, ret;
@@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
channel->band);
/* check that channel matches the right operating channel */
- if (channel->center_freq != ht_cfreq) {
+ if (!tracking && channel->center_freq != ht_cfreq) {
/*
* It's possible that some APs are confused here;
* Netgear WNDR3700 sometimes reports 4 higher than
@@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
* since we look at probe response/beacon data here
* it should be OK.
*/
- if (verbose)
- sdata_info(sdata,
- "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
- channel->center_freq, ht_cfreq,
- ht_oper->primary_chan, channel->band);
+ sdata_info(sdata,
+ "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+ channel->center_freq, ht_cfreq,
+ ht_oper->primary_chan, channel->band);
ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
goto out;
}
@@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
channel->band);
break;
default:
- if (verbose)
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
"AP VHT operation IE has invalid channel width (%d), disable VHT\n",
vht_oper->chan_width);
@@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
}
if (!cfg80211_chandef_valid(&vht_chandef)) {
- if (verbose)
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
"AP VHT information is invalid, disable VHT\n");
ret = IEEE80211_STA_DISABLE_VHT;
@@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
}
if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
- if (verbose)
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
"AP VHT information doesn't match HT, disable VHT\n");
ret = IEEE80211_STA_DISABLE_VHT;
@@ -333,18 +335,27 @@ out:
if (ret & IEEE80211_STA_DISABLE_VHT)
vht_chandef = *chandef;
+ /*
+ * Ignore the DISABLED flag when we're already connected and only
+ * tracking the AP's beacon for bandwidth changes - otherwise we
+ * might get disconnected here if we connect to an AP, update our
+ * regulatory information based on the AP's country IE and the
+ * information we have is wrong/outdated and disables the channel
+ * that we're actually using for the connection to the AP.
+ */
while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
- IEEE80211_CHAN_DISABLED)) {
+ tracking ? 0 :
+ IEEE80211_CHAN_DISABLED)) {
if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
ret = IEEE80211_STA_DISABLE_HT |
IEEE80211_STA_DISABLE_VHT;
- goto out;
+ break;
}
ret |= chandef_downgrade(chandef);
}
- if (chandef->width != vht_chandef.width && verbose)
+ if (chandef->width != vht_chandef.width && !tracking)
sdata_info(sdata,
"capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
@@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
/* calculate new channel (type) based on HT/VHT operation IEs */
flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
- vht_oper, &chandef, false);
+ vht_oper, &chandef, true);
/*
* Downgrade the new channel if we associated with restricted
@@ -3394,10 +3405,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
if (tx_flags == 0) {
auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
- ifmgd->auth_data->timeout_started = true;
+ auth_data->timeout_started = true;
run_again(sdata, auth_data->timeout);
} else {
- auth_data->timeout_started = false;
+ auth_data->timeout =
+ round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
+ auth_data->timeout_started = true;
+ run_again(sdata, auth_data->timeout);
}
return 0;
@@ -3434,7 +3448,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
assoc_data->timeout_started = true;
run_again(sdata, assoc_data->timeout);
} else {
- assoc_data->timeout_started = false;
+ assoc_data->timeout =
+ round_jiffies_up(jiffies +
+ IEEE80211_ASSOC_TIMEOUT_LONG);
+ assoc_data->timeout_started = true;
+ run_again(sdata, assoc_data->timeout);
}
return 0;
@@ -3829,7 +3847,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
cbss->channel,
ht_oper, vht_oper,
- &chandef, true);
+ &chandef, false);
sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
local->rx_chains);
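
[Editor's note] The auth/assoc hunks close a hang: when the driver reports TX status asynchronously (tx_flags != 0), the old code armed no timer at all, so a lost status report stalled the state machine forever. Now a longer timeout is always armed, rounded with round_jiffies_up() so it can coalesce with other timers. A rough userspace sketch of that rounding (the real helper also applies a per-CPU skew, omitted here):

#include <stdio.h>

#define HZ 100

/* Sketch of round_jiffies_up(): round a tick value up to the next
 * whole second so nearby timers expire together and share wakeups. */
static unsigned long round_jiffies_up_sketch(unsigned long j)
{
    return (j + HZ - 1) / HZ * HZ;
}

int main(void)
{
    unsigned long now = 12345;
    unsigned long timeout = now + HZ / 2;   /* HZ/2 = *_TIMEOUT_LONG */
    printf("raw=%lu rounded=%lu\n",
           timeout, round_jiffies_up_sketch(timeout));
    return 0;                               /* raw=12395 rounded=12400 */
}
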
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 7fc5d0d8149a..340126204343 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -99,10 +99,13 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
}
mutex_unlock(&local->sta_mtx);
- /* remove all interfaces */
+ /* remove all interfaces that were created in the driver */
list_for_each_entry(sdata, &local->interfaces, list) {
- if (!ieee80211_sdata_running(sdata))
+ if (!ieee80211_sdata_running(sdata) ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MONITOR)
continue;
+
drv_remove_interface(local, sdata);
}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index ac7ef5414bde..e6512e2ffd20 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -290,7 +290,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
struct minstrel_rate *msr, *mr;
unsigned int ndx;
bool mrr_capable;
- bool prev_sample = mi->prev_sample;
+ bool prev_sample;
int delta;
int sampling_ratio;
@@ -314,6 +314,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
(mi->sample_count + mi->sample_deferred / 2);
/* delta < 0: no sampling required */
+ prev_sample = mi->prev_sample;
mi->prev_sample = false;
if (delta < 0 || (!mrr_capable && prev_sample))
return;
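
[Editor's note] The minstrel hunk moves the snapshot of mi->prev_sample from the declaration down to the point where the flag is cleared, so the decision below consumes the flag's current value rather than a copy taken before the intervening updates ran. The consume-a-latch idiom, as a tiny sketch:

#include <stdio.h>
#include <stdbool.h>

/* Snapshot a one-shot flag immediately before clearing it, so the
 * caller decides on the value that is actually being consumed. */
static bool consume_flag(bool *flag)
{
    bool was_set = *flag;
    *flag = false;
    return was_set;
}

int main(void)
{
    bool prev_sample = true;
    printf("%d %d\n", consume_flag(&prev_sample),
           consume_flag(&prev_sample)); /* 1 0 */
    return 0;
}
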
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 5b2d3012b983..f5aed963b22e 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -804,10 +804,18 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+ rate->count = 1;
+
+ if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
+ int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
+ rate->idx = mp->cck_rates[idx];
+ rate->flags = 0;
+ return;
+ }
+
rate->idx = sample_idx % MCS_GROUP_RATES +
(sample_group->streams - 1) * MCS_GROUP_RATES;
rate->flags = IEEE80211_TX_RC_MCS | sample_group->flags;
- rate->count = 1;
}
static void
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 23dbcfc69b3b..2c5a79bd3777 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -936,8 +936,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
- /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
- if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
+ /*
+ * Drop duplicate 802.11 retransmissions
+ * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
+ */
+ if (rx->skb->len >= 24 && rx->sta &&
+ !ieee80211_is_ctl(hdr->frame_control) &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1)) {
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
rx->sta->last_seq_ctrl[rx->seqno_idx] ==
hdr->seq_ctrl)) {
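
[Editor's note] The widened condition only runs duplicate detection on frames that actually carry a sequence-control field: control frames and QoS-null frames are excluded, and rx->skb->len >= 24 ensures the header is long enough to contain seq_ctrl before it is read. A toy duplicate detector in the spirit of IEEE 802.11-2012 9.3.2.10 (names are ours):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct sta_state { uint16_t last_seq_ctrl; };

/* A retransmitted frame (retry bit set) whose sequence control matches
 * the last one accepted from this sender is a duplicate. */
static bool is_duplicate(struct sta_state *sta, uint16_t seq_ctrl, bool retry)
{
    if (retry && sta->last_seq_ctrl == seq_ctrl)
        return true;
    sta->last_seq_ctrl = seq_ctrl;
    return false;
}

int main(void)
{
    struct sta_state sta = { 0xFFFF };
    printf("%d\n", is_duplicate(&sta, 0x0010, false)); /* 0: first copy */
    printf("%d\n", is_duplicate(&sta, 0x0010, true));  /* 1: retransmission */
    return 0;
}
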
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index c63b618cd619..4fd1ca94fd4a 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -293,6 +293,11 @@ void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
sizeof(exp->tuple.dst.u3) - len);
exp->tuple.dst.u.all = *dst;
+
+#ifdef CONFIG_NF_NAT_NEEDED
+ memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
+ memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
+#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 7dcc376eea5f..2f8010707d01 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
__u32 seq, ack, sack, end, win, swin;
s16 receiver_offset;
- bool res;
+ bool res, in_recv_win;
/*
* Get the required data from the packet.
@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
receiver->td_scale);
+ /* Is the ending sequence in the receive window (if available)? */
+ in_recv_win = !receiver->td_maxwin ||
+ after(end, sender->td_end - receiver->td_maxwin - 1);
+
pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
before(seq, sender->td_maxend + 1),
- after(end, sender->td_end - receiver->td_maxwin - 1),
+ (in_recv_win ? 1 : 0),
before(sack, receiver->td_end + 1),
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
if (before(seq, sender->td_maxend + 1) &&
- after(end, sender->td_end - receiver->td_maxwin - 1) &&
+ in_recv_win &&
before(sack, receiver->td_end + 1) &&
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
/*
@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: %s ",
before(seq, sender->td_maxend + 1) ?
- after(end, sender->td_end - receiver->td_maxwin - 1) ?
+ in_recv_win ?
before(sack, receiver->td_end + 1) ?
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
: "ACK is under the lower bound (possible overly delayed ACK)"
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 962e9792e317..d92cc317bf8b 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(inst->group_num);
+ memset(&pmsg, 0, sizeof(pmsg));
pmsg.hw_protocol = skb->protocol;
pmsg.hook = hooknum;
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
if (indev && skb->dev &&
skb->mac_header != skb->network_header) {
struct nfulnl_msg_packet_hw phw;
- int len = dev_parse_header(skb, phw.hw_addr);
+ int len;
+
+ memset(&phw, 0, sizeof(phw));
+ len = dev_parse_header(skb, phw.hw_addr);
if (len > 0) {
phw.hw_addrlen = htons(len);
if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 971ea145ab3e..8a703c3dd318 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
if (indev && entskb->dev &&
entskb->mac_header != entskb->network_header) {
struct nfqnl_msg_packet_hw phw;
- int len = dev_parse_header(entskb, phw.hw_addr);
+ int len;
+
+ memset(&phw, 0, sizeof(phw));
+ len = dev_parse_header(entskb, phw.hw_addr);
if (len) {
phw.hw_addrlen = htons(len);
if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
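
[Editor's note] Both nfnetlink hunks zero the on-stack phw before dev_parse_header() fills in the hardware address: the helper writes only len bytes, so without the memset() the unwritten tail of hw_addr (plus any struct padding) would be copied verbatim to userspace by nla_put(). A small simulation of the partial-fill leak and the fix (types and the parser are stand-ins):

#include <stdio.h>
#include <string.h>

struct packet_hw {
    unsigned short hw_addrlen;
    unsigned char  hw_addr[8];
};

/* Stand-in for dev_parse_header(): fills only the first 'len' bytes. */
static int parse_header(unsigned char *dst, int len)
{
    memset(dst, 0x11, len);
    return len;
}

int main(void)
{
    struct packet_hw phw;
    memset(&phw, 0xEE, sizeof(phw));   /* simulate stale stack contents */

    memset(&phw, 0, sizeof(phw));      /* the fix from both hunks */
    phw.hw_addrlen = (unsigned short)parse_header(phw.hw_addr, 6);

    /* Without the zeroing memset, bytes 6 and 7 would still read 0xEE
     * and be shipped to userspace inside the netlink attribute. */
    printf("tail: %02x %02x\n", phw.hw_addr[6], phw.hw_addr[7]);
    return 0;
}
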
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 7011c71646f0..6113cc7efffc 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
{
const struct xt_tcpmss_info *info = par->targinfo;
struct tcphdr *tcph;
- unsigned int tcplen, i;
+ int len, tcp_hdrlen;
+ unsigned int i;
__be16 oldval;
u16 newmss;
u8 *opt;
@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
if (!skb_make_writable(skb, skb->len))
return -1;
- tcplen = skb->len - tcphoff;
+ len = skb->len - tcphoff;
+ if (len < (int)sizeof(struct tcphdr))
+ return -1;
+
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+ tcp_hdrlen = tcph->doff * 4;
- /* Header cannot be larger than the packet */
- if (tcplen < tcph->doff*4)
+ if (len < tcp_hdrlen)
return -1;
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
newmss = info->mss;
opt = (u_int8_t *)tcph;
- for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
- if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
- opt[i+1] == TCPOLEN_MSS) {
+ for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
+ if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
u_int16_t oldmss;
oldmss = (opt[i+2] << 8) | opt[i+3];
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
}
/* There is data after the header so the option can't be added
- without moving it, and doing so may make the SYN packet
- itself too large. Accept the packet unmodified instead. */
- if (tcplen > tcph->doff*4)
+ * without moving it, and doing so may make the SYN packet
+ * itself too large. Accept the packet unmodified instead.
+ */
+ if (len > tcp_hdrlen)
return 0;
/*
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
newmss = min(newmss, (u16)1220);
opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
- memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
+ memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
inet_proto_csum_replace2(&tcph->check, skb,
- htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
+ htons(len), htons(len + TCPOLEN_MSS), 1);
opt[0] = TCPOPT_MSS;
opt[1] = TCPOLEN_MSS;
opt[2] = (newmss & 0xff00) >> 8;
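
[Editor's note] The rewrite fixes two bounds problems at once: len is validated against sizeof(struct tcphdr) before tcph->doff is trusted, and the option walk's bound becomes i <= tcp_hdrlen - TCPOLEN_MSS, so the lookahead opt[i+1] (and the value bytes behind it) can never read past the header. A self-contained sketch of the hardened walk, with 20 standing in for sizeof(struct tcphdr):

#include <stdio.h>
#include <stdint.h>

#define TCPOPT_EOL  0
#define TCPOPT_NOP  1
#define TCPOPT_MSS  2
#define TCPOLEN_MSS 4

/* Length of the option at opt[i]; NOP/EOL are one byte. */
static unsigned optlen(const uint8_t *opt, unsigned i)
{
    if (opt[i] <= TCPOPT_NOP || opt[i + 1] == 0)
        return 1;
    return opt[i + 1];
}

/* Returns the MSS value, or -1 if no well-formed MSS option is found.
 * The loop bound mirrors the patched xt_TCPMSS walk: continue only
 * while a full 4-byte MSS option still fits inside the header. */
static int find_mss(const uint8_t *opt, unsigned hdrlen)
{
    for (unsigned i = 20; i + TCPOLEN_MSS <= hdrlen; i += optlen(opt, i))
        if (opt[i] == TCPOPT_MSS && opt[i + 1] == TCPOLEN_MSS)
            return (opt[i + 2] << 8) | opt[i + 3];
    return -1;
}

int main(void)
{
    uint8_t hdr[24] = { 0 };
    hdr[20] = TCPOPT_MSS; hdr[21] = TCPOLEN_MSS;
    hdr[22] = 0x05; hdr[23] = 0xB4;              /* MSS = 1460 */
    printf("mss=%d\n", find_mss(hdr, sizeof(hdr)));
    return 0;
}
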
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index b68fa191710f..625fa1d636a0 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
struct tcphdr *tcph;
u_int16_t n, o;
u_int8_t *opt;
- int len;
+ int len, tcp_hdrlen;
/* This is a fragment, no TCP header is available */
if (par->fragoff != 0)
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
return NF_DROP;
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
- if (tcph->doff * 4 > len)
+ tcp_hdrlen = tcph->doff * 4;
+
+ if (len < tcp_hdrlen)
return NF_DROP;
opt = (u_int8_t *)tcph;
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
* Walk through all TCP options - if we find some option to remove,
* set all octets to %TCPOPT_NOP and adjust checksum.
*/
- for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
+ for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
optl = optlen(opt, i);
- if (i + optl > tcp_hdrlen(skb))
+ if (i + optl > tcp_hdrlen)
break;
if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index f8b71911037a..20b15916f403 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -172,7 +172,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
/* Ignore non-transparent sockets,
if XT_SOCKET_TRANSPARENT is used */
- if (info && info->flags & XT_SOCKET_TRANSPARENT)
+ if (info->flags & XT_SOCKET_TRANSPARENT)
transparent = ((sk->sk_state != TCP_TIME_WAIT &&
inet_sk(sk)->transparent) ||
(sk->sk_state == TCP_TIME_WAIT &&
@@ -196,7 +196,11 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
static bool
socket_mt4_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
- return socket_match(skb, par, NULL);
+ static struct xt_socket_mtinfo1 xt_info_v0 = {
+ .flags = 0,
+ };
+
+ return socket_match(skb, par, &xt_info_v0);
}
static bool
@@ -314,7 +318,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
/* Ignore non-transparent sockets,
if XT_SOCKET_TRANSPARENT is used */
- if (info && info->flags & XT_SOCKET_TRANSPARENT)
+ if (info->flags & XT_SOCKET_TRANSPARENT)
transparent = ((sk->sk_state != TCP_TIME_WAIT &&
inet_sk(sk)->transparent) ||
(sk->sk_state == TCP_TIME_WAIT &&
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index c15042f987bd..a1100640495d 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -691,8 +691,8 @@ static int netlbl_cipsov4_remove_cb(struct netlbl_dom_map *entry, void *arg)
{
struct netlbl_domhsh_walk_arg *cb_arg = arg;
- if (entry->type == NETLBL_NLTYPE_CIPSOV4 &&
- entry->type_def.cipsov4->doi == cb_arg->doi)
+ if (entry->def.type == NETLBL_NLTYPE_CIPSOV4 &&
+ entry->def.cipso->doi == cb_arg->doi)
return netlbl_domhsh_remove_entry(entry, cb_arg->audit_info);
return 0;
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 6bb1d42f0fac..85d842e6e431 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -84,15 +84,15 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
#endif /* IPv6 */
ptr = container_of(entry, struct netlbl_dom_map, rcu);
- if (ptr->type == NETLBL_NLTYPE_ADDRSELECT) {
+ if (ptr->def.type == NETLBL_NLTYPE_ADDRSELECT) {
netlbl_af4list_foreach_safe(iter4, tmp4,
- &ptr->type_def.addrsel->list4) {
+ &ptr->def.addrsel->list4) {
netlbl_af4list_remove_entry(iter4);
kfree(netlbl_domhsh_addr4_entry(iter4));
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_safe(iter6, tmp6,
- &ptr->type_def.addrsel->list6) {
+ &ptr->def.addrsel->list6) {
netlbl_af6list_remove_entry(iter6);
kfree(netlbl_domhsh_addr6_entry(iter6));
}
@@ -213,21 +213,21 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
if (addr4 != NULL) {
struct netlbl_domaddr4_map *map4;
map4 = netlbl_domhsh_addr4_entry(addr4);
- type = map4->type;
- cipsov4 = map4->type_def.cipsov4;
+ type = map4->def.type;
+ cipsov4 = map4->def.cipso;
netlbl_af4list_audit_addr(audit_buf, 0, NULL,
addr4->addr, addr4->mask);
#if IS_ENABLED(CONFIG_IPV6)
} else if (addr6 != NULL) {
struct netlbl_domaddr6_map *map6;
map6 = netlbl_domhsh_addr6_entry(addr6);
- type = map6->type;
+ type = map6->def.type;
netlbl_af6list_audit_addr(audit_buf, 0, NULL,
&addr6->addr, &addr6->mask);
#endif /* IPv6 */
} else {
- type = entry->type;
- cipsov4 = entry->type_def.cipsov4;
+ type = entry->def.type;
+ cipsov4 = entry->def.cipso;
}
switch (type) {
case NETLBL_NLTYPE_UNLABELED:
@@ -265,26 +265,25 @@ static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
if (entry == NULL)
return -EINVAL;
- switch (entry->type) {
+ switch (entry->def.type) {
case NETLBL_NLTYPE_UNLABELED:
- if (entry->type_def.cipsov4 != NULL ||
- entry->type_def.addrsel != NULL)
+ if (entry->def.cipso != NULL || entry->def.addrsel != NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_CIPSOV4:
- if (entry->type_def.cipsov4 == NULL)
+ if (entry->def.cipso == NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_ADDRSELECT:
- netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
+ netlbl_af4list_foreach(iter4, &entry->def.addrsel->list4) {
map4 = netlbl_domhsh_addr4_entry(iter4);
- switch (map4->type) {
+ switch (map4->def.type) {
case NETLBL_NLTYPE_UNLABELED:
- if (map4->type_def.cipsov4 != NULL)
+ if (map4->def.cipso != NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_CIPSOV4:
- if (map4->type_def.cipsov4 == NULL)
+ if (map4->def.cipso == NULL)
return -EINVAL;
break;
default:
@@ -292,9 +291,9 @@ static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
}
}
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
+ netlbl_af6list_foreach(iter6, &entry->def.addrsel->list6) {
map6 = netlbl_domhsh_addr6_entry(iter6);
- switch (map6->type) {
+ switch (map6->def.type) {
case NETLBL_NLTYPE_UNLABELED:
break;
default:
@@ -402,32 +401,31 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
rcu_assign_pointer(netlbl_domhsh_def, entry);
}
- if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
+ if (entry->def.type == NETLBL_NLTYPE_ADDRSELECT) {
netlbl_af4list_foreach_rcu(iter4,
- &entry->type_def.addrsel->list4)
+ &entry->def.addrsel->list4)
netlbl_domhsh_audit_add(entry, iter4, NULL,
ret_val, audit_info);
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6,
- &entry->type_def.addrsel->list6)
+ &entry->def.addrsel->list6)
netlbl_domhsh_audit_add(entry, NULL, iter6,
ret_val, audit_info);
#endif /* IPv6 */
} else
netlbl_domhsh_audit_add(entry, NULL, NULL,
ret_val, audit_info);
- } else if (entry_old->type == NETLBL_NLTYPE_ADDRSELECT &&
- entry->type == NETLBL_NLTYPE_ADDRSELECT) {
+ } else if (entry_old->def.type == NETLBL_NLTYPE_ADDRSELECT &&
+ entry->def.type == NETLBL_NLTYPE_ADDRSELECT) {
struct list_head *old_list4;
struct list_head *old_list6;
- old_list4 = &entry_old->type_def.addrsel->list4;
- old_list6 = &entry_old->type_def.addrsel->list6;
+ old_list4 = &entry_old->def.addrsel->list4;
+ old_list6 = &entry_old->def.addrsel->list6;
/* we only allow the addition of address selectors if all of
* the selectors do not exist in the existing domain map */
- netlbl_af4list_foreach_rcu(iter4,
- &entry->type_def.addrsel->list4)
+ netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4)
if (netlbl_af4list_search_exact(iter4->addr,
iter4->mask,
old_list4)) {
@@ -435,8 +433,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
goto add_return;
}
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach_rcu(iter6,
- &entry->type_def.addrsel->list6)
+ netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6)
if (netlbl_af6list_search_exact(&iter6->addr,
&iter6->mask,
old_list6)) {
@@ -446,7 +443,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
#endif /* IPv6 */
netlbl_af4list_foreach_safe(iter4, tmp4,
- &entry->type_def.addrsel->list4) {
+ &entry->def.addrsel->list4) {
netlbl_af4list_remove_entry(iter4);
iter4->valid = 1;
ret_val = netlbl_af4list_add(iter4, old_list4);
@@ -457,7 +454,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_safe(iter6, tmp6,
- &entry->type_def.addrsel->list6) {
+ &entry->def.addrsel->list6) {
netlbl_af6list_remove_entry(iter6);
iter6->valid = 1;
ret_val = netlbl_af6list_add(iter6, old_list6);
@@ -538,18 +535,18 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
struct netlbl_af4list *iter4;
struct netlbl_domaddr4_map *map4;
- switch (entry->type) {
+ switch (entry->def.type) {
case NETLBL_NLTYPE_ADDRSELECT:
netlbl_af4list_foreach_rcu(iter4,
- &entry->type_def.addrsel->list4) {
+ &entry->def.addrsel->list4) {
map4 = netlbl_domhsh_addr4_entry(iter4);
- cipso_v4_doi_putdef(map4->type_def.cipsov4);
+ cipso_v4_doi_putdef(map4->def.cipso);
}
/* no need to check the IPv6 list since we currently
* support only unlabeled protocols for IPv6 */
break;
case NETLBL_NLTYPE_CIPSOV4:
- cipso_v4_doi_putdef(entry->type_def.cipsov4);
+ cipso_v4_doi_putdef(entry->def.cipso);
break;
}
call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
@@ -590,20 +587,21 @@ int netlbl_domhsh_remove_af4(const char *domain,
entry_map = netlbl_domhsh_search(domain);
else
entry_map = netlbl_domhsh_search_def(domain);
- if (entry_map == NULL || entry_map->type != NETLBL_NLTYPE_ADDRSELECT)
+ if (entry_map == NULL ||
+ entry_map->def.type != NETLBL_NLTYPE_ADDRSELECT)
goto remove_af4_failure;
spin_lock(&netlbl_domhsh_lock);
entry_addr = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
- &entry_map->type_def.addrsel->list4);
+ &entry_map->def.addrsel->list4);
spin_unlock(&netlbl_domhsh_lock);
if (entry_addr == NULL)
goto remove_af4_failure;
- netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4)
+ netlbl_af4list_foreach_rcu(iter4, &entry_map->def.addrsel->list4)
goto remove_af4_single_addr;
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6)
+ netlbl_af6list_foreach_rcu(iter6, &entry_map->def.addrsel->list6)
goto remove_af4_single_addr;
#endif /* IPv6 */
/* the domain mapping is empty so remove it from the mapping table */
@@ -616,7 +614,7 @@ remove_af4_single_addr:
* shouldn't be a problem */
synchronize_rcu();
entry = netlbl_domhsh_addr4_entry(entry_addr);
- cipso_v4_doi_putdef(entry->type_def.cipsov4);
+ cipso_v4_doi_putdef(entry->def.cipso);
kfree(entry);
return 0;
@@ -693,8 +691,8 @@ struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain)
* responsible for ensuring that rcu_read_[un]lock() is called.
*
*/
-struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
- __be32 addr)
+struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain,
+ __be32 addr)
{
struct netlbl_dom_map *dom_iter;
struct netlbl_af4list *addr_iter;
@@ -702,15 +700,13 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
dom_iter = netlbl_domhsh_search_def(domain);
if (dom_iter == NULL)
return NULL;
- if (dom_iter->type != NETLBL_NLTYPE_ADDRSELECT)
- return NULL;
- addr_iter = netlbl_af4list_search(addr,
- &dom_iter->type_def.addrsel->list4);
+ if (dom_iter->def.type != NETLBL_NLTYPE_ADDRSELECT)
+ return &dom_iter->def;
+ addr_iter = netlbl_af4list_search(addr, &dom_iter->def.addrsel->list4);
if (addr_iter == NULL)
return NULL;
-
- return netlbl_domhsh_addr4_entry(addr_iter);
+ return &(netlbl_domhsh_addr4_entry(addr_iter)->def);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -725,7 +721,7 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
* responsible for ensuring that rcu_read_[un]lock() is called.
*
*/
-struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
+struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
const struct in6_addr *addr)
{
struct netlbl_dom_map *dom_iter;
@@ -734,15 +730,13 @@ struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
dom_iter = netlbl_domhsh_search_def(domain);
if (dom_iter == NULL)
return NULL;
- if (dom_iter->type != NETLBL_NLTYPE_ADDRSELECT)
- return NULL;
- addr_iter = netlbl_af6list_search(addr,
- &dom_iter->type_def.addrsel->list6);
+ if (dom_iter->def.type != NETLBL_NLTYPE_ADDRSELECT)
+ return &dom_iter->def;
+ addr_iter = netlbl_af6list_search(addr, &dom_iter->def.addrsel->list6);
if (addr_iter == NULL)
return NULL;
-
- return netlbl_domhsh_addr6_entry(addr_iter);
+ return &(netlbl_domhsh_addr6_entry(addr_iter)->def);
}
#endif /* IPv6 */
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index 90872c4ca30f..b9be0eed8980 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -43,37 +43,35 @@
#define NETLBL_DOMHSH_BITSIZE 7
/* Domain mapping definition structures */
+struct netlbl_domaddr_map {
+ struct list_head list4;
+ struct list_head list6;
+};
+struct netlbl_dommap_def {
+ u32 type;
+ union {
+ struct netlbl_domaddr_map *addrsel;
+ struct cipso_v4_doi *cipso;
+ };
+};
#define netlbl_domhsh_addr4_entry(iter) \
container_of(iter, struct netlbl_domaddr4_map, list)
struct netlbl_domaddr4_map {
- u32 type;
- union {
- struct cipso_v4_doi *cipsov4;
- } type_def;
+ struct netlbl_dommap_def def;
struct netlbl_af4list list;
};
#define netlbl_domhsh_addr6_entry(iter) \
container_of(iter, struct netlbl_domaddr6_map, list)
struct netlbl_domaddr6_map {
- u32 type;
-
- /* NOTE: no 'type_def' union needed at present since we don't currently
- * support any IPv6 labeling protocols */
+ struct netlbl_dommap_def def;
struct netlbl_af6list list;
};
-struct netlbl_domaddr_map {
- struct list_head list4;
- struct list_head list6;
-};
+
struct netlbl_dom_map {
char *domain;
- u32 type;
- union {
- struct cipso_v4_doi *cipsov4;
- struct netlbl_domaddr_map *addrsel;
- } type_def;
+ struct netlbl_dommap_def def;
u32 valid;
struct list_head list;
@@ -97,16 +95,16 @@ int netlbl_domhsh_remove_af4(const char *domain,
int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info);
struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain);
-struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
- __be32 addr);
+struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain,
+ __be32 addr);
+#if IS_ENABLED(CONFIG_IPV6)
+struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
+ const struct in6_addr *addr);
+#endif /* IPv6 */
+
int netlbl_domhsh_walk(u32 *skip_bkt,
u32 *skip_chain,
int (*callback) (struct netlbl_dom_map *entry, void *arg),
void *cb_arg);
-#if IS_ENABLED(CONFIG_IPV6)
-struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
- const struct in6_addr *addr);
-#endif /* IPv6 */
-
#endif
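
[Editor's note] The header change is the heart of this refactor: the type + union pair that used to be duplicated (with slightly different members) in netlbl_dom_map, netlbl_domaddr4_map, and netlbl_domaddr6_map becomes one embedded struct netlbl_dommap_def, which is why the lookup functions above can now return a netlbl_dommap_def pointer whether the match was per-address or domain-wide. The shape of the pattern, reduced to a sketch with our own names:

#include <stdio.h>

enum map_type { MAP_UNLABELED, MAP_CIPSO };

/* Shared "type + payload" definition, embedded by each entry kind --
 * the same consolidation the netlabel header above performs. */
struct map_def {
    enum map_type type;
    union {
        int cipso_doi;      /* stand-in for struct cipso_v4_doi * */
    };
};

struct dom_entry  { const char *domain; struct map_def def; };
struct addr_entry { unsigned addr;      struct map_def def; };

/* Callers no longer care which entry kind matched: both hand back
 * a pointer to the embedded definition. */
static struct map_def *lookup(struct dom_entry *d, struct addr_entry *a,
                              int have_addr)
{
    return have_addr ? &a->def : &d->def;
}

int main(void)
{
    struct dom_entry  d = { "example", { .type = MAP_CIPSO, .cipso_doi = 3 } };
    struct addr_entry a = { 0, { .type = MAP_UNLABELED } };
    printf("%d %d\n", lookup(&d, &a, 0)->type, lookup(&d, &a, 1)->type);
    return 0;
}
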
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 7c94aedd0912..96a458e12f60 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -122,7 +122,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
}
if (addr == NULL && mask == NULL)
- entry->type = NETLBL_NLTYPE_UNLABELED;
+ entry->def.type = NETLBL_NLTYPE_UNLABELED;
else if (addr != NULL && mask != NULL) {
addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
if (addrmap == NULL)
@@ -137,7 +137,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
if (map4 == NULL)
goto cfg_unlbl_map_add_failure;
- map4->type = NETLBL_NLTYPE_UNLABELED;
+ map4->def.type = NETLBL_NLTYPE_UNLABELED;
map4->list.addr = addr4->s_addr & mask4->s_addr;
map4->list.mask = mask4->s_addr;
map4->list.valid = 1;
@@ -154,7 +154,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
if (map6 == NULL)
goto cfg_unlbl_map_add_failure;
- map6->type = NETLBL_NLTYPE_UNLABELED;
+ map6->def.type = NETLBL_NLTYPE_UNLABELED;
map6->list.addr = *addr6;
map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
@@ -174,8 +174,8 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
break;
}
- entry->type_def.addrsel = addrmap;
- entry->type = NETLBL_NLTYPE_ADDRSELECT;
+ entry->def.addrsel = addrmap;
+ entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
} else {
ret_val = -EINVAL;
goto cfg_unlbl_map_add_failure;
@@ -355,8 +355,8 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
}
if (addr == NULL && mask == NULL) {
- entry->type_def.cipsov4 = doi_def;
- entry->type = NETLBL_NLTYPE_CIPSOV4;
+ entry->def.cipso = doi_def;
+ entry->def.type = NETLBL_NLTYPE_CIPSOV4;
} else if (addr != NULL && mask != NULL) {
addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
if (addrmap == NULL)
@@ -367,8 +367,8 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
if (addrinfo == NULL)
goto out_addrinfo;
- addrinfo->type_def.cipsov4 = doi_def;
- addrinfo->type = NETLBL_NLTYPE_CIPSOV4;
+ addrinfo->def.cipso = doi_def;
+ addrinfo->def.type = NETLBL_NLTYPE_CIPSOV4;
addrinfo->list.addr = addr->s_addr & mask->s_addr;
addrinfo->list.mask = mask->s_addr;
addrinfo->list.valid = 1;
@@ -376,8 +376,8 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
if (ret_val != 0)
goto cfg_cipsov4_map_add_failure;
- entry->type_def.addrsel = addrmap;
- entry->type = NETLBL_NLTYPE_ADDRSELECT;
+ entry->def.addrsel = addrmap;
+ entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
} else {
ret_val = -EINVAL;
goto out_addrmap;
@@ -657,14 +657,14 @@ int netlbl_sock_setattr(struct sock *sk,
}
switch (family) {
case AF_INET:
- switch (dom_entry->type) {
+ switch (dom_entry->def.type) {
case NETLBL_NLTYPE_ADDRSELECT:
ret_val = -EDESTADDRREQ;
break;
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_sock_setattr(sk,
- dom_entry->type_def.cipsov4,
- secattr);
+ dom_entry->def.cipso,
+ secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
ret_val = 0;
@@ -754,23 +754,22 @@ int netlbl_conn_setattr(struct sock *sk,
{
int ret_val;
struct sockaddr_in *addr4;
- struct netlbl_domaddr4_map *af4_entry;
+ struct netlbl_dommap_def *entry;
rcu_read_lock();
switch (addr->sa_family) {
case AF_INET:
addr4 = (struct sockaddr_in *)addr;
- af4_entry = netlbl_domhsh_getentry_af4(secattr->domain,
- addr4->sin_addr.s_addr);
- if (af4_entry == NULL) {
+ entry = netlbl_domhsh_getentry_af4(secattr->domain,
+ addr4->sin_addr.s_addr);
+ if (entry == NULL) {
ret_val = -ENOENT;
goto conn_setattr_return;
}
- switch (af4_entry->type) {
+ switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_sock_setattr(sk,
- af4_entry->type_def.cipsov4,
- secattr);
+ entry->cipso, secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
@@ -812,36 +811,21 @@ int netlbl_req_setattr(struct request_sock *req,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val;
- struct netlbl_dom_map *dom_entry;
- struct netlbl_domaddr4_map *af4_entry;
- u32 proto_type;
- struct cipso_v4_doi *proto_cv4;
+ struct netlbl_dommap_def *entry;
rcu_read_lock();
- dom_entry = netlbl_domhsh_getentry(secattr->domain);
- if (dom_entry == NULL) {
- ret_val = -ENOENT;
- goto req_setattr_return;
- }
switch (req->rsk_ops->family) {
case AF_INET:
- if (dom_entry->type == NETLBL_NLTYPE_ADDRSELECT) {
- struct inet_request_sock *req_inet = inet_rsk(req);
- af4_entry = netlbl_domhsh_getentry_af4(secattr->domain,
- req_inet->rmt_addr);
- if (af4_entry == NULL) {
- ret_val = -ENOENT;
- goto req_setattr_return;
- }
- proto_type = af4_entry->type;
- proto_cv4 = af4_entry->type_def.cipsov4;
- } else {
- proto_type = dom_entry->type;
- proto_cv4 = dom_entry->type_def.cipsov4;
+ entry = netlbl_domhsh_getentry_af4(secattr->domain,
+ inet_rsk(req)->rmt_addr);
+ if (entry == NULL) {
+ ret_val = -ENOENT;
+ goto req_setattr_return;
}
- switch (proto_type) {
+ switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
- ret_val = cipso_v4_req_setattr(req, proto_cv4, secattr);
+ ret_val = cipso_v4_req_setattr(req,
+ entry->cipso, secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
@@ -899,23 +883,21 @@ int netlbl_skbuff_setattr(struct sk_buff *skb,
{
int ret_val;
struct iphdr *hdr4;
- struct netlbl_domaddr4_map *af4_entry;
+ struct netlbl_dommap_def *entry;
rcu_read_lock();
switch (family) {
case AF_INET:
hdr4 = ip_hdr(skb);
- af4_entry = netlbl_domhsh_getentry_af4(secattr->domain,
- hdr4->daddr);
- if (af4_entry == NULL) {
+ entry = netlbl_domhsh_getentry_af4(secattr->domain, hdr4->daddr);
+ if (entry == NULL) {
ret_val = -ENOENT;
goto skbuff_setattr_return;
}
- switch (af4_entry->type) {
+ switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
- ret_val = cipso_v4_skbuff_setattr(skb,
- af4_entry->type_def.cipsov4,
- secattr);
+ ret_val = cipso_v4_skbuff_setattr(skb, entry->cipso,
+ secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index c5384ffc6146..dd1c37d7acbc 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -104,7 +104,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
ret_val = -ENOMEM;
goto add_failure;
}
- entry->type = nla_get_u32(info->attrs[NLBL_MGMT_A_PROTOCOL]);
+ entry->def.type = nla_get_u32(info->attrs[NLBL_MGMT_A_PROTOCOL]);
if (info->attrs[NLBL_MGMT_A_DOMAIN]) {
size_t tmp_size = nla_len(info->attrs[NLBL_MGMT_A_DOMAIN]);
entry->domain = kmalloc(tmp_size, GFP_KERNEL);
@@ -116,12 +116,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
info->attrs[NLBL_MGMT_A_DOMAIN], tmp_size);
}
- /* NOTE: internally we allow/use a entry->type value of
+ /* NOTE: internally we allow/use an entry->def.type value of
* NETLBL_NLTYPE_ADDRSELECT but we don't currently allow users
* to pass that as a protocol value because we need to know the
* "real" protocol */
- switch (entry->type) {
+ switch (entry->def.type) {
case NETLBL_NLTYPE_UNLABELED:
break;
case NETLBL_NLTYPE_CIPSOV4:
@@ -132,7 +132,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
cipsov4 = cipso_v4_doi_getdef(tmp_val);
if (cipsov4 == NULL)
goto add_failure;
- entry->type_def.cipsov4 = cipsov4;
+ entry->def.cipso = cipsov4;
break;
default:
goto add_failure;
@@ -172,9 +172,9 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
map->list.addr = addr->s_addr & mask->s_addr;
map->list.mask = mask->s_addr;
map->list.valid = 1;
- map->type = entry->type;
+ map->def.type = entry->def.type;
if (cipsov4)
- map->type_def.cipsov4 = cipsov4;
+ map->def.cipso = cipsov4;
ret_val = netlbl_af4list_add(&map->list, &addrmap->list4);
if (ret_val != 0) {
@@ -182,8 +182,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
goto add_failure;
}
- entry->type = NETLBL_NLTYPE_ADDRSELECT;
- entry->type_def.addrsel = addrmap;
+ entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
+ entry->def.addrsel = addrmap;
#if IS_ENABLED(CONFIG_IPV6)
} else if (info->attrs[NLBL_MGMT_A_IPV6ADDR]) {
struct in6_addr *addr;
@@ -223,7 +223,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
map->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
map->list.mask = *mask;
map->list.valid = 1;
- map->type = entry->type;
+ map->def.type = entry->def.type;
ret_val = netlbl_af6list_add(&map->list, &addrmap->list6);
if (ret_val != 0) {
@@ -231,8 +231,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
goto add_failure;
}
- entry->type = NETLBL_NLTYPE_ADDRSELECT;
- entry->type_def.addrsel = addrmap;
+ entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
+ entry->def.addrsel = addrmap;
#endif /* IPv6 */
}
@@ -281,14 +281,13 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
return ret_val;
}
- switch (entry->type) {
+ switch (entry->def.type) {
case NETLBL_NLTYPE_ADDRSELECT:
nla_a = nla_nest_start(skb, NLBL_MGMT_A_SELECTORLIST);
if (nla_a == NULL)
return -ENOMEM;
- netlbl_af4list_foreach_rcu(iter4,
- &entry->type_def.addrsel->list4) {
+ netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
struct netlbl_domaddr4_map *map4;
struct in_addr addr_struct;
@@ -310,13 +309,13 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
return ret_val;
map4 = netlbl_domhsh_addr4_entry(iter4);
ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
- map4->type);
+ map4->def.type);
if (ret_val != 0)
return ret_val;
- switch (map4->type) {
+ switch (map4->def.type) {
case NETLBL_NLTYPE_CIPSOV4:
ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI,
- map4->type_def.cipsov4->doi);
+ map4->def.cipso->doi);
if (ret_val != 0)
return ret_val;
break;
@@ -325,8 +324,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
nla_nest_end(skb, nla_b);
}
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach_rcu(iter6,
- &entry->type_def.addrsel->list6) {
+ netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
struct netlbl_domaddr6_map *map6;
nla_b = nla_nest_start(skb, NLBL_MGMT_A_ADDRSELECTOR);
@@ -345,7 +343,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
return ret_val;
map6 = netlbl_domhsh_addr6_entry(iter6);
ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
- map6->type);
+ map6->def.type);
if (ret_val != 0)
return ret_val;
@@ -356,14 +354,14 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
nla_nest_end(skb, nla_a);
break;
case NETLBL_NLTYPE_UNLABELED:
- ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->type);
+ ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->def.type);
break;
case NETLBL_NLTYPE_CIPSOV4:
- ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->type);
+ ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->def.type);
if (ret_val != 0)
return ret_val;
ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI,
- entry->type_def.cipsov4->doi);
+ entry->def.cipso->doi);
break;
}
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index af3531926ee0..8f0897407a2c 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1541,7 +1541,7 @@ int __init netlbl_unlabel_defconf(void)
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (entry == NULL)
return -ENOMEM;
- entry->type = NETLBL_NLTYPE_UNLABELED;
+ entry->def.type = NETLBL_NLTYPE_UNLABELED;
ret_val = netlbl_domhsh_add_default(entry, &audit_info);
if (ret_val != 0)
return ret_val;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2fd6dbea327a..512718adb0d5 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -571,7 +571,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
!capable(CAP_NET_ADMIN))
return -EPERM;
- if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = ops->dumpit,
.done = ops->done,
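
[Editor's note] NLM_F_DUMP is a composite mask (NLM_F_ROOT | NLM_F_MATCH), so the old any-bit test flags & NLM_F_DUMP also fired when only one of the two bits was set; the new == NLM_F_DUMP form requires both. The difference in a few lines:

#include <stdio.h>

#define NLM_F_ROOT  0x100
#define NLM_F_MATCH 0x200
#define NLM_F_DUMP  (NLM_F_ROOT | NLM_F_MATCH)

int main(void)
{
    unsigned flags = NLM_F_ROOT;  /* only one of the two bits set */
    printf("any-bit test: %d\n", (flags & NLM_F_DUMP) != 0);          /* 1 */
    printf("all-bit test: %d\n", (flags & NLM_F_DUMP) == NLM_F_DUMP); /* 0 */
    return 0;
}
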
@@ -877,8 +877,10 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
#ifdef CONFIG_MODULES
if (res == NULL) {
genl_unlock();
+ up_read(&cb_lock);
request_module("net-pf-%d-proto-%d-family-%s",
PF_NETLINK, NETLINK_GENERIC, name);
+ down_read(&cb_lock);
genl_lock();
res = genl_family_find_byname(name);
}
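
[Editor's note] Dropping cb_lock (alongside the genl mutex) around request_module() avoids a deadlock: loading the module can register new generic netlink families, which needs the write side of cb_lock, so it must not be requested while the read side is held. The drop-call-retake shape, as a runnable pthread sketch (compile with -lpthread; names are ours):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t cb_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for request_module(): the work it triggers may need the
 * write side of cb_lock, so the caller must not hold the read side. */
static void load_family(void)
{
    pthread_rwlock_wrlock(&cb_lock);
    /* ... register a new family ... */
    pthread_rwlock_unlock(&cb_lock);
}

static void resolve_family(void)
{
    pthread_rwlock_rdlock(&cb_lock);
    /* ... lookup fails ... */
    pthread_rwlock_unlock(&cb_lock);   /* drop before loading, as in the fix */
    load_family();
    pthread_rwlock_rdlock(&cb_lock);   /* retake and retry the lookup */
    /* ... lookup again ... */
    pthread_rwlock_unlock(&cb_lock);
}

int main(void)
{
    resolve_family();
    puts("no deadlock");
    return 0;
}
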
diff --git a/net/nfc/core.c b/net/nfc/core.c
index dc96a83aa6ab..1d074dd1650f 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -44,7 +44,7 @@ DEFINE_MUTEX(nfc_devlist_mutex);
/* NFC device ID bitmap */
static DEFINE_IDA(nfc_index_ida);
-int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name)
+int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name)
{
int rc = 0;
@@ -62,28 +62,28 @@ int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name)
goto error;
}
- if (!dev->ops->fw_upload) {
+ if (!dev->ops->fw_download) {
rc = -EOPNOTSUPP;
goto error;
}
- dev->fw_upload_in_progress = true;
- rc = dev->ops->fw_upload(dev, firmware_name);
+ dev->fw_download_in_progress = true;
+ rc = dev->ops->fw_download(dev, firmware_name);
if (rc)
- dev->fw_upload_in_progress = false;
+ dev->fw_download_in_progress = false;
error:
device_unlock(&dev->dev);
return rc;
}
-int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name)
+int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name)
{
- dev->fw_upload_in_progress = false;
+ dev->fw_download_in_progress = false;
- return nfc_genl_fw_upload_done(dev, firmware_name);
+ return nfc_genl_fw_download_done(dev, firmware_name);
}
-EXPORT_SYMBOL(nfc_fw_upload_done);
+EXPORT_SYMBOL(nfc_fw_download_done);
/**
* nfc_dev_up - turn on the NFC device
@@ -110,7 +110,7 @@ int nfc_dev_up(struct nfc_dev *dev)
goto error;
}
- if (dev->fw_upload_in_progress) {
+ if (dev->fw_download_in_progress) {
rc = -EBUSY;
goto error;
}
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 7b1c186736eb..fe66908401f5 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -809,14 +809,14 @@ static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb)
}
}
-static int hci_fw_upload(struct nfc_dev *nfc_dev, const char *firmware_name)
+static int hci_fw_download(struct nfc_dev *nfc_dev, const char *firmware_name)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
- if (!hdev->ops->fw_upload)
+ if (!hdev->ops->fw_download)
return -ENOTSUPP;
- return hdev->ops->fw_upload(hdev, firmware_name);
+ return hdev->ops->fw_download(hdev, firmware_name);
}
static struct nfc_ops hci_nfc_ops = {
@@ -831,7 +831,7 @@ static struct nfc_ops hci_nfc_ops = {
.im_transceive = hci_transceive,
.tm_send = hci_tm_send,
.check_presence = hci_check_presence,
- .fw_upload = hci_fw_upload,
+ .fw_download = hci_fw_download,
.discover_se = hci_discover_se,
.enable_se = hci_enable_se,
.disable_se = hci_disable_se,
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig
index 2a2416080b4f..a4f1e42e3481 100644
--- a/net/nfc/nci/Kconfig
+++ b/net/nfc/nci/Kconfig
@@ -11,6 +11,7 @@ config NFC_NCI
config NFC_NCI_SPI
depends on NFC_NCI && SPI
+ select CRC_CCITT
bool "NCI over SPI protocol support"
default n
help
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index b05ad909778f..f16fd59d4160 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1089,7 +1089,7 @@ exit:
return rc;
}
-static int nfc_genl_fw_upload(struct sk_buff *skb, struct genl_info *info)
+static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
{
struct nfc_dev *dev;
int rc;
@@ -1108,13 +1108,13 @@ static int nfc_genl_fw_upload(struct sk_buff *skb, struct genl_info *info)
nla_strlcpy(firmware_name, info->attrs[NFC_ATTR_FIRMWARE_NAME],
sizeof(firmware_name));
- rc = nfc_fw_upload(dev, firmware_name);
+ rc = nfc_fw_download(dev, firmware_name);
nfc_put_device(dev);
return rc;
}
-int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name)
+int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name)
{
struct sk_buff *msg;
void *hdr;
@@ -1124,7 +1124,7 @@ int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name)
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
- NFC_CMD_FW_UPLOAD);
+ NFC_CMD_FW_DOWNLOAD);
if (!hdr)
goto free_msg;
@@ -1251,8 +1251,8 @@ static struct genl_ops nfc_genl_ops[] = {
.policy = nfc_genl_policy,
},
{
- .cmd = NFC_CMD_FW_UPLOAD,
- .doit = nfc_genl_fw_upload,
+ .cmd = NFC_CMD_FW_DOWNLOAD,
+ .doit = nfc_genl_fw_download,
.policy = nfc_genl_policy,
},
{
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index ee85a1fc1b24..820a7850c36a 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -123,10 +123,10 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
class_dev_iter_exit(iter);
}
-int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name);
-int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name);
+int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name);
+int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name);
-int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name);
+int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name);
int nfc_dev_up(struct nfc_dev *dev);
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 22c5f399f1cf..ab101f715447 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
{
struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+ OVS_CB(skb)->tun_key = NULL;
return do_execute_actions(dp, skb, acts->actions,
acts->actions_len, false);
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f7e3a0d84c40..f2ed7600084e 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
return 0;
- rtnl_unlock();
- return 0;
-
exit_free:
kfree_skb(reply);
exit_unlock:
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 5c519b121e1b..1aa84dc58777 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
struct flex_array *buckets;
int i, err;
- buckets = flex_array_alloc(sizeof(struct hlist_head *),
+ buckets = flex_array_alloc(sizeof(struct hlist_head),
n_buckets, GFP_KERNEL);
if (!buckets)
return NULL;
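
[Editor's note] The flex_array fix swaps sizeof(struct hlist_head *) for sizeof(struct hlist_head): the buckets hold list heads, not pointers to them. On LP64 the two sizes happen to coincide (the struct is a single pointer), which is presumably how the slip went unnoticed, but the corrected form states the intent and stays right if the struct ever grows. A quick check:

#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

int main(void)
{
    /* Equal on LP64, but only by accident of the struct's layout. */
    printf("sizeof(struct hlist_head)   = %zu\n", sizeof(struct hlist_head));
    printf("sizeof(struct hlist_head *) = %zu\n", sizeof(struct hlist_head *));
    return 0;
}
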
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4b66c752eae5..75c8bbf598c8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3259,9 +3259,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
if (po->tp_version == TPACKET_V3) {
lv = sizeof(struct tpacket_stats_v3);
+ st.stats3.tp_packets += st.stats3.tp_drops;
data = &st.stats3;
} else {
lv = sizeof(struct tpacket_stats);
+ st.stats1.tp_packets += st.stats1.tp_drops;
data = &st.stats1;
}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 281c1bded1f6..51b968d3febb 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
return q;
}
+/* The linklayer setting was not transferred from older iproute2
+ * versions, and the rate table lookup system has been dropped from
+ * the kernel. To stay backward compatible with older iproute2 tc
+ * utilities, we detect the linklayer setting by checking whether the
+ * rate table was modified.
+ *
+ * For linklayer ATM table entries, the rate table will be aligned to
+ * 48 bytes, thus some table entries will contain the same value. The
+ * mpu (minimum packet unit) is also encoded into the old rate table,
+ * thus starting from the mpu, we find low and high table entries for
+ * mapping this cell. If these entries contain the same value, then
+ * the rate table has been modified for linklayer ATM.
+ *
+ * This is done by rounding the mpu up to the nearest 48-byte
+ * cell/entry, then rounding up to the next cell, computing the table
+ * entry one below, and comparing the two.
+ */
+static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
+{
+ int low = roundup(r->mpu, 48);
+ int high = roundup(low+1, 48);
+ int cell_low = low >> r->cell_log;
+ int cell_high = (high >> r->cell_log) - 1;
+
+ /* rtab is too inaccurate at rates > 100Mbit/s */
+ if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
+ pr_debug("TC linklayer: Giving up ATM detection\n");
+ return TC_LINKLAYER_ETHERNET;
+ }
+
+ if ((cell_high > cell_low) && (cell_high < 256)
+ && (rtab[cell_low] == rtab[cell_high])) {
+ pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
+ cell_low, cell_high, rtab[cell_high]);
+ return TC_LINKLAYER_ATM;
+ }
+ return TC_LINKLAYER_ETHERNET;
+}
+
static struct qdisc_rate_table *qdisc_rtab_list;
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
rtab->rate = *r;
rtab->refcnt = 1;
memcpy(rtab->data, nla_data(tab), 1024);
+ if (r->linklayer == TC_LINKLAYER_UNAWARE)
+ r->linklayer = __detect_linklayer(r, rtab->data);
rtab->next = qdisc_rtab_list;
qdisc_rtab_list = rtab;
}
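
[Editor's note] A worked instance of __detect_linklayer()'s arithmetic, using mpu = 0 and cell_log = 3 (plausible values, chosen by us): low rounds to 0, high to 48, and the two probe indices land at opposite ends of the first 48-byte ATM cell, so an ATM-shaped table must hold the same cost at both:

#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
    int mpu = 0, cell_log = 3;
    int low  = roundup(mpu, 48);              /* 0  */
    int high = roundup(low + 1, 48);          /* 48 */
    int cell_low  = low >> cell_log;          /* 0  */
    int cell_high = (high >> cell_log) - 1;   /* 5  */

    /* For a table built with 48-byte ATM cells, indices 0..5 all fall
     * in the first cell and hence hold the same cost, so the detector
     * compares rtab[0] against rtab[5]. */
    printf("compare rtab[%d] with rtab[%d]\n", cell_low, cell_high);
    return 0;
}
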
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index ca8e0a57d945..1f9c31411f19 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -605,6 +605,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
struct sockaddr_atmpvc pvc;
int state;
+ memset(&pvc, 0, sizeof(pvc));
pvc.sap_family = AF_ATMPVC;
pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
pvc.sap_addr.vpi = flow->vcc->vpi;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 71a568862557..7a42c81a19eb 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1465,6 +1465,7 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_wrropt opt;
+ memset(&opt, 0, sizeof(opt));
opt.flags = 0;
opt.allot = cl->allot;
opt.priority = cl->priority + 1;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4626cef4b76e..48be3d5c0d92 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -25,6 +25,7 @@
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q)
unsigned long dev_trans_start(struct net_device *dev)
{
- unsigned long val, res = dev->trans_start;
+ unsigned long val, res;
unsigned int i;
+ if (is_vlan_dev(dev))
+ dev = vlan_dev_real_dev(dev);
+ res = dev->trans_start;
for (i = 0; i < dev->num_tx_queues; i++) {
val = netdev_get_tx_queue(dev, i)->trans_start;
if (val && time_after(val, res))
res = val;
}
dev->trans_start = res;
+
return res;
}
EXPORT_SYMBOL(dev_trans_start);
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
memset(r, 0, sizeof(*r));
r->overhead = conf->overhead;
r->rate_bytes_ps = conf->rate;
+ r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
r->mult = 1;
/*
* The deal here is to replace a divide by a reciprocal one
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c2124ea29f45..c2178b15ca6e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -100,7 +100,7 @@ struct htb_class {
struct psched_ratecfg ceil;
s64 buffer, cbuffer;/* token bucket depth/rate */
s64 mbuffer; /* max wait time */
- int prio; /* these two are used only by leaves... */
+ u32 prio; /* these two are used only by leaves... */
int quantum; /* but stored for parent-to-leaf return */
struct tcf_proto *filter_list; /* class attached filters */
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)*arg, *parent;
struct nlattr *opt = tca[TCA_OPTIONS];
+ struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
struct nlattr *tb[TCA_HTB_MAX + 1];
struct tc_htb_opt *hopt;
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (!hopt->rate.rate || !hopt->ceil.rate)
goto failure;
+ /* Keep backward compatibility with rate_table-based iproute2 tc */
+ if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
+ rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
+ if (rtab)
+ qdisc_put_rtab(rtab);
+ }
+ if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
+ ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
+ if (ctab)
+ qdisc_put_rtab(ctab);
+ }
+
if (!cl) { /* new class */
struct Qdisc *new_q;
int prio;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index bce5b79662a6..ab67efc64b24 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -846,12 +846,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
else
spc_state = SCTP_ADDR_AVAILABLE;
/* Don't inform ULP about transition from PF to
- * active state and set cwnd to 1, see SCTP
+ * active state and set cwnd to 1 MTU, see SCTP
* Quick failover draft section 5.1, point 5
*/
if (transport->state == SCTP_PF) {
ulp_notify = false;
- transport->cwnd = 1;
+ transport->cwnd = asoc->pathmtu;
}
transport->state = SCTP_ACTIVE;
break;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index bdbbc3fd7c14..8fdd16046d66 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -181,12 +181,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
return;
}
- call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
-
sctp_packet_free(&transport->packet);
if (transport->asoc)
sctp_association_put(transport->asoc);
+
+ call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}
/* Start T3_rtx timer if it is not already running and update the heartbeat
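
[Editor's note] Reordering the transport teardown matters because call_rcu() hands the object to a callback that frees it: once queued, the callback may run as soon as a grace period elapses, so letting sctp_packet_free() and sctp_association_put() touch the transport afterwards was a potential use-after-free. A simplified (synchronous, so only illustrative) rendering of the rule that the deferred-free hand-off must be the last access:

#include <stdio.h>
#include <stdlib.h>

struct transport {
    int refcnt;
    void (*deferred_free)(struct transport *);
};

static void do_free(struct transport *t) { free(t); }

static void destroy(struct transport *t)
{
    /* Release everything the object owns first ... */
    t->refcnt--;              /* stand-in for sctp_association_put() */

    /* ... and only then hand it to the deferred-free machinery. Once
     * queued, the callback may run at any time, so this must be the
     * last access -- the same reordering as the sctp hunk above. */
    t->deferred_free(t);
}

int main(void)
{
    struct transport *t = malloc(sizeof(*t));
    t->refcnt = 1;
    t->deferred_free = do_free;
    destroy(t);
    puts("done");
    return 0;
}
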
diff --git a/net/socket.c b/net/socket.c
index 829b460acb87..b2d7c629eeb9 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -106,7 +106,7 @@
#include <linux/atalk.h>
#include <net/busy_poll.h>
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index d304f41260f2..af7ffd447fee 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -120,7 +120,7 @@ static int gssp_rpc_create(struct net *net, struct rpc_clnt **_clnt)
if (IS_ERR(clnt)) {
dprintk("RPC: failed to create AF_LOCAL gssproxy "
"client (errno %ld).\n", PTR_ERR(clnt));
- result = -PTR_ERR(clnt);
+ result = PTR_ERR(clnt);
*_clnt = NULL;
goto out;
}
@@ -328,7 +328,6 @@ void gssp_free_upcall_data(struct gssp_upcall_data *data)
kfree(data->in_handle.data);
kfree(data->out_handle.data);
kfree(data->out_token.data);
- kfree(data->mech_oid.data);
free_svc_cred(&data->creds);
}
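On the first hunk above: PTR_ERR() already returns a negative errno, so the extra minus flipped the error positive and callers testing `result < 0` never saw the failure. For instance:

    clnt = ERR_PTR(-ENOENT);
    PTR_ERR(clnt);      /* -ENOENT (-2): correct */
    -PTR_ERR(clnt);     /*  ENOENT  (2): looks like success to < 0 checks */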
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 357f613df7ff..3c85d1c8a028 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -430,7 +430,7 @@ static int dummy_enc_nameattr_array(struct xdr_stream *xdr,
static int dummy_dec_nameattr_array(struct xdr_stream *xdr,
struct gssx_name_attr_array *naa)
{
- struct gssx_name_attr dummy;
+ struct gssx_name_attr dummy = { .attr = {.len = 0} };
u32 count, i;
__be32 *p;
@@ -493,12 +493,13 @@ static int gssx_enc_name(struct xdr_stream *xdr,
return err;
}
+
static int gssx_dec_name(struct xdr_stream *xdr,
struct gssx_name *name)
{
- struct xdr_netobj dummy_netobj;
- struct gssx_name_attr_array dummy_name_attr_array;
- struct gssx_option_array dummy_option_array;
+ struct xdr_netobj dummy_netobj = { .len = 0 };
+ struct gssx_name_attr_array dummy_name_attr_array = { .count = 0 };
+ struct gssx_option_array dummy_option_array = { .count = 0 };
int err;
/* name->display_name */
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index d0347d148b34..09fb638bcaa4 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1180,6 +1180,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
gm = gss_mech_get_by_OID(&ud->mech_oid);
if (!gm)
goto out;
+ rsci.cred.cr_gss_mech = gm;
status = -EINVAL;
/* mech-specific data: */
@@ -1195,7 +1196,6 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
rscp = rsc_update(cd, &rsci, rscp);
status = 0;
out:
- gss_mech_put(gm);
rsc_free(&rsci);
if (rscp)
cache_put(&rscp->h, cd);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 74f6a704e374..ecbc4e3d83ad 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1660,6 +1660,10 @@ call_connect(struct rpc_task *task)
task->tk_action = call_connect_status;
if (task->tk_status < 0)
return;
+ if (task->tk_flags & RPC_TASK_NOCONNECT) {
+ rpc_exit(task, -ENOTCONN);
+ return;
+ }
xprt_connect(task);
}
}
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
index 74d948f5d5a1..779742cfc1ff 100644
--- a/net/sunrpc/netns.h
+++ b/net/sunrpc/netns.h
@@ -23,6 +23,7 @@ struct sunrpc_net {
struct rpc_clnt *rpcb_local_clnt4;
spinlock_t rpcb_clnt_lock;
unsigned int rpcb_users;
+ unsigned int rpcb_is_af_local : 1;
struct mutex gssp_lock;
wait_queue_head_t gssp_wq;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3df764dc330c..1891a1022c17 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -204,13 +204,15 @@ void rpcb_put_local(struct net *net)
}
static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
- struct rpc_clnt *clnt4)
+ struct rpc_clnt *clnt4,
+ bool is_af_local)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
/* Protected by rpcb_create_local_mutex */
sn->rpcb_local_clnt = clnt;
sn->rpcb_local_clnt4 = clnt4;
+ sn->rpcb_is_af_local = is_af_local ? 1 : 0;
smp_wmb();
sn->rpcb_users = 1;
dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: "
@@ -238,6 +240,14 @@ static int rpcb_create_local_unix(struct net *net)
.program = &rpcb_program,
.version = RPCBVERS_2,
.authflavor = RPC_AUTH_NULL,
+ /*
+ * We turn off the idle timeout to prevent the kernel
+ * from automatically disconnecting the socket.
+ * Otherwise, we'd have to cache the mount namespace
+ * of the caller and somehow pass that to the socket
+ * reconnect code.
+ */
+ .flags = RPC_CLNT_CREATE_NO_IDLE_TIMEOUT,
};
struct rpc_clnt *clnt, *clnt4;
int result = 0;
@@ -263,7 +273,7 @@ static int rpcb_create_local_unix(struct net *net)
clnt4 = NULL;
}
- rpcb_set_local(net, clnt, clnt4);
+ rpcb_set_local(net, clnt, clnt4, true);
out:
return result;
@@ -315,7 +325,7 @@ static int rpcb_create_local_net(struct net *net)
clnt4 = NULL;
}
- rpcb_set_local(net, clnt, clnt4);
+ rpcb_set_local(net, clnt, clnt4, false);
out:
return result;
@@ -376,13 +386,16 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
return rpc_create(&args);
}
-static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
+static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set)
{
- int result, error = 0;
+ int flags = RPC_TASK_NOCONNECT;
+ int error, result = 0;
+ if (is_set || !sn->rpcb_is_af_local)
+ flags = RPC_TASK_SOFTCONN;
msg->rpc_resp = &result;
- error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
+ error = rpc_call_sync(clnt, msg, flags);
if (error < 0) {
dprintk("RPC: failed to contact local rpcbind "
"server (errno %d).\n", -error);
@@ -439,16 +452,19 @@ int rpcb_register(struct net *net, u32 prog, u32 vers, int prot, unsigned short
.rpc_argp = &map,
};
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+ bool is_set = false;
dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
"rpcbind\n", (port ? "" : "un"),
prog, vers, prot, port);
msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET];
- if (port)
+ if (port != 0) {
msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
+ is_set = true;
+ }
- return rpcb_register_call(sn->rpcb_local_clnt, &msg);
+ return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set);
}
/*
@@ -461,6 +477,7 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
struct rpcbind_args *map = msg->rpc_argp;
unsigned short port = ntohs(sin->sin_port);
+ bool is_set = false;
int result;
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -471,10 +488,12 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
map->r_addr, map->r_netid);
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- if (port)
+ if (port != 0) {
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
+ is_set = true;
+ }
- result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
+ result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
kfree(map->r_addr);
return result;
}
@@ -489,6 +508,7 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
struct rpcbind_args *map = msg->rpc_argp;
unsigned short port = ntohs(sin6->sin6_port);
+ bool is_set = false;
int result;
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -499,10 +519,12 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
map->r_addr, map->r_netid);
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- if (port)
+ if (port != 0) {
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
+ is_set = true;
+ }
- result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
+ result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
kfree(map->r_addr);
return result;
}
@@ -519,7 +541,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
map->r_addr = "";
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- return rpcb_register_call(sn->rpcb_local_clnt4, msg);
+ return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false);
}
/**
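Taken together with the call_connect() hunk above (which now fails RPC_TASK_NOCONNECT tasks with -ENOTCONN instead of transparently reconnecting), the rpcbind changes implement this decision table (sketch):

    /* SET, any transport   -> RPC_TASK_SOFTCONN: one connect attempt
     * UNSET over AF_LOCAL  -> RPC_TASK_NOCONNECT: never reconnect, the
     *                         caller's mount namespace may be gone
     * UNSET over AF_INET   -> RPC_TASK_SOFTCONN */
    int flags = (is_set || !sn->rpcb_is_af_local) ? RPC_TASK_SOFTCONN
                                                  : RPC_TASK_NOCONNECT;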
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 305374d4fb98..7762b9f8a8b7 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1193,7 +1193,9 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
return 1;
required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg;
- if (sk_stream_wspace(svsk->sk_sk) >= required)
+ if (sk_stream_wspace(svsk->sk_sk) >= required ||
+ (sk_stream_min_wspace(svsk->sk_sk) == 0 &&
+ atomic_read(&xprt->xpt_reserved) == 0))
return 1;
set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
return 0;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index cb29ef7ba2f0..609c30c80816 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
{
struct tipc_link *l_ptr;
struct tipc_link *temp_l_ptr;
+ struct tipc_link_req *temp_req;
pr_info("Disabling bearer <%s>\n", b_ptr->name);
spin_lock_bh(&b_ptr->lock);
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
tipc_link_delete(l_ptr);
}
- if (b_ptr->link_req)
- tipc_disc_delete(b_ptr->link_req);
+ temp_req = b_ptr->link_req;
+ b_ptr->link_req = NULL;
spin_unlock_bh(&b_ptr->lock);
+
+ if (temp_req)
+ tipc_disc_delete(temp_req);
+
memset(b_ptr, 0, sizeof(struct tipc_bearer));
}
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 19da5abe0fa6..fd3fa57a410e 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -355,8 +355,12 @@ static int tipc_open_listening_sock(struct tipc_server *s)
return PTR_ERR(con);
sock = tipc_create_listen_sock(con);
- if (!sock)
+ if (!sock) {
+ idr_remove(&s->conn_idr, con->conid);
+ s->idr_in_use--;
+ kfree(con);
return -EINVAL;
+ }
tipc_register_callbacks(sock, con);
return 0;
@@ -563,9 +567,14 @@ int tipc_server_start(struct tipc_server *s)
kmem_cache_destroy(s->rcvbuf_cache);
return ret;
}
+ ret = tipc_open_listening_sock(s);
+ if (ret < 0) {
+ tipc_work_stop(s);
+ kmem_cache_destroy(s->rcvbuf_cache);
+ return ret;
+ }
s->enabled = 1;
-
- return tipc_open_listening_sock(s);
+ return ret;
}
void tipc_server_stop(struct tipc_server *s)
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 593071dabd1c..4d9334683f84 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -347,7 +347,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
struct vsock_sock *vsk;
list_for_each_entry(vsk, &vsock_connected_table[i],
- connected_table);
+ connected_table)
fn(sk_vsock(vsk));
}
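The removed semicolon above had turned the iteration into an empty loop, so fn() ran exactly once, after the loop, with vsk left pointing at the list head rather than at a real socket:

    list_for_each_entry(vsk, &vsock_connected_table[i],
                        connected_table);       /* BUG: empty body */
            fn(sk_vsock(vsk));                  /* runs once, vsk bogus */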
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 4f9f216665e9..a8c29fa4f1b3 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -765,6 +765,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
cfg80211_leave_mesh(rdev, dev);
break;
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
cfg80211_stop_ap(rdev, dev);
break;
default:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1cc47aca7f05..5f6e982cdcf4 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -441,10 +441,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
goto out_unlock;
}
*rdev = wiphy_to_dev((*wdev)->wiphy);
- cb->args[0] = (*rdev)->wiphy_idx;
+ /* 0 is the first index - add 1 to parse only once */
+ cb->args[0] = (*rdev)->wiphy_idx + 1;
cb->args[1] = (*wdev)->identifier;
} else {
- struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]);
+ /* subtract the 1 again here */
+ struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
struct wireless_dev *tmp;
if (!wiphy) {
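Netlink dump state (cb->args[]) starts out zeroed, so a stored wiphy_idx of 0 was indistinguishable from "first invocation". The +1/-1 bias in the hunk above disambiguates the two:

    cb->args[0] = (*rdev)->wiphy_idx + 1;               /* store biased */
    wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);        /* read unbiased */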
@@ -2620,8 +2622,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_NEW_KEY);
- if (IS_ERR(hdr))
- return PTR_ERR(hdr);
+ if (!hdr)
+ return -ENOBUFS;
cookie.msg = msg;
cookie.idx = key_idx;
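nl80211hdr_put() reports failure by returning NULL, not an ERR_PTR, so the old IS_ERR()/PTR_ERR() checks could never fire. The same correction recurs in the remain_on_channel, tx_mgmt, probe_client and testmode-dump hunks below:

    hdr = nl80211hdr_put(msg, portid, seq, 0, cmd);
    if (!hdr)                   /* NULL on failure, never an ERR_PTR */
            return -ENOBUFS;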
@@ -4770,9 +4772,9 @@ do { \
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 0, 1,
mask, NL80211_MESHCONF_FORWARDING,
nla_get_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, 1, 255,
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0,
mask, NL80211_MESHCONF_RSSI_THRESHOLD,
- nla_get_u32);
+ nla_get_s32);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16,
mask, NL80211_MESHCONF_HT_OPMODE,
nla_get_u16);
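rssi_threshold is a signed dBm quantity, so the valid range is -255..0 and the attribute must be read with nla_get_s32(); the old unsigned 1..255 window could not express any real threshold:

    /* e.g. a -80 dBm threshold: fits in [-255, 0] as an s32,
     * unrepresentable in the previous unsigned 1..255 range */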
@@ -6505,6 +6507,9 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
NL80211_CMD_TESTMODE);
struct nlattr *tmdata;
+ if (!hdr)
+ break;
+
if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
genlmsg_cancel(skb, hdr);
break;
@@ -6613,12 +6618,14 @@ EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);
void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
{
+ struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
void *hdr = ((void **)skb->cb)[1];
struct nlattr *data = ((void **)skb->cb)[2];
nla_nest_end(skb, data);
genlmsg_end(skb, hdr);
- genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
+ nl80211_testmode_mcgrp.id, gfp);
}
EXPORT_SYMBOL(cfg80211_testmode_event);
#endif
@@ -6947,9 +6954,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_REMAIN_ON_CHANNEL);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
@@ -7247,9 +7253,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_FRAME);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
}
@@ -8128,9 +8133,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_PROBE_CLIENT);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
@@ -10064,7 +10068,8 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
genlmsg_end(msg, hdr);
- genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
return;
nla_put_failure:
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 5a24c986f34b..de06d5d1287f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2247,10 +2247,13 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
void wiphy_regulatory_register(struct wiphy *wiphy)
{
+ struct regulatory_request *lr;
+
if (!reg_dev_ignore_cell_hint(wiphy))
reg_num_devs_support_basehint++;
- wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
+ lr = get_last_request();
+ wiphy_update_regulatory(wiphy, lr->initiator);
}
void wiphy_regulatory_deregister(struct wiphy *wiphy)
@@ -2279,7 +2282,9 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy)
static void reg_timeout_work(struct work_struct *work)
{
REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
+ rtnl_lock();
restore_regulatory_settings(true);
+ rtnl_unlock();
}
int __init regulatory_init(void)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 1d3cfb1a3f28..20e86a95dc4e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -34,8 +34,10 @@ struct cfg80211_conn {
CFG80211_CONN_SCAN_AGAIN,
CFG80211_CONN_AUTHENTICATE_NEXT,
CFG80211_CONN_AUTHENTICATING,
+ CFG80211_CONN_AUTH_FAILED,
CFG80211_CONN_ASSOCIATE_NEXT,
CFG80211_CONN_ASSOCIATING,
+ CFG80211_CONN_ASSOC_FAILED,
CFG80211_CONN_DEAUTH,
CFG80211_CONN_CONNECTED,
} state;
@@ -164,6 +166,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
NULL, 0,
params->key, params->key_len,
params->key_idx, NULL, 0);
+ case CFG80211_CONN_AUTH_FAILED:
+ return -ENOTCONN;
case CFG80211_CONN_ASSOCIATE_NEXT:
BUG_ON(!rdev->ops->assoc);
wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -188,10 +192,17 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
WLAN_REASON_DEAUTH_LEAVING,
false);
return err;
+ case CFG80211_CONN_ASSOC_FAILED:
+ cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
+ NULL, 0,
+ WLAN_REASON_DEAUTH_LEAVING, false);
+ return -ENOTCONN;
case CFG80211_CONN_DEAUTH:
cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
WLAN_REASON_DEAUTH_LEAVING, false);
+ /* free directly, disconnected event already sent */
+ cfg80211_sme_free(wdev);
return 0;
default:
return 0;
@@ -371,7 +382,7 @@ bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status)
return true;
}
- wdev->conn->state = CFG80211_CONN_DEAUTH;
+ wdev->conn->state = CFG80211_CONN_ASSOC_FAILED;
schedule_work(&rdev->conn_work);
return false;
}
@@ -383,7 +394,13 @@ void cfg80211_sme_deauth(struct wireless_dev *wdev)
void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
{
- cfg80211_sme_free(wdev);
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+ if (!wdev->conn)
+ return;
+
+ wdev->conn->state = CFG80211_CONN_AUTH_FAILED;
+ schedule_work(&rdev->conn_work);
}
void cfg80211_sme_disassoc(struct wireless_dev *wdev)
@@ -399,7 +416,13 @@ void cfg80211_sme_disassoc(struct wireless_dev *wdev)
void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
{
- cfg80211_sme_disassoc(wdev);
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+ if (!wdev->conn)
+ return;
+
+ wdev->conn->state = CFG80211_CONN_ASSOC_FAILED;
+ schedule_work(&rdev->conn_work);
}
static int cfg80211_sme_connect(struct wireless_dev *wdev,
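The two new states make authentication and association failures flow through conn_work instead of freeing or disassociating directly from the MLME callbacks. A sketch of the intended sequence (assumed from the hunks above, not a quote from the code):

    /* timeout or refusal -> conn->state = CFG80211_CONN_*_FAILED
     *                    -> schedule_work(&rdev->conn_work)
     * conn_work          -> cfg80211_conn_do_work() returns -ENOTCONN
     *                       (ASSOC_FAILED also sends a deauth first)
     *                    -> the failed connect is reported and torn
     *                       down with the proper locking in place */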
@@ -953,21 +976,19 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
struct net_device *dev, u16 reason, bool wextev)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err;
+ int err = 0;
ASSERT_WDEV_LOCK(wdev);
kfree(wdev->connect_keys);
wdev->connect_keys = NULL;
- if (wdev->conn) {
+ if (wdev->conn)
err = cfg80211_sme_disconnect(wdev, reason);
- } else if (!rdev->ops->disconnect) {
+ else if (!rdev->ops->disconnect)
cfg80211_mlme_down(rdev, dev);
- err = 0;
- } else {
+ else if (wdev->current_bss)
err = rdev_disconnect(rdev, dev, reason);
- }
return err;
}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 3f7682a387b7..eefbd10e408f 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1998,12 +1998,11 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
*
* Create or update the port list entry
*/
-static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address,
+static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
int act)
{
__be16 *bep;
__be32 *be32p;
- struct sockaddr_in6 *addr6;
struct smk_port_label *spp;
struct socket_smack *ssp = sk->sk_security;
struct smack_known *skp;
@@ -2025,10 +2024,9 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address,
/*
* Get the IP address and port from the address.
*/
- addr6 = (struct sockaddr_in6 *)address;
- port = ntohs(addr6->sin6_port);
- bep = (__be16 *)(&addr6->sin6_addr);
- be32p = (__be32 *)(&addr6->sin6_addr);
+ port = ntohs(address->sin6_port);
+ bep = (__be16 *)(&address->sin6_addr);
+ be32p = (__be32 *)(&address->sin6_addr);
/*
* It's remote, so port lookup does no good.
@@ -2060,9 +2058,9 @@ auditout:
ad.a.u.net->family = sk->sk_family;
ad.a.u.net->dport = port;
if (act == SMK_RECEIVING)
- ad.a.u.net->v6info.saddr = addr6->sin6_addr;
+ ad.a.u.net->v6info.saddr = address->sin6_addr;
else
- ad.a.u.net->v6info.daddr = addr6->sin6_addr;
+ ad.a.u.net->v6info.daddr = address->sin6_addr;
#endif
return smk_access(skp, object, MAY_WRITE, &ad);
}
@@ -2201,7 +2199,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
case PF_INET6:
if (addrlen < sizeof(struct sockaddr_in6))
return -EINVAL;
- rc = smk_ipv6_port_check(sock->sk, sap, SMK_CONNECTING);
+ rc = smk_ipv6_port_check(sock->sk, (struct sockaddr_in6 *)sap,
+ SMK_CONNECTING);
break;
}
return rc;
@@ -3034,7 +3033,7 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
int size)
{
struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;
- struct sockaddr *sap = (struct sockaddr *) msg->msg_name;
+ struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
int rc = 0;
/*
@@ -3121,9 +3120,8 @@ static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,
return smack_net_ambient;
}
-static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr *sap)
+static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
{
- struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
u8 nexthdr;
int offset;
int proto = -EINVAL;
@@ -3181,7 +3179,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
struct netlbl_lsm_secattr secattr;
struct socket_smack *ssp = sk->sk_security;
struct smack_known *skp;
- struct sockaddr sadd;
+ struct sockaddr_in6 sadd;
int rc = 0;
struct smk_audit_info ad;
#ifdef CONFIG_AUDIT
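Besides removing the casts, the sockaddr_in6 change fixes a stack overrun in the receive path: `struct sockaddr sadd` is only 16 bytes on common ABIs, while an IPv6 address needs a 28-byte sockaddr_in6, so smk_skb_to_addr_ipv6() was writing past the end of the buffer:

    /* illustrative sizes on common ABIs:
     *   sizeof(struct sockaddr)     == 16
     *   sizeof(struct sockaddr_in6) == 28  -> 12 bytes of overrun */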
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 99db892d7299..98969541cbcc 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -743,7 +743,7 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
mutex_lock(&stream->device->lock);
switch (_IOC_NR(cmd)) {
case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
- put_user(SNDRV_COMPRESS_VERSION,
+ retval = put_user(SNDRV_COMPRESS_VERSION,
(int __user *)arg) ? -EFAULT : 0;
break;
case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
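put_user() returns 0 on success and nonzero on fault, so the old statement computed `... ? -EFAULT : 0` and threw the result away; the ioctl then returned whatever happened to be left in retval:

    retval = put_user(SNDRV_COMPRESS_VERSION,
                      (int __user *)arg) ? -EFAULT : 0;  /* now propagated */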
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 7c11d46b84d3..48a9d004d6d9 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -860,7 +860,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
}
}
if (id < 0 && quirk) {
- for (q = quirk; q->subvendor; q++) {
+ for (q = quirk; q->subvendor || q->subdevice; q++) {
unsigned int vendorid =
q->subdevice | (q->subvendor << 16);
unsigned int mask = 0xffff0000 | q->subdevice_mask;
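Terminating the scan on q->subvendor alone drops valid entries whose subvendor happens to be 0; only the all-zero record marks the end of the table. The Mac Mini quirk added in the patch_sigmatel.c hunk below is exactly such an entry:

    /* 0x0000:0x0100 (Mac Mini) is a real entry, not a terminator,
     * hence the loop condition `q->subvendor || q->subdevice` */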
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8e77cbbad871..e3c7ba8d7582 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -522,7 +522,7 @@ static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1,
}
#define nid_has_mute(codec, nid, dir) \
- check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE)
+ check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
#define nid_has_volume(codec, nid, dir) \
check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)
@@ -624,7 +624,7 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
if (enable)
val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT;
}
- if (caps & AC_AMPCAP_MUTE) {
+ if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
if (!enable)
val |= HDA_AMP_MUTE;
}
@@ -648,7 +648,7 @@ static unsigned int get_amp_mask_to_modify(struct hda_codec *codec,
{
unsigned int mask = 0xff;
- if (caps & AC_AMPCAP_MUTE) {
+ if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL))
mask &= ~0x80;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8bd226149868..f303cd898515 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1031,6 +1031,7 @@ enum {
ALC880_FIXUP_GPIO2,
ALC880_FIXUP_MEDION_RIM,
ALC880_FIXUP_LG,
+ ALC880_FIXUP_LG_LW25,
ALC880_FIXUP_W810,
ALC880_FIXUP_EAPD_COEF,
ALC880_FIXUP_TCL_S700,
@@ -1089,6 +1090,14 @@ static const struct hda_fixup alc880_fixups[] = {
{ }
}
},
+ [ALC880_FIXUP_LG_LW25] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x0181344f }, /* line-in */
+ { 0x1b, 0x0321403f }, /* headphone */
+ { }
+ }
+ },
[ALC880_FIXUP_W810] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -1341,6 +1350,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG),
SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG),
SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG),
+ SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25),
SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700),
/* Below are the entries copied from alc880_quirks.c.
@@ -4329,6 +4339,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 92b9b4324372..6d1924c19abf 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2819,6 +2819,7 @@ static const struct hda_pintbl ecs202_pin_configs[] = {
/* codec SSIDs for Intel Mac sharing the same PCI SSID 8384:7680 */
static const struct snd_pci_quirk stac922x_intel_mac_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x0000, 0x0100, "Mac Mini", STAC_INTEL_MAC_V3),
SND_PCI_QUIRK(0x106b, 0x0800, "Mac", STAC_INTEL_MAC_V1),
SND_PCI_QUIRK(0x106b, 0x0600, "Mac", STAC_INTEL_MAC_V2),
SND_PCI_QUIRK(0x106b, 0x0700, "Mac", STAC_INTEL_MAC_V2),
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index d6f7694fcad4..c8a2de103c5f 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -341,7 +341,7 @@ static struct platform_driver au1xac97c_driver = {
.remove = au1xac97c_drvremove,
};
-module_platform_driver(&au1xac97c_driver);
+module_platform_driver(au1xac97c_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Au1000/1500/1100 AC97C ASoC driver");
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index efb1daecd0dd..e82eb373a731 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -294,11 +294,12 @@ static int asoc_bfin_ac97_probe(struct platform_device *pdev)
/* Request PB3 as reset pin */
ret = devm_gpio_request_one(&pdev->dev,
CONFIG_SND_BF5XX_RESET_GPIO_NUM,
- GPIOF_OUT_INIT_HIGH, "SND_AD198x RESET") {
+ GPIOF_OUT_INIT_HIGH, "SND_AD198x RESET");
+ if (ret) {
dev_err(&pdev->dev,
"Failed to request GPIO_%d for reset: %d\n",
CONFIG_SND_BF5XX_RESET_GPIO_NUM, ret);
- goto gpio_err;
+ return ret;
}
#endif
diff --git a/sound/soc/blackfin/bf5xx-ac97.h b/sound/soc/blackfin/bf5xx-ac97.h
index 15c635e33f4d..0c3e22d90a8d 100644
--- a/sound/soc/blackfin/bf5xx-ac97.h
+++ b/sound/soc/blackfin/bf5xx-ac97.h
@@ -9,7 +9,6 @@
#ifndef _BF5XX_AC97_H
#define _BF5XX_AC97_H
-extern struct snd_ac97_bus_ops bf5xx_ac97_ops;
extern struct snd_ac97 *ac97;
/* Frame format in memory, only support stereo currently */
struct ac97_frame {
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 987f728718c5..be2ba1b6fe4a 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -195,6 +195,8 @@ static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
+static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
+
static const unsigned int limiter_tlv[] = {
TLV_DB_RANGE_HEAD(2),
0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
@@ -451,7 +453,8 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
SOC_ENUM("Beep Pitch", beep_pitch_enum),
SOC_ENUM("Beep on Time", beep_ontime_enum),
SOC_ENUM("Beep off Time", beep_offtime_enum),
- SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv),
+ SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL,
+ 0, 0x07, 0x1f, beep_tlv),
SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1),
SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum),
SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum),
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 6c8a9e7bee25..760e8bfeacaa 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -153,6 +153,8 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
static int power_vag_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
@@ -160,9 +162,17 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
break;
case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
- SGTL5000_VAG_POWERUP, 0);
- msleep(400);
+ /*
+ * Don't clear VAG_POWERUP while both the DAC and ADC are
+ * operational, to avoid inadvertently starving whichever
+ * one keeps running.
+ */
+ if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) &
+ mask) != mask) {
+ snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_VAG_POWERUP, 0);
+ msleep(400);
+ }
break;
default:
break;
@@ -388,7 +398,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0),
SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)",
SGTL5000_CHIP_ANA_ADC_CTRL,
- 8, 2, 0, capture_6db_attenuate),
+ 8, 1, 0, capture_6db_attenuate),
SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0),
SOC_DOUBLE_TLV("Headphone Playback Volume",
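In SOC_SINGLE_TLV(name, reg, shift, max, invert, tlv) the fourth argument is the control's maximum value, not its number of states, so a one-bit attenuate switch takes max 1 (values 0..1); the previous max of 2 advertised an impossible range to userspace:

    SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)",
                   SGTL5000_CHIP_ANA_ADC_CTRL,
                   8, 1, 0, capture_6db_attenuate),   /* bit 8, max == 1 */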
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index f5e835662cdc..10adc4145d46 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -410,6 +410,16 @@ static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
rec->command, rec->length);
len = rec->length + 8;
+ xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
+ if (!xfer) {
+ dev_err(codec->dev, "Failed to allocate xfer\n");
+ ret = -ENOMEM;
+ goto abort;
+ }
+
+ xfer->codec = codec;
+ list_add_tail(&xfer->list, &xfer_list);
+
out = kzalloc(len, GFP_KERNEL);
if (!out) {
dev_err(codec->dev,
@@ -417,6 +427,7 @@ static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
ret = -ENOMEM;
goto abort1;
}
+ xfer->t.rx_buf = out;
img = kzalloc(len, GFP_KERNEL);
if (!img) {
@@ -425,24 +436,13 @@ static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
ret = -ENOMEM;
goto abort1;
}
+ xfer->t.tx_buf = img;
byte_swap_64((u64 *)&rec->command, img, len);
- xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
- if (!xfer) {
- dev_err(codec->dev, "Failed to allocate xfer\n");
- ret = -ENOMEM;
- goto abort1;
- }
-
- xfer->codec = codec;
- list_add_tail(&xfer->list, &xfer_list);
-
spi_message_init(&xfer->m);
xfer->m.complete = wm0010_boot_xfer_complete;
xfer->m.context = xfer;
- xfer->t.tx_buf = img;
- xfer->t.rx_buf = out;
xfer->t.len = len;
xfer->t.bits_per_word = 8;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index b94190820e8c..4375c9f2b791 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -679,13 +679,14 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
return -EINVAL;
}
- path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
- list_sink);
- if (!path) {
+ if (list_empty(&w->sources)) {
dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name);
return -EINVAL;
}
+ path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
+ list_sink);
+
ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path);
if (ret < 0)
return ret;
@@ -2733,7 +2734,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
}
mutex_unlock(&card->dapm_mutex);
- return 0;
+ return change;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw);
@@ -2861,7 +2862,6 @@ int snd_soc_dapm_put_enum_virt(struct snd_kcontrol *kcontrol,
struct soc_enum *e =
(struct soc_enum *)kcontrol->private_value;
int change;
- int ret = 0;
int wi;
if (ucontrol->value.enumerated.item[0] >= e->max)
@@ -2881,7 +2881,7 @@ int snd_soc_dapm_put_enum_virt(struct snd_kcontrol *kcontrol,
}
mutex_unlock(&card->dapm_mutex);
- return ret;
+ return change;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_virt);
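ALSA kcontrol .put handlers are expected to return 1 when the value actually changed (so the core emits a control-change notification) and 0 when it did not; the fixed return of 0 in both handlers above silently suppressed those events:

    /* .put contract (sketch): < 0 error, 0 unchanged, 1 changed+notify */
    return change;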
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index d04146cad61f..47565fd04505 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -228,7 +228,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
reg = TEGRA30_I2S_CIF_RX_CTRL;
} else {
val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
- reg = TEGRA30_I2S_CIF_RX_CTRL;
+ reg = TEGRA30_I2S_CIF_TX_CTRL;
}
regmap_write(i2s->regmap, reg, val);
diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c
index 9e6e3ffd86bb..23452ee617e1 100644
--- a/sound/usb/6fire/comm.c
+++ b/sound/usb/6fire/comm.c
@@ -110,19 +110,37 @@ static int usb6fire_comm_send_buffer(u8 *buffer, struct usb_device *dev)
static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
u8 reg, u8 value)
{
- u8 buffer[13]; /* 13: maximum length of message */
+ u8 *buffer;
+ int ret;
+
+ /* 13: maximum length of message */
+ buffer = kmalloc(13, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00);
- return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+ ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+
+ kfree(buffer);
+ return ret;
}
static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
u8 reg, u8 vl, u8 vh)
{
- u8 buffer[13]; /* 13: maximum length of message */
+ u8 *buffer;
+ int ret;
+
+ /* 13: maximum length of message */
+ buffer = kmalloc(13, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh);
- return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+ ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+
+ kfree(buffer);
+ return ret;
}
int usb6fire_comm_init(struct sfire_chip *chip)
@@ -135,6 +153,12 @@ int usb6fire_comm_init(struct sfire_chip *chip)
if (!rt)
return -ENOMEM;
+ rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL);
+ if (!rt->receiver_buffer) {
+ kfree(rt);
+ return -ENOMEM;
+ }
+
urb = &rt->receiver;
rt->serial = 1;
rt->chip = chip;
@@ -153,6 +177,7 @@ int usb6fire_comm_init(struct sfire_chip *chip)
urb->interval = 1;
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret < 0) {
+ kfree(rt->receiver_buffer);
kfree(rt);
snd_printk(KERN_ERR PREFIX "cannot create comm data receiver.");
return ret;
@@ -171,6 +196,9 @@ void usb6fire_comm_abort(struct sfire_chip *chip)
void usb6fire_comm_destroy(struct sfire_chip *chip)
{
- kfree(chip->comm);
+ struct comm_runtime *rt = chip->comm;
+
+ kfree(rt->receiver_buffer);
+ kfree(rt);
chip->comm = NULL;
}
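The move from on-stack and embedded arrays to kmalloc'd buffers (here and in the comm.h, midi and pcm hunks below) follows the USB rule that transfer buffers are DMA-mapped by the host controller: stack memory cannot be DMA-mapped at all, and a buffer embedded in a larger structure can share cache lines with unrelated fields on non-coherent systems:

    /* sketch: every buffer handed to usb_submit_urb() or
     * usb_control_msg() must come from the allocator */
    u8 *buffer = kmalloc(13, GFP_KERNEL);
    if (!buffer)
            return -ENOMEM;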
diff --git a/sound/usb/6fire/comm.h b/sound/usb/6fire/comm.h
index 6a0840b0dcff..780d5ed8e5d8 100644
--- a/sound/usb/6fire/comm.h
+++ b/sound/usb/6fire/comm.h
@@ -24,7 +24,7 @@ struct comm_runtime {
struct sfire_chip *chip;
struct urb receiver;
- u8 receiver_buffer[COMM_RECEIVER_BUFSIZE];
+ u8 *receiver_buffer;
u8 serial; /* urb serial */
diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
index 26722423330d..f3dd7266c391 100644
--- a/sound/usb/6fire/midi.c
+++ b/sound/usb/6fire/midi.c
@@ -19,6 +19,10 @@
#include "chip.h"
#include "comm.h"
+enum {
+ MIDI_BUFSIZE = 64
+};
+
static void usb6fire_midi_out_handler(struct urb *urb)
{
struct midi_runtime *rt = urb->context;
@@ -156,6 +160,12 @@ int usb6fire_midi_init(struct sfire_chip *chip)
if (!rt)
return -ENOMEM;
+ rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
+ if (!rt->out_buffer) {
+ kfree(rt);
+ return -ENOMEM;
+ }
+
rt->chip = chip;
rt->in_received = usb6fire_midi_in_received;
rt->out_buffer[0] = 0x80; /* 'send midi' command */
@@ -169,6 +179,7 @@ int usb6fire_midi_init(struct sfire_chip *chip)
ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
if (ret < 0) {
+ kfree(rt->out_buffer);
kfree(rt);
snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
return ret;
@@ -197,6 +208,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip)
void usb6fire_midi_destroy(struct sfire_chip *chip)
{
- kfree(chip->midi);
+ struct midi_runtime *rt = chip->midi;
+
+ kfree(rt->out_buffer);
+ kfree(rt);
chip->midi = NULL;
}
diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
index c321006e5430..84851b9f5559 100644
--- a/sound/usb/6fire/midi.h
+++ b/sound/usb/6fire/midi.h
@@ -16,10 +16,6 @@
#include "common.h"
-enum {
- MIDI_BUFSIZE = 64
-};
-
struct midi_runtime {
struct sfire_chip *chip;
struct snd_rawmidi *instance;
@@ -32,7 +28,7 @@ struct midi_runtime {
struct snd_rawmidi_substream *out;
struct urb out_urb;
u8 out_serial; /* serial number of out packet */
- u8 out_buffer[MIDI_BUFSIZE];
+ u8 *out_buffer;
int buffer_offset;
void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 3d2551cc10f2..b5eb97fdc842 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -582,6 +582,33 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
}
+static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
+{
+ int i;
+
+ for (i = 0; i < PCM_N_URBS; i++) {
+ rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+ * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+ if (!rt->out_urbs[i].buffer)
+ return -ENOMEM;
+ rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+ * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+ if (!rt->in_urbs[i].buffer)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
+{
+ int i;
+
+ for (i = 0; i < PCM_N_URBS; i++) {
+ kfree(rt->out_urbs[i].buffer);
+ kfree(rt->in_urbs[i].buffer);
+ }
+}
+
int usb6fire_pcm_init(struct sfire_chip *chip)
{
int i;
@@ -593,6 +620,13 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
if (!rt)
return -ENOMEM;
+ ret = usb6fire_pcm_buffers_init(rt);
+ if (ret) {
+ usb6fire_pcm_buffers_destroy(rt);
+ kfree(rt);
+ return ret;
+ }
+
rt->chip = chip;
rt->stream_state = STREAM_DISABLED;
rt->rate = ARRAY_SIZE(rates);
@@ -614,6 +648,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
if (ret < 0) {
+ usb6fire_pcm_buffers_destroy(rt);
kfree(rt);
snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
return ret;
@@ -625,6 +660,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops);
if (ret) {
+ usb6fire_pcm_buffers_destroy(rt);
kfree(rt);
snd_printk(KERN_ERR PREFIX
"error preallocating pcm buffers.\n");
@@ -669,6 +705,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip)
void usb6fire_pcm_destroy(struct sfire_chip *chip)
{
- kfree(chip->pcm);
+ struct pcm_runtime *rt = chip->pcm;
+
+ usb6fire_pcm_buffers_destroy(rt);
+ kfree(rt);
chip->pcm = NULL;
}
diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
index 9b01133ee3fe..f5779d6182c6 100644
--- a/sound/usb/6fire/pcm.h
+++ b/sound/usb/6fire/pcm.h
@@ -32,7 +32,7 @@ struct pcm_urb {
struct urb instance;
struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
/* END DO NOT SEPARATE */
- u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE];
+ u8 *buffer;
struct pcm_urb *peer;
};
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 7a444b5501d9..659950e5b94f 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -591,17 +591,16 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
ep->stride = frame_bits >> 3;
ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;
- /* calculate max. frequency */
- if (ep->maxpacksize) {
+ /* assume max. frequency is 25% higher than nominal */
+ ep->freqmax = ep->freqn + (ep->freqn >> 2);
+ maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
+ >> (16 - ep->datainterval);
+ /* but wMaxPacketSize might reduce this */
+ if (ep->maxpacksize && ep->maxpacksize < maxsize) {
/* whatever fits into a max. size packet */
maxsize = ep->maxpacksize;
ep->freqmax = (maxsize / (frame_bits >> 3))
<< (16 - ep->datainterval);
- } else {
- /* no max. packet size: just take 25% higher than nominal */
- ep->freqmax = ep->freqn + (ep->freqn >> 2);
- maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
- >> (16 - ep->datainterval);
}
if (ep->fill_max)
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d5438083fd6a..95558ef4a7a0 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -888,6 +888,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+ case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
case USB_ID(0x046d, 0x0991):
/* Most audio usb devices lie about volume resolution.
* Most Logitech webcams have res = 384.
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 1bc45e71f1fe..0df9ede99dfd 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -319,19 +319,19 @@ static int create_auto_midi_quirk(struct snd_usb_audio *chip,
if (altsd->bNumEndpoints < 1)
return -ENODEV;
epd = get_endpoint(alts, 0);
- if (!usb_endpoint_xfer_bulk(epd) ||
+ if (!usb_endpoint_xfer_bulk(epd) &&
!usb_endpoint_xfer_int(epd))
return -ENODEV;
switch (USB_ID_VENDOR(chip->usb_id)) {
case 0x0499: /* Yamaha */
err = create_yamaha_midi_quirk(chip, iface, driver, alts);
- if (err < 0 && err != -ENODEV)
+ if (err != -ENODEV)
return err;
break;
case 0x0582: /* Roland */
err = create_roland_midi_quirk(chip, iface, driver, alts);
- if (err < 0 && err != -ENODEV)
+ if (err != -ENODEV)
return err;
break;
}
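An endpoint's transfer type is a single two-bit field, so it is never both bulk and interrupt; the old `!bulk || !int` test was therefore true for every endpoint and the probe always returned -ENODEV. The `err != -ENODEV` change additionally returns success (err == 0) immediately instead of falling through to the standard quirk:

    /* truth table for the old test:
     *   bulk endpoint: !bulk || !int == 0 || 1 -> rejected
     *   int  endpoint: !bulk || !int == 1 || 0 -> rejected
     * fixed: !bulk && !int rejects only endpoints that are neither */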