path: root/drivers
author    Linus Torvalds <torvalds@linux-foundation.org>  2012-05-24 12:42:54 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-05-24 12:42:54 -0700
commit    f2fde3a65e88330017b816faf2ef75f141d21375
tree      57152ab5756e7ed1c58742e7e16f13a45ff11f21 /drivers
parent    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
parent    Merge branch 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6 into drm-core-next
Merge branch 'drm-core-next' of git://people.freedesktop.org/~airlied/linux
Pull main drm updates from Dave Airlie:
 "This is the main merge window request for the drm. It's big, but jam
  packed with lots of features and of course 0 regressions. (okay maybe
  there'll be one).

  Highlights:

   - new KMS drivers for server GPU chipsets: ast, mgag200 and cirrus
     (qemu only). These drivers use the generic modesetting drivers.

   - initial prime/dma-buf support for i915, nouveau, radeon, udl and
     exynos

   - switcheroo audio support: so GPUs with HDMI can turn off the sound
     driver without crashing stuff.

   - There are some patches drifting outside drivers/gpu into x86 and
     EFI for better handling of multiple video adapters in Apple Macs;
     they've got correct acks except one trivial fixup.

   - Core: edid parser has better DMT and reduced blanking support,
     crtc properties, plane properties

   - Drivers:
      exynos: add 2D core accel support, prime support, hdmi features
      intel: more Haswell support, initial Valleyview support, more
             hdmi infoframe fixes, update MAINTAINERS for Daniel, lots
             of cleanups and fixes
      radeon: more HDMI audio support, improved GPU lockup recovery
             support, remove nested mutexes, less memory copying on
             PCIE, fix bus master enable race (kexec), improved fence
             handling
      gma500: cleanups, 1080p support, acpi fixes
      nouveau: better nva3 memory reclocking, kepler accel (needs
             external firmware rip), async buffer moves on nv84+ hw.

  I've some more dma-buf patches that rely on the dma-buf merge for
  vmap stuff, and I've a few fixes building up, but I'd decided I'd
  better get rid of the main pull sooner rather than later, so the
  audio guys are also unblocked."

Fix up trivial conflict due to some duplicated changes in
drivers/gpu/drm/i915/intel_ringbuffer.c

* 'drm-core-next' of git://people.freedesktop.org/~airlied/linux: (605 commits)
  drm/nouveau/nvd9: Fix GPIO initialisation sequence.
  drm/nouveau: Unregister switcheroo client on exit
  drm/nouveau: Check dsm on switcheroo unregister
  drm/nouveau: fix a minor annoyance in an output string
  drm/nouveau: turn a BUG into a WARN
  drm/nv50: decode PGRAPH DATA_ERROR = 0x24
  drm/nouveau/disp: fix dithering not being enabled on some eDP macbooks
  drm/nvd9/copy: initialise copy engine, seems to work like nvc0
  drm/nvc0/ttm: use copy engines for async buffer moves
  drm/nva3/ttm: use copy engine for async buffer moves
  drm/nv98/ttm: add in a (disabled) crypto engine buffer copy method
  drm/nv84/ttm: use crypto engine for async buffer copies
  drm/nouveau/ttm: untangle code to support accelerated buffer moves
  drm/nouveau/fbcon: use fence for sync, rather than notifier
  drm/nv98/crypt: non-stub implementation of the engine hooks
  drm/nouveau/fifo: turn all fifo modules into engine modules
  drm/nv50/graph: remove ability to do interrupt-driven context switching
  drm/nv50: remove manual context unload on context destruction
  drm/nv50: remove execution engine context saves on suspend
  drm/nv50/fifo: use hardware channel kickoff functionality
  ...
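The prime/dma-buf support called out above lets userspace hand GPU buffers between drivers as file descriptors. A minimal, hypothetical userspace sketch using the libdrm wrappers around the new PRIME ioctls (the GEM handle here is a placeholder; in practice it comes from a driver-specific allocation ioctl such as dumb-buffer create):

	#include <stdint.h>
	#include <xf86drm.h>   /* libdrm: drmPrimeHandleToFD()/drmPrimeFDToHandle() */

	/* Export a GEM handle as a dma-buf fd (DRM_IOCTL_PRIME_HANDLE_TO_FD). */
	static int export_buffer(int drm_fd, uint32_t gem_handle)
	{
		int dmabuf_fd = -1;

		if (drmPrimeHandleToFD(drm_fd, gem_handle, DRM_CLOEXEC, &dmabuf_fd))
			return -1;
		return dmabuf_fd;   /* can be passed to another process or device */
	}

	/* Import the fd on another DRM device (DRM_IOCTL_PRIME_FD_TO_HANDLE). */
	static int import_buffer(int drm_fd, int dmabuf_fd, uint32_t *gem_handle)
	{
		return drmPrimeFDToHandle(drm_fd, dmabuf_fd, gem_handle);
	}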
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/agp/generic.c | 4
-rw-r--r--  drivers/char/agp/intel-agp.c | 5
-rw-r--r--  drivers/char/agp/intel-agp.h | 14
-rw-r--r--  drivers/char/agp/intel-gtt.c | 45
-rw-r--r--  drivers/char/agp/sgi-agp.c | 1
-rw-r--r--  drivers/gpu/drm/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/ast/Kconfig | 16
-rw-r--r--  drivers/gpu/drm/ast/Makefile | 9
-rw-r--r--  drivers/gpu/drm/ast/ast_dram_tables.h | 144
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 244
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 356
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 341
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 527
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 1160
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 1780
-rw-r--r--  drivers/gpu/drm/ast/ast_tables.h | 265
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 453
-rw-r--r--  drivers/gpu/drm/cirrus/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/cirrus/Makefile | 5
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 108
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 246
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 307
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 335
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 629
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 453
-rw-r--r--  drivers/gpu/drm/drm_cache.c | 23
-rw-r--r--  drivers/gpu/drm/drm_context.c | 9
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 583
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 35
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 222
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 12
-rw-r--r--  drivers/gpu/drm/drm_edid_modes.h | 292
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 10
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 35
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 4
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 23
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 2
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 48
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 7
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 10
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 18
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 272
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.h | 39
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 43
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 17
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 937
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.h | 36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 89
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 77
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 429
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 401
-rw-r--r--  drivers/gpu/drm/exynos/regs-hdmi.h | 6
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 5
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 231
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c | 30
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 697
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 76
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 57
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 32
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.c | 274
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.h | 161
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_device.c | 452
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c | 24
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 330
-rw-r--r--  drivers/gpu/drm/gma500/mid_bios.c | 295
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail.h | 25
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 134
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 138
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 66
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/opregion.c | 344
-rw-r--r--  drivers/gpu/drm/gma500/opregion.h (renamed from drivers/gpu/drm/gma500/intel_opregion.c) | 64
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 75
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 66
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 208
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 348
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h | 9
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_reg.h | 35
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 9
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 36
-rw-r--r--  drivers/gpu/drm/gma500/psb_lid.c | 14
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 383
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 1160
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 277
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 245
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 1916
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 171
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 38
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 200
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 96
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 202
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 1732
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 488
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 111
-rw-r--r--  drivers/gpu/drm/i915/i915_trace_points.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 76
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 755
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 4338
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 105
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 310
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 384
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 209
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 3796
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 725
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 107
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 102
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 16
-rw-r--r--  drivers/gpu/drm/mgag200/Kconfig | 15
-rw-r--r--  drivers/gpu/drm/mgag200/Makefile | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 116
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 276
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 294
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_i2c.c | 156
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 388
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 1533
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_reg.h | 661
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 452
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 385
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 176
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 578
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 52
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fifo.h | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gpio.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 215
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 163
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_software.h | 69
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 270
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c | 57
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c | 140
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 419
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_software.c | 147
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 214
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fifo.c | 278
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fifo.c | 177
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv31_mpeg.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c | 351
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_grctx.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 102
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 75
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fb.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 596
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 229
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_mpeg.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_software.c | 214
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 177
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fifo.c | 241
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_crypt.c | 166
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_crypt.fuc | 698
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_crypt.fuc.h | 584
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_pm.c | 290
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fence.c | 184
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c | 310
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_pm.c | 189
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_software.c | 153
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_fifo.c | 423
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_graph.c | 831
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_graph.h | 89
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_grctx.c | 2777
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 5
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 155
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 216
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 220
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 41
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 121
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 34
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 191
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 215
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 101
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 464
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 45
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 233
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 235
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 66
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 143
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 113
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 621
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 176
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 396
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 325
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 187
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 67
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/rs600d.h | 14
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 191
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 30
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 17
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 8
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 9
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 75
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2
-rw-r--r--  drivers/gpu/vga/Kconfig | 1
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 284
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 9
-rw-r--r--  drivers/pci/pci-sysfs.c | 5
-rw-r--r--  drivers/staging/omapdrm/omap_crtc.c | 7
-rw-r--r--  drivers/staging/omapdrm/omap_drv.c | 4
-rw-r--r--  drivers/video/efifb.c | 79
302 files changed, 41423 insertions, 14710 deletions
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 17e05d1076b3..a0df182f6f7d 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -958,7 +958,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
if (set_memory_uc((unsigned long)table, 1 << page_order))
printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
- bridge->gatt_table = (void *)table;
+ bridge->gatt_table = (u32 __iomem *)table;
#else
bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
(PAGE_SIZE * (1 << page_order)));
@@ -1010,7 +1010,6 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
case LVL2_APER_SIZE:
/* The generic routines can't deal with 2 level gatt's */
return -EINVAL;
- break;
default:
page_order = 0;
break;
@@ -1077,7 +1076,6 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
case LVL2_APER_SIZE:
/* The generic routines can't deal with 2 level gatt's */
return -EINVAL;
- break;
default:
num_entries = 0;
break;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 962e75dc4781..764f70c5e690 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -907,6 +907,11 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
+ ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
+ ID(PCI_DEVICE_ID_INTEL_HASWELL_HB),
+ ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB),
+ ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB),
{ }
};
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 7ea18a5fe71c..c0091753a0d1 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -96,6 +96,7 @@
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
+#define GFX_FLSH_CNTL_VLV 0x101008
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
@@ -235,6 +236,19 @@
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
+#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
+#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
+#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */
+#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
int intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 7f025fb620de..1237e7575c3f 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1179,6 +1179,20 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
writel(addr | pte_flags, intel_private.gtt + entry);
}
+static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ u32 pte_flags;
+
+ pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+
+ /* gen6 has bit11-4 for physical addr bit39-32 */
+ addr |= (addr >> 28) & 0xff0;
+ writel(addr | pte_flags, intel_private.gtt + entry);
+
+ writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
+}
+
static void gen6_cleanup(void)
{
}
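The address packing in valleyview_write_entry() above is compact enough to deserve a worked example. As the in-line comment notes, GEN6-style PTEs carry physical address bits 39:32 in PTE bits 11:4; shifting the address right by 28 moves bit 32 down to bit 4 (and bit 39 to bit 11), and the 0xff0 mask keeps exactly those eight bits. With a hypothetical 40-bit DMA address:

	/* addr = 0x1234567000 -- its bits 39:32 are 0x12 */
	u64 addr = 0x1234567000ULL;
	u32 hi   = (addr >> 28) & 0xff0;   /* 0x123 & 0xff0 = 0x120 == 0x12 << 4 */
	/* pte  = low address bits | hi | flag bits, exactly as written above */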
@@ -1205,12 +1219,16 @@ static inline int needs_idle_maps(void)
static int i9xx_setup(void)
{
u32 reg_addr;
+ int size = KB(512);
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
reg_addr &= 0xfff80000;
- intel_private.registers = ioremap(reg_addr, 128 * 4096);
+ if (INTEL_GTT_GEN >= 7)
+ size = MB(2);
+
+ intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
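The MMIO mapping size bump above is easier to check with the macro values written out. Assuming KB() and MB() carry their usual definitions elsewhere in intel-gtt.c:

	/* #define KB(x) ((x) * 1024)
	 * #define MB(x) (KB(KB(x)))
	 * gen <= 6: KB(512) = 524288 bytes -- identical to the old 128 * 4096
	 * gen >= 7: MB(2)   = 2097152 bytes of register + GTT space */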
@@ -1354,6 +1372,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
+static const struct intel_gtt_driver valleyview_gtt_driver = {
+ .gen = 7,
+ .setup = i9xx_setup,
+ .cleanup = gen6_cleanup,
+ .write_entry = valleyview_write_entry,
+ .dma_mask_size = 40,
+ .check_flags = gen6_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1460,6 +1487,22 @@ static const struct intel_gtt_driver_description {
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
+ "ValleyView", &valleyview_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV,
+ "Haswell", &sandybridge_gtt_driver },
{ 0, NULL, NULL }
};
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index ffa888cd1c88..192000377737 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -158,7 +158,6 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
break;
case LVL2_APER_SIZE:
return -EINVAL;
- break;
default:
num_entries = 0;
break;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e354bc0b052a..23120c00a881 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -186,3 +186,9 @@ source "drivers/gpu/drm/vmwgfx/Kconfig"
source "drivers/gpu/drm/gma500/Kconfig"
source "drivers/gpu/drm/udl/Kconfig"
+
+source "drivers/gpu/drm/ast/Kconfig"
+
+source "drivers/gpu/drm/mgag200/Kconfig"
+
+source "drivers/gpu/drm/cirrus/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c20da5bda355..f65f65ed0ddf 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
obj-$(CONFIG_DRM_I915) += i915/
+obj-$(CONFIG_DRM_MGAG200) += mgag200/
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
@@ -42,4 +44,5 @@ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
+obj-$(CONFIG_DRM_AST) += ast/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
new file mode 100644
index 000000000000..a277b1257888
--- /dev/null
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -0,0 +1,16 @@
+config DRM_AST
+ tristate "AST server chips"
+ depends on DRM && PCI && EXPERIMENTAL
+ select DRM_TTM
+ select FB_SYS_COPYAREA
+ select FB_SYS_FILLRECT
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ Say yes for experimental AST GPU driver. Do not enable
+ this driver without having a working -modesetting,
+ and a version of AST that knows to fail if KMS
+ is bound to the driver. These GPUs are commonly found
+ in server chipsets.
+
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
new file mode 100644
index 000000000000..8df4f284ee24
--- /dev/null
+++ b/drivers/gpu/drm/ast/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o
+
+obj-$(CONFIG_DRM_AST) := ast.o
\ No newline at end of file
diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h
new file mode 100644
index 000000000000..cc04539c0ff3
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_dram_tables.h
@@ -0,0 +1,144 @@
+#ifndef AST_DRAM_TABLES_H
+#define AST_DRAM_TABLES_H
+
+/* DRAM timing tables */
+struct ast_dramstruct {
+ u16 index;
+ u32 data;
+};
+
+static const struct ast_dramstruct ast2000_dram_table_data[] = {
+ { 0x0108, 0x00000000 },
+ { 0x0120, 0x00004a21 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xFFFFFFFF },
+ { 0x0004, 0x00000089 },
+ { 0x0008, 0x22331353 },
+ { 0x000C, 0x0d07000b },
+ { 0x0010, 0x11113333 },
+ { 0x0020, 0x00110350 },
+ { 0x0028, 0x1e0828f0 },
+ { 0x0024, 0x00000001 },
+ { 0x001C, 0x00000000 },
+ { 0x0014, 0x00000003 },
+ { 0xFF00, 0x00000043 },
+ { 0x0018, 0x00000131 },
+ { 0x0014, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x0018, 0x00000031 },
+ { 0x0014, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x0028, 0x1e0828f1 },
+ { 0x0024, 0x00000003 },
+ { 0x002C, 0x1f0f28fb },
+ { 0x0030, 0xFFFFFE01 },
+ { 0xFFFF, 0xFFFFFFFF }
+};
+
+static const struct ast_dramstruct ast1100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x000041f0 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00050000 },
+ { 0x0004, 0x00000585 },
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x22201724 },
+ { 0x0018, 0x1e29011a },
+ { 0x0020, 0x00c82222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x032aa02a },
+ { 0x0064, 0x002d3000 },
+ { 0x0068, 0x00000000 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x002C, 0x00000732 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000632 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00004c41 },
+ { 0xffff, 0xffffffff },
+};
+
+static const struct ast_dramstruct ast2100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x00004120 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00070000 },
+ { 0x0004, 0x00000489 },
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x32302926 },
+ { 0x0018, 0x274c0122 },
+ { 0x0020, 0x00ce2222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x0f2aa02a },
+ { 0x0064, 0x003f3005 },
+ { 0x0068, 0x02020202 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x002C, 0x00000942 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000842 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00005061 },
+ { 0xffff, 0xffffffff },
+};
+
+#endif
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
new file mode 100644
index 000000000000..d0c4574ef49c
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "ast_drv.h"
+
+int ast_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, ast_modeset, int, 0400);
+
+#define PCI_VENDOR_ASPEED 0x1a03
+
+static struct drm_driver driver;
+
+#define AST_VGA_DEVICE(id, info) { \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, \
+ .class_mask = 0xff0000, \
+ .vendor = PCI_VENDOR_ASPEED, \
+ .device = id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = (unsigned long) info }
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
+ AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
+ /* AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */
+ {0, 0, 0},
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int __devinit
+ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void
+ast_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+
+
+static int ast_drm_freeze(struct drm_device *dev)
+{
+ drm_kms_helper_poll_disable(dev);
+
+ pci_save_state(dev->pdev);
+
+ console_lock();
+ ast_fbdev_set_suspend(dev, 1);
+ console_unlock();
+ return 0;
+}
+
+static int ast_drm_thaw(struct drm_device *dev)
+{
+ int error = 0;
+
+ ast_post_gpu(dev);
+
+ drm_mode_config_reset(dev);
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ console_lock();
+ ast_fbdev_set_suspend(dev, 0);
+ console_unlock();
+ return error;
+}
+
+static int ast_drm_resume(struct drm_device *dev)
+{
+ int ret;
+
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ ret = ast_drm_thaw(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+ return 0;
+}
+
+static int ast_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ int error;
+
+ error = ast_drm_freeze(ddev);
+ if (error)
+ return error;
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+static int ast_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ return ast_drm_resume(ddev);
+}
+
+static int ast_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ if (!ddev || !ddev->dev_private)
+ return -ENODEV;
+ return ast_drm_freeze(ddev);
+
+}
+
+static int ast_pm_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ return ast_drm_thaw(ddev);
+}
+
+static int ast_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ return ast_drm_freeze(ddev);
+}
+
+static const struct dev_pm_ops ast_pm_ops = {
+ .suspend = ast_pm_suspend,
+ .resume = ast_pm_resume,
+ .freeze = ast_pm_freeze,
+ .thaw = ast_pm_thaw,
+ .poweroff = ast_pm_poweroff,
+ .restore = ast_pm_resume,
+};
+
+static struct pci_driver ast_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = ast_pci_probe,
+ .remove = ast_pci_remove,
+ .driver.pm = &ast_pm_ops,
+};
+
+static const struct file_operations ast_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = ast_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM,
+ .dev_priv_size = 0,
+
+ .load = ast_driver_load,
+ .unload = ast_driver_unload,
+
+ .fops = &ast_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_init_object = ast_gem_init_object,
+ .gem_free_object = ast_gem_free_object,
+ .dumb_create = ast_dumb_create,
+ .dumb_map_offset = ast_dumb_mmap_offset,
+ .dumb_destroy = ast_dumb_destroy,
+
+};
+
+static int __init ast_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && ast_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (ast_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &ast_pci_driver);
+}
+static void __exit ast_exit(void)
+{
+ drm_pci_exit(&driver, &ast_pci_driver);
+}
+
+module_init(ast_init);
+module_exit(ast_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
new file mode 100644
index 000000000000..d4af9edcbb97
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#ifndef __AST_DRV_H__
+#define __AST_DRV_H__
+
+#include "drm_fb_helper.h"
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#define DRIVER_AUTHOR "Dave Airlie"
+
+#define DRIVER_NAME "ast"
+#define DRIVER_DESC "AST"
+#define DRIVER_DATE "20120228"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#define PCI_CHIP_AST2000 0x2000
+#define PCI_CHIP_AST2100 0x2010
+#define PCI_CHIP_AST1180 0x1180
+
+
+enum ast_chip {
+ AST2000,
+ AST2100,
+ AST1100,
+ AST2200,
+ AST2150,
+ AST2300,
+ AST1180,
+};
+
+#define AST_DRAM_512Mx16 0
+#define AST_DRAM_1Gx16 1
+#define AST_DRAM_512Mx32 2
+#define AST_DRAM_1Gx32 3
+#define AST_DRAM_2Gx16 6
+#define AST_DRAM_4Gx16 7
+
+struct ast_fbdev;
+
+struct ast_private {
+ struct drm_device *dev;
+
+ void __iomem *regs;
+ void __iomem *ioregs;
+
+ enum ast_chip chip;
+ bool vga2_clone;
+ uint32_t dram_bus_width;
+ uint32_t dram_type;
+ uint32_t mclk;
+ uint32_t vram_size;
+
+ struct ast_fbdev *fbdev;
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ } ttm;
+
+ struct drm_gem_object *cursor_cache;
+ uint64_t cursor_cache_gpu_addr;
+ struct ttm_bo_kmap_obj cache_kmap;
+ int next_cursor;
+};
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags);
+int ast_driver_unload(struct drm_device *dev);
+
+struct ast_gem_object;
+
+#define AST_IO_AR_PORT_WRITE (0x40)
+#define AST_IO_MISC_PORT_WRITE (0x42)
+#define AST_IO_SEQ_PORT (0x44)
+#define AST_DAC_INDEX_READ (0x3c7)
+#define AST_IO_DAC_INDEX_WRITE (0x48)
+#define AST_IO_DAC_DATA (0x49)
+#define AST_IO_GR_PORT (0x4E)
+#define AST_IO_CRTC_PORT (0x54)
+#define AST_IO_INPUT_STATUS1_READ (0x5A)
+#define AST_IO_MISC_PORT_READ (0x4C)
+
+#define __ast_read(x) \
+static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->regs + reg); \
+return val;\
+}
+
+__ast_read(8);
+__ast_read(16);
+__ast_read(32)
+
+#define __ast_io_read(x) \
+static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->ioregs + reg); \
+return val;\
+}
+
+__ast_io_read(8);
+__ast_io_read(16);
+__ast_io_read(32);
+
+#define __ast_write(x) \
+static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+ iowrite##x(val, ast->regs + reg);\
+ }
+
+__ast_write(8);
+__ast_write(16);
+__ast_write(32);
+
+#define __ast_io_write(x) \
+static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+ iowrite##x(val, ast->ioregs + reg);\
+ }
+
+__ast_io_write(8);
+__ast_io_write(16);
+#undef __ast_io_write
+
+static inline void ast_set_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t val)
+{
+ ast_io_write16(ast, base, ((u16)val << 8) | index);
+}
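ast_set_index_reg() above leans on the VGA convention that an index port and its data port sit at consecutive I/O addresses, so a single 16-bit write lands the index byte on the even port and the value byte on the odd port. A hypothetical open-coded equivalent using two 8-bit accesses:

	/* illustration only -- this helper does not exist in the driver */
	static inline void ast_set_index_reg_slow(struct ast_private *ast,
						  uint32_t base, u8 index, u8 val)
	{
		ast_io_write8(ast, base, index);     /* select the register */
		ast_io_write8(ast, base + 1, val);   /* write its new value */
	}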
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t mask, uint8_t val);
+uint8_t ast_get_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index);
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index, uint8_t mask);
+
+static inline void ast_open_key(struct ast_private *ast)
+{
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
+}
+
+#define AST_VIDMEM_SIZE_8M 0x00800000
+#define AST_VIDMEM_SIZE_16M 0x01000000
+#define AST_VIDMEM_SIZE_32M 0x02000000
+#define AST_VIDMEM_SIZE_64M 0x04000000
+#define AST_VIDMEM_SIZE_128M 0x08000000
+
+#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
+
+#define AST_MAX_HWC_WIDTH 64
+#define AST_MAX_HWC_HEIGHT 64
+
+#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2)
+#define AST_HWC_SIGNATURE_SIZE 32
+
+#define AST_DEFAULT_HWC_NUM 2
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM 0x00
+#define AST_HWC_SIGNATURE_SizeX 0x04
+#define AST_HWC_SIGNATURE_SizeY 0x08
+#define AST_HWC_SIGNATURE_X 0x0C
+#define AST_HWC_SIGNATURE_Y 0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+
+
+struct ast_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
+};
+
+struct ast_connector {
+ struct drm_connector base;
+ struct ast_i2c_chan *i2c;
+};
+
+struct ast_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ struct drm_gem_object *cursor_bo;
+ uint64_t cursor_addr;
+ int cursor_width, cursor_height;
+ u8 offset_x, offset_y;
+};
+
+struct ast_encoder {
+ struct drm_encoder base;
+};
+
+struct ast_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct ast_fbdev {
+ struct drm_fb_helper helper;
+ struct ast_framebuffer afb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+};
+
+#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
+#define to_ast_connector(x) container_of(x, struct ast_connector, base)
+#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
+#define to_ast_framebuffer(x) container_of(x, struct ast_framebuffer, base)
+
+struct ast_vbios_stdtable {
+ u8 misc;
+ u8 seq[4];
+ u8 crtc[25];
+ u8 ar[20];
+ u8 gr[9];
+};
+
+struct ast_vbios_enhtable {
+ u32 ht;
+ u32 hde;
+ u32 hfp;
+ u32 hsync;
+ u32 vt;
+ u32 vde;
+ u32 vfp;
+ u32 vsync;
+ u32 dclk_index;
+ u32 flags;
+ u32 refresh_rate;
+ u32 refresh_rate_index;
+ u32 mode_id;
+};
+
+struct ast_vbios_dclk_info {
+ u8 param1;
+ u8 param2;
+ u8 param3;
+};
+
+struct ast_vbios_mode_info {
+ struct ast_vbios_stdtable *std_table;
+ struct ast_vbios_enhtable *enh_table;
+};
+
+extern int ast_mode_init(struct drm_device *dev);
+extern void ast_mode_fini(struct drm_device *dev);
+
+int ast_framebuffer_init(struct drm_device *dev,
+ struct ast_framebuffer *ast_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+int ast_fbdev_init(struct drm_device *dev);
+void ast_fbdev_fini(struct drm_device *dev);
+void ast_fbdev_set_suspend(struct drm_device *dev, int state);
+
+struct ast_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
+
+static inline struct ast_bo *
+ast_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct ast_bo, bo);
+}
+
+
+#define to_ast_obj(x) container_of(x, struct ast_gem_object, base)
+
+#define AST_MM_ALIGN_SHIFT 4
+#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
+
+extern int ast_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+extern int ast_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+
+extern int ast_gem_init_object(struct drm_gem_object *obj);
+extern void ast_gem_free_object(struct drm_gem_object *obj);
+extern int ast_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+int ast_mm_init(struct ast_private *ast);
+void ast_mm_fini(struct ast_private *ast);
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct ast_bo **pastbo);
+
+int ast_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int ast_bo_unpin(struct ast_bo *bo);
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait);
+void ast_bo_unreserve(struct ast_bo *bo);
+void ast_ttm_placement(struct ast_bo *bo, int domain);
+int ast_bo_push_sysram(struct ast_bo *bo);
+int ast_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* ast post */
+void ast_post_gpu(struct drm_device *dev);
+#endif
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
new file mode 100644
index 000000000000..2fc8e9e860b1
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "ast_drv.h"
+
+static void ast_dirty_update(struct ast_fbdev *afbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
+
+ obj = afbdev->afb.obj;
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("failed to reserve fb bo\n");
+ return;
+ }
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ ast_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ ast_bo_unreserve(bo);
+}
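ast_dirty_update() copies only the touched scanlines from the vmalloc'ed shadow buffer (afbdev->sysram) into the TTM-backed VRAM object, one memcpy_toio() per line. A worked example with hypothetical framebuffer parameters:

	/* 1024x768 @ 32bpp shadow, update rectangle 16x16 at (x=100, y=50):
	 *   pitch = 1024 * 4 = 4096 bytes, bpp = 4
	 *   first scanline offset = 50 * 4096 + 100 * 4 = 205200 bytes
	 *   16 memcpy_toio() calls of width * bpp = 64 bytes each */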
+
+static void ast_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_fillrect(info, rect);
+ ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void ast_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_copyarea(info, area);
+ ast_dirty_update(afbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void ast_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_imageblit(info, image);
+ ast_dirty_update(afbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+static struct fb_ops astfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = ast_fillrect,
+ .fb_copyarea = ast_copyarea,
+ .fb_imageblit = ast_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int astfb_create_object(struct ast_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = ast_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int astfb_create(struct ast_fbdev *afbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_framebuffer *fb;
+ struct fb_info *info;
+ int size, ret;
+ struct device *device = &dev->pdev->dev;
+ void *sysram;
+ struct drm_gem_object *gobj = NULL;
+ struct ast_bo *bo = NULL;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = astfb_create_object(afbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+ bo = gem_to_ast_bo(gobj);
+
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->par = afbdev;
+
+ ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
+ if (ret)
+ goto out;
+
+ afbdev->sysram = sysram;
+ afbdev->size = size;
+
+ fb = &afbdev->afb.base;
+ afbdev->helper.fb = fb;
+ afbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "astdrmfb");
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &astfb_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n",
+ fb->width, fb->height);
+
+ return 0;
+out:
+ return ret;
+}
+
+static void ast_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ ast_crtc->lut_r[regno] = red >> 8;
+ ast_crtc->lut_g[regno] = green >> 8;
+ ast_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ *red = ast_crtc->lut_r[regno] << 8;
+ *green = ast_crtc->lut_g[regno] << 8;
+ *blue = ast_crtc->lut_b[regno] << 8;
+}
+
+static int ast_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = astfb_create(afbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+ .gamma_set = ast_fb_gamma_set,
+ .gamma_get = ast_fb_gamma_get,
+ .fb_probe = ast_find_or_create_single,
+};
+
+static void ast_fbdev_destroy(struct drm_device *dev,
+ struct ast_fbdev *afbdev)
+{
+ struct fb_info *info;
+ struct ast_framebuffer *afb = &afbdev->afb;
+ if (afbdev->helper.fbdev) {
+ info = afbdev->helper.fbdev;
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (afb->obj) {
+ drm_gem_object_unreference_unlocked(afb->obj);
+ afb->obj = NULL;
+ }
+ drm_fb_helper_fini(&afbdev->helper);
+
+ vfree(afbdev->sysram);
+ drm_framebuffer_cleanup(&afb->base);
+}
+
+int ast_fbdev_init(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_fbdev *afbdev;
+ int ret;
+
+ afbdev = kzalloc(sizeof(struct ast_fbdev), GFP_KERNEL);
+ if (!afbdev)
+ return -ENOMEM;
+
+ ast->fbdev = afbdev;
+ afbdev->helper.funcs = &ast_fb_helper_funcs;
+ ret = drm_fb_helper_init(dev, &afbdev->helper,
+ 1, 1);
+ if (ret) {
+ kfree(afbdev);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+ drm_fb_helper_initial_config(&afbdev->helper, 32);
+ return 0;
+}
+
+void ast_fbdev_fini(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (!ast->fbdev)
+ return;
+
+ ast_fbdev_destroy(dev, ast->fbdev);
+ kfree(ast->fbdev);
+ ast->fbdev = NULL;
+}
+
+void ast_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (!ast->fbdev)
+ return;
+
+ fb_set_suspend(ast->fbdev->helper.fbdev, state);
+}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
new file mode 100644
index 000000000000..95ae55b8214b
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "ast_drv.h"
+
+
+#include "drm_fb_helper.h"
+#include "drm_crtc_helper.h"
+
+#include "ast_dram_tables.h"
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t mask, uint8_t val)
+{
+ u8 tmp;
+ ast_io_write8(ast, base, index);
+ tmp = (ast_io_read8(ast, base + 1) & mask) | val;
+ ast_set_index_reg(ast, base, index, tmp);
+}
+
+uint8_t ast_get_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index)
+{
+ uint8_t ret;
+ ast_io_write8(ast, base, index);
+ ret = ast_io_read8(ast, base + 1);
+ return ret;
+}
+
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index, uint8_t mask)
+{
+ uint8_t ret;
+ ast_io_write8(ast, base, index);
+ ret = ast_io_read8(ast, base + 1) & mask;
+ return ret;
+}
+
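+/*
+ * Work out which ASPEED generation this is: AST1180 by PCI device ID,
+ * the rest from the PCI revision and (for rev 0x1x parts) what looks
+ * like a hardware strap read back through 0x1207c.
+ */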
+static int ast_detect_chip(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (dev->pdev->device == PCI_CHIP_AST1180) {
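+ /* XXX: chip is set to AST1100 here, so the AST1180-only paths elsewhere are not taken */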
+ ast->chip = AST1100;
+ DRM_INFO("AST 1180 detected\n");
+ } else {
+ if (dev->pdev->revision >= 0x20) {
+ ast->chip = AST2300;
+ DRM_INFO("AST 2300 detected\n");
+ } else if (dev->pdev->revision >= 0x10) {
+ uint32_t data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+ data = ast_read32(ast, 0x1207c);
+ switch (data & 0x0300) {
+ case 0x0200:
+ ast->chip = AST1100;
+ DRM_INFO("AST 1100 detected\n");
+ break;
+ case 0x0100:
+ ast->chip = AST2200;
+ DRM_INFO("AST 2200 detected\n");
+ break;
+ case 0x0000:
+ ast->chip = AST2150;
+ DRM_INFO("AST 2150 detected\n");
+ break;
+ default:
+ ast->chip = AST2100;
+ DRM_INFO("AST 2100 detected\n");
+ break;
+ }
+ ast->vga2_clone = false;
+ } else {
+ ast->chip = AST2000;
+ DRM_INFO("AST 2000 detected\n");
+ }
+ }
+ return 0;
+}
+
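+/*
+ * Ask the memory controller for the DRAM bus width, type and PLL
+ * settings, and derive the memory clock (MHz) from them.
+ */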
+static int ast_get_dram_info(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ uint32_t data, data2;
+ uint32_t denum, num, div, ref_pll;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+
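+ /* unlock the SDRAM controller (protection key) and wait for it to take effect */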
+ ast_write32(ast, 0x10000, 0xfc600309);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+ data = ast_read32(ast, 0x10004);
+
+ if (data & 0x400)
+ ast->dram_bus_width = 16;
+ else
+ ast->dram_bus_width = 32;
+
+ if (ast->chip == AST2300) {
+ switch (data & 0x03) {
+ case 0:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 1:
+ ast->dram_type = AST_DRAM_1Gx16;
+ break;
+ case 2:
+ ast->dram_type = AST_DRAM_2Gx16;
+ break;
+ case 3:
+ ast->dram_type = AST_DRAM_4Gx16;
+ break;
+ }
+ } else {
+ switch (data & 0x0c) {
+ case 0:
+ case 4:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (data & 0x40)
+ ast->dram_type = AST_DRAM_1Gx16;
+ else
+ ast->dram_type = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ ast->dram_type = AST_DRAM_1Gx32;
+ break;
+ }
+ }
+
+ data = ast_read32(ast, 0x10120);
+ data2 = ast_read32(ast, 0x10170);
+ if (data2 & 0x2000)
+ ref_pll = 14318;
+ else
+ ref_pll = 12000;
+
+ denum = data & 0x1f;
+ num = (data & 0x3fe0) >> 5;
+ data = (data & 0xc000) >> 14;
+ switch (data) {
+ case 3:
+ div = 0x4;
+ break;
+ case 2:
+ case 1:
+ div = 0x2;
+ break;
+ default:
+ div = 0x1;
+ break;
+ }
+ ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
+ return 0;
+}
+
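+/*
+ * Estimate the highest usable pixel clock (MHz) for a given depth from
+ * the DRAM bandwidth, derated by a per-chip efficiency factor that is
+ * expressed in tenths of a percent.
+ */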
+uint32_t ast_get_max_dclk(struct drm_device *dev, int bpp)
+{
+ struct ast_private *ast = dev->dev_private;
+ uint32_t dclk, jreg;
+ uint32_t dram_bus_width, mclk, dram_bandwidth, actual_dram_bandwidth, dram_efficiency = 500;
+
+ dram_bus_width = ast->dram_bus_width;
+ mclk = ast->mclk;
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST1100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2150 ||
+ ast->dram_bus_width == 16)
+ dram_efficiency = 600;
+ else if (ast->chip == AST2300)
+ dram_efficiency = 400;
+
+ dram_bandwidth = mclk * dram_bus_width * 2 / 8;
+ actual_dram_bandwidth = dram_bandwidth * dram_efficiency / 1000;
+
+ if (ast->chip == AST1180)
+ dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
+ else {
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ if ((jreg & 0x08) && (ast->chip == AST2000))
+ dclk = actual_dram_bandwidth / ((bpp + 1 + 16) / 8);
+ else if ((jreg & 0x08) && (bpp == 8))
+ dclk = actual_dram_bandwidth / ((bpp + 1 + 24) / 8);
+ else
+ dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
+ }
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2300 ||
+ ast->chip == AST1180) {
+ if (dclk > 200)
+ dclk = 200;
+ } else {
+ if (dclk > 165)
+ dclk = 165;
+ }
+
+ return dclk;
+}
+
+static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
+ if (ast_fb->obj)
+ drm_gem_object_unreference_unlocked(ast_fb->obj);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
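+/* Exporting a GEM handle for a framebuffer (getfb) is not supported. */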
+static int ast_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int *handle)
+{
+ return -EINVAL;
+}
+
+static const struct drm_framebuffer_funcs ast_fb_funcs = {
+ .destroy = ast_user_framebuffer_destroy,
+ .create_handle = ast_user_framebuffer_create_handle,
+};
+
+int ast_framebuffer_init(struct drm_device *dev,
+ struct ast_framebuffer *ast_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
+ ast_fb->obj = obj;
+ return 0;
+}
+
+static struct drm_framebuffer *
+ast_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct ast_framebuffer *ast_fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
+ if (!ast_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(ast_fb);
+ return ERR_PTR(ret);
+ }
+ return &ast_fb->base;
+}
+
+static const struct drm_mode_config_funcs ast_mode_funcs = {
+ .fb_create = ast_user_framebuffer_create,
+};
+
+static u32 ast_get_vram_info(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+
+ ast_open_key(ast);
+
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
+ switch (jreg & 3) {
+ case 0: return AST_VIDMEM_SIZE_8M;
+ case 1: return AST_VIDMEM_SIZE_16M;
+ case 2: return AST_VIDMEM_SIZE_32M;
+ case 3: return AST_VIDMEM_SIZE_64M;
+ }
+ return AST_VIDMEM_DEFAULT_SIZE;
+}
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct ast_private *ast;
+ int ret = 0;
+
+ ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL);
+ if (!ast)
+ return -ENOMEM;
+
+ dev->dev_private = ast;
+ ast->dev = dev;
+
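+ /* BAR 1 holds the MMIO register window, BAR 2 the relocated VGA I/O ports */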
+ ast->regs = pci_iomap(dev->pdev, 1, 0);
+ if (!ast->regs) {
+ ret = -EIO;
+ goto out_free;
+ }
+ ast->ioregs = pci_iomap(dev->pdev, 2, 0);
+ if (!ast->ioregs) {
+ ret = -EIO;
+ goto out_free;
+ }
+
+ ast_detect_chip(dev);
+
+ if (ast->chip != AST1180) {
+ ast_get_dram_info(dev);
+ ast->vram_size = ast_get_vram_info(dev);
+ DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ goto out_free;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.funcs = (void *)&ast_mode_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2300 ||
+ ast->chip == AST1180) {
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 2048;
+ } else {
+ dev->mode_config.max_width = 1600;
+ dev->mode_config.max_height = 1200;
+ }
+
+ ret = ast_mode_init(dev);
+ if (ret)
+ goto out_free;
+
+ ret = ast_fbdev_init(dev);
+ if (ret)
+ goto out_free;
+
+ return 0;
+out_free:
+ kfree(ast);
+ dev->dev_private = NULL;
+ return ret;
+}
+
+int ast_driver_unload(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ ast_mode_fini(dev);
+ ast_fbdev_fini(dev);
+ drm_mode_config_cleanup(dev);
+
+ ast_mm_fini(ast);
+ pci_iounmap(dev->pdev, ast->ioregs);
+ pci_iounmap(dev->pdev, ast->regs);
+ kfree(ast);
+ return 0;
+}
+
+int ast_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct ast_bo *astbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = ast_bo_create(dev, size, 0, 0, &astbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &astbo->gem;
+ return 0;
+}
+
+int ast_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = ast_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+int ast_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int ast_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+ return 0;
+}
+
+void ast_bo_unref(struct ast_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+}
+
+void ast_gem_free_object(struct drm_gem_object *obj)
+{
+ struct ast_bo *ast_bo = gem_to_ast_bo(obj);
+
+ if (!ast_bo)
+ return;
+ ast_bo_unref(&ast_bo);
+}
+
+static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
+{
+ return bo->bo.addr_space_offset;
+}
+
+int
+ast_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct ast_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_ast_bo(obj);
+ *offset = ast_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
new file mode 100644
index 000000000000..65f9d231af14
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -0,0 +1,1160 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ * Parts based on xf86-video-ast
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "ast_drv.h"
+
+#include "ast_tables.h"
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
+static int ast_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height);
+static int ast_cursor_move(struct drm_crtc *crtc,
+ int x, int y);
+
+static inline void ast_load_palette_index(struct ast_private *ast,
+ u8 index, u8 red, u8 green,
+ u8 blue)
+{
+ ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, red);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, green);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, blue);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+}
+
+static void ast_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ for (i = 0; i < 256; i++)
+ ast_load_palette_index(ast, i, ast_crtc->lut_r[i],
+ ast_crtc->lut_g[i], ast_crtc->lut_b[i]);
+}
+
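+/*
+ * Look the requested mode up in the VBIOS standard/enhanced tables,
+ * rewrite adjusted_mode with the table timings, and mirror the chosen
+ * mode into the 0x8c-0x97 scratch registers.
+ */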
+static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
+ u32 hborder, vborder;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
+ color_index = VGAModeIndex - 1;
+ break;
+ case 16:
+ vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
+ color_index = HiCModeIndex;
+ break;
+ case 24:
+ case 32:
+ vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
+ color_index = TrueCModeIndex;
+ break;
+ default:
+ return false;
+ }
+
+ switch (crtc->mode.crtc_hdisplay) {
+ case 640:
+ vbios_mode->enh_table = &res_640x480[refresh_rate_index];
+ break;
+ case 800:
+ vbios_mode->enh_table = &res_800x600[refresh_rate_index];
+ break;
+ case 1024:
+ vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
+ break;
+ case 1280:
+ if (crtc->mode.crtc_vdisplay == 800)
+ vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
+ else
+ vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
+ break;
+ case 1440:
+ vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
+ break;
+ case 1600:
+ vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
+ break;
+ case 1680:
+ vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
+ break;
+ case 1920:
+ if (crtc->mode.crtc_vdisplay == 1080)
+ vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
+ else
+ vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
+ break;
+ default:
+ return false;
+ }
+
+ refresh_rate = drm_mode_vrefresh(mode);
+ while (vbios_mode->enh_table->refresh_rate < refresh_rate) {
+ vbios_mode->enh_table++;
+ if ((vbios_mode->enh_table->refresh_rate > refresh_rate) ||
+ (vbios_mode->enh_table->refresh_rate == 0xff)) {
+ vbios_mode->enh_table--;
+ break;
+ }
+ }
+
+ hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
+ vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
+
+ adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
+ adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
+ adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
+ adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder +
+ vbios_mode->enh_table->hfp;
+ adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder +
+ vbios_mode->enh_table->hfp +
+ vbios_mode->enh_table->hsync);
+
+ adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
+ adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
+ adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
+ adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder +
+ vbios_mode->enh_table->vfp;
+ adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder +
+ vbios_mode->enh_table->vfp +
+ vbios_mode->enh_table->vsync);
+
+ refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
+ mode_id = vbios_mode->enh_table->mode_id;
+
+ if (ast->chip == AST1180) {
+ /* TODO 1180 */
+ } else {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0xf) << 4));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->fb->bits_per_pixel);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
+ }
+
+ return true;
+}
+
+static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_vbios_stdtable *stdtable;
+ u32 i;
+ u8 jreg;
+
+ stdtable = vbios_mode->std_table;
+
+ jreg = stdtable->misc;
+ ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+
+ /* Set SEQ */
+ ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
+ for (i = 0; i < 4; i++) {
+ jreg = stdtable->seq[i];
+ if (!i)
+ jreg |= 0x20;
+ ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1) , jreg);
+ }
+
+ /* Set CRTC */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+ for (i = 0; i < 25; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+
+ /* set AR */
+ jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+ for (i = 0; i < 20; i++) {
+ jreg = stdtable->ar[i];
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg);
+ }
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00);
+
+ jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20);
+
+ /* Set GR */
+ for (i = 0; i < 9; i++)
+ ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
+}
+
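+/*
+ * Program the standard VGA CRTC timing registers; overflow bits that do
+ * not fit the VGA fields land in the AST extension registers 0xAC-0xAE.
+ */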
+static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
+ u16 temp;
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+
+ temp = (mode->crtc_htotal >> 3) - 5;
+ if (temp & 0x100)
+ jregAC |= 0x01; /* HT D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp);
+
+ temp = (mode->crtc_hdisplay >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x04; /* HDE D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp);
+
+ temp = (mode->crtc_hblank_start >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x10; /* HBS D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp);
+
+ temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f;
+ if (temp & 0x20)
+ jreg05 |= 0x80; /* HBE D[5] */
+ if (temp & 0x40)
+ jregAD |= 0x01; /* HBE D[6] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f));
+
+ temp = (mode->crtc_hsync_start >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x40; /* HRS D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp);
+
+ temp = ((mode->crtc_hsync_end >> 3) - 1) & 0x3f;
+ if (temp & 0x20)
+ jregAD |= 0x04; /* HRE D[5] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
+
+ /* vert timings */
+ temp = (mode->crtc_vtotal) - 2;
+ if (temp & 0x100)
+ jreg07 |= 0x01;
+ if (temp & 0x200)
+ jreg07 |= 0x20;
+ if (temp & 0x400)
+ jregAE |= 0x01;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp);
+
+ temp = (mode->crtc_vsync_start) - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x04;
+ if (temp & 0x200)
+ jreg07 |= 0x80;
+ if (temp & 0x400)
+ jregAE |= 0x08;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp);
+
+ temp = (mode->crtc_vsync_end - 1) & 0x3f;
+ if (temp & 0x10)
+ jregAE |= 0x20;
+ if (temp & 0x20)
+ jregAE |= 0x40;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf);
+
+ temp = mode->crtc_vdisplay - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x02;
+ if (temp & 0x200)
+ jreg07 |= 0x40;
+ if (temp & 0x400)
+ jregAE |= 0x02;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp);
+
+ temp = mode->crtc_vblank_start - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x08;
+ if (temp & 0x200)
+ jreg09 |= 0x20;
+ if (temp & 0x400)
+ jregAE |= 0x04;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp);
+
+ temp = mode->crtc_vblank_end - 1;
+ if (temp & 0x100)
+ jregAE |= 0x10;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp);
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80));
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
+}
+
+static void ast_set_offset_reg(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ u16 offset;
+
+ offset = crtc->fb->pitches[0] >> 3;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
+}
+
+static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_vbios_dclk_info *clk_info;
+
+ clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f,
+ (clk_info->param3 & 0x80) | ((clk_info->param3 & 0x3) << 4));
+}
+
+static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ jregA0 = 0x70;
+ jregA3 = 0x01;
+ jregA8 = 0x00;
+ break;
+ case 15:
+ case 16:
+ jregA0 = 0x70;
+ jregA3 = 0x04;
+ jregA8 = 0x02;
+ break;
+ case 32:
+ jregA0 = 0x70;
+ jregA3 = 0x08;
+ jregA8 = 0x02;
+ break;
+ }
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
+
+ /* Set Threshold */
+ if (ast->chip == AST2300) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
+ } else if (ast->chip == AST2100 ||
+ ast->chip == AST1100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2150) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f);
+ } else {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f);
+ }
+}
+
+void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+
+ jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
+ jreg |= (vbios_mode->enh_table->flags & SyncNN);
+ ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+}
+
+bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u32 addr;
+
+ addr = offset >> 2;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, (u8)((addr >> 8) & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, (u8)((addr >> 16) & 0xff));
+}
+
+static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ if (ast->chip == AST1180)
+ return;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+ ast_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_OFF:
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
+ break;
+ }
+}
+
+static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/* ast is different - we will force move buffers out of VRAM */
+static int ast_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct ast_framebuffer *ast_fb;
+ struct ast_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ ast_fb = to_ast_framebuffer(fb);
+ obj = ast_fb->obj;
+ bo = gem_to_ast_bo(obj);
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ ast_bo_push_sysram(bo);
+ ast_bo_unreserve(bo);
+ }
+
+ ast_fb = to_ast_framebuffer(crtc->fb);
+ obj = ast_fb->obj;
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ ast_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&ast->fbdev->afb == ast_fb) {
+ /* if this is the console fb being scanned out, kmap it for fbcon */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+ }
+ ast_bo_unreserve(bo);
+
+ ast_set_start_address_crt1(crtc, (u32)gpu_addr);
+
+ return 0;
+}
+
+static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return ast_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int ast_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_vbios_mode_info vbios_mode;
+ bool ret;
+
+ if (ast->chip == AST1180) {
+ DRM_ERROR("AST 1180 modesetting not supported\n");
+ return -EINVAL;
+ }
+
+ ret = ast_get_vbios_mode_info(crtc, mode, adjusted_mode, &vbios_mode);
+ if (!ret)
+ return -EINVAL;
+ ast_open_key(ast);
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+
+ ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_offset_reg(crtc);
+ ast_set_dclk_reg(dev, adjusted_mode, &vbios_mode);
+ ast_set_ext_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_sync_reg(dev, adjusted_mode, &vbios_mode);
+ ast_set_dac_reg(crtc, adjusted_mode, &vbios_mode);
+
+ ast_crtc_mode_set_base(crtc, x, y, old_fb);
+
+ return 0;
+}
+
+static void ast_crtc_disable(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_prepare(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_commit(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+}
+
+static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
+ .dpms = ast_crtc_dpms,
+ .mode_fixup = ast_crtc_mode_fixup,
+ .mode_set = ast_crtc_mode_set,
+ .mode_set_base = ast_crtc_mode_set_base,
+ .disable = ast_crtc_disable,
+ .load_lut = ast_crtc_load_lut,
+ .prepare = ast_crtc_prepare,
+ .commit = ast_crtc_commit,
+};
+
+static void ast_crtc_reset(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ int end = (start + size > 256) ? 256 : start + size, i;
+
+ /* userspace palettes are always correct as is */
+ for (i = start; i < end; i++) {
+ ast_crtc->lut_r[i] = red[i] >> 8;
+ ast_crtc->lut_g[i] = green[i] >> 8;
+ ast_crtc->lut_b[i] = blue[i] >> 8;
+ }
+ ast_crtc_load_lut(crtc);
+}
+
+static void ast_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_funcs ast_crtc_funcs = {
+ .cursor_set = ast_cursor_set,
+ .cursor_move = ast_cursor_move,
+ .reset = ast_crtc_reset,
+ .set_config = drm_crtc_helper_set_config,
+ .gamma_set = ast_crtc_gamma_set,
+ .destroy = ast_crtc_destroy,
+};
+
+int ast_crtc_init(struct drm_device *dev)
+{
+ struct ast_crtc *crtc;
+ int i;
+
+ crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
+ if (!crtc)
+ return -ENOMEM;
+
+ drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(&crtc->base, 256);
+ drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
+
+ for (i = 0; i < 256; i++) {
+ crtc->lut_r[i] = i;
+ crtc->lut_g[i] = i;
+ crtc->lut_b[i] = i;
+ }
+ return 0;
+}
+
+static void ast_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static const struct drm_encoder_funcs ast_enc_funcs = {
+ .destroy = ast_encoder_destroy,
+};
+
+static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool ast_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void ast_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void ast_encoder_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void ast_encoder_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
+ .dpms = ast_encoder_dpms,
+ .mode_fixup = ast_mode_fixup,
+ .prepare = ast_encoder_prepare,
+ .commit = ast_encoder_commit,
+ .mode_set = ast_encoder_mode_set,
+};
+
+int ast_encoder_init(struct drm_device *dev)
+{
+ struct ast_encoder *ast_encoder;
+
+ ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
+ if (!ast_encoder)
+ return -ENOMEM;
+
+ drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
+
+ ast_encoder->base.possible_crtcs = 1;
+ return 0;
+}
+
+static int ast_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(&ast_connector->base, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
+ return 0;
+}
+
+static int ast_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void ast_connector_destroy(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ ast_i2c_destroy(ast_connector->i2c);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static enum drm_connector_status
+ast_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
+ .mode_valid = ast_mode_valid,
+ .get_modes = ast_get_modes,
+ .best_encoder = ast_best_single_encoder,
+};
+
+static const struct drm_connector_funcs ast_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = ast_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = ast_connector_destroy,
+};
+
+int ast_connector_init(struct drm_device *dev)
+{
+ struct ast_connector *ast_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ ast_connector = kzalloc(sizeof(struct ast_connector), GFP_KERNEL);
+ if (!ast_connector)
+ return -ENOMEM;
+
+ connector = &ast_connector->base;
+ drm_connector_init(dev, connector, &ast_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &ast_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ast_connector->i2c = ast_i2c_create(dev);
+ if (!ast_connector->i2c)
+ DRM_ERROR("failed to add ddc bus for connector\n");
+
+ return 0;
+}
+
+/* allocate cursor cache and pin at start of VRAM */
+int ast_cursor_init(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ int size;
+ int ret;
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ uint64_t gpu_addr;
+
+ size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
+
+ ret = ast_gem_create(dev, size, true, &obj);
+ if (ret)
+ return ret;
+ bo = gem_to_ast_bo(obj);
+ ret = ast_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ goto fail;
+
+ ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ ast_bo_unreserve(bo);
+ if (ret)
+ goto fail;
+
+ /* kmap the object */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap);
+ if (ret)
+ goto fail;
+
+ ast->cursor_cache = obj;
+ ast->cursor_cache_gpu_addr = gpu_addr;
+ DRM_ERROR("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
+ return 0;
+fail:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
+void ast_cursor_fini(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ ttm_bo_kunmap(&ast->cache_kmap);
+ drm_gem_object_unreference_unlocked(ast->cursor_cache);
+}
+
+int ast_mode_init(struct drm_device *dev)
+{
+ ast_cursor_init(dev);
+ ast_crtc_init(dev);
+ ast_encoder_init(dev);
+ ast_connector_init(dev);
+ return 0;
+}
+
+void ast_mode_fini(struct drm_device *dev)
+{
+ ast_cursor_fini(dev);
+}
+
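+/*
+ * DDC bit-banging through CRTC register 0xb7: SCL is driven on bit 0 and
+ * SDA on bit 2, with the line states read back on bits 4 and 5; writes
+ * are retried until the register latches.
+ */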
+static int get_clock(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ uint32_t val;
+
+ val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
+ return val & 1 ? 1 : 0;
+}
+
+static int get_data(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ uint32_t val;
+
+ val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
+ return val & 1 ? 1 : 0;
+}
+
+static void set_clock(void *i2c_priv, int clock)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((clock & 0x01) ? 0 : 1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static void set_data(void *i2c_priv, int data)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
+{
+ struct ast_i2c_chan *i2c;
+ int ret;
+
+ i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "AST i2c bit bus");
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 20;
+ i2c->bit.timeout = 2;
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = set_data;
+ i2c->bit.setscl = set_clock;
+ i2c->bit.getsda = get_data;
+ i2c->bit.getscl = get_clock;
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register bit i2c\n");
+ goto out_free;
+ }
+
+ return i2c;
+out_free:
+ kfree(i2c);
+ return NULL;
+}
+
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
+void ast_show_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jreg;
+
+ jreg = 0x2;
+ /* enable ARGB cursor */
+ jreg |= 1;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+}
+
+void ast_hide_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
+}
+
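+/*
+ * Repack an ARGB8888 cursor image into the hardware's nibble-packed
+ * (ARGB4444-style) layout, bottom-right aligned within the cursor
+ * buffer, accumulating a checksum over the data as it is written.
+ */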
+static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
+{
+ union {
+ u32 ul;
+ u8 b[4];
+ } srcdata32[2], data32;
+ union {
+ u16 us;
+ u8 b[2];
+ } data16;
+ u32 csum = 0;
+ s32 alpha_dst_delta, last_alpha_dst_delta;
+ u8 *srcxor, *dstxor;
+ int i, j;
+ u32 per_pixel_copy, two_pixel_copy;
+
+ alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
+ last_alpha_dst_delta = alpha_dst_delta - (width << 1);
+
+ srcxor = src;
+ dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
+ per_pixel_copy = width & 1;
+ two_pixel_copy = width >> 1;
+
+ for (j = 0; j < height; j++) {
+ for (i = 0; i < two_pixel_copy; i++) {
+ srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+ srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
+ data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+ data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
+ data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
+
+ writel(data32.ul, dstxor);
+ csum += data32.ul;
+
+ dstxor += 4;
+ srcxor += 8;
+
+ }
+
+ for (i = 0; i < per_pixel_copy; i++) {
+ srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+ data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+ writew(data16.us, dstxor);
+ csum += (u32)data16.us;
+
+ dstxor += 2;
+ srcxor += 4;
+ }
+ dstxor += last_alpha_dst_delta;
+ }
+ return csum;
+}
+
+static int ast_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ uint64_t gpu_addr;
+ u32 csum;
+ int ret;
+ struct ttm_bo_kmap_obj uobj_map;
+ u8 *src, *dst;
+ bool src_isiomem, dst_isiomem;
+
+ if (!handle) {
+ ast_hide_cursor(crtc);
+ return 0;
+ }
+
+ if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
+ return -ENOENT;
+ }
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ goto fail;
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
+ if (ret) {
+ ast_bo_unreserve(bo);
+ goto fail;
+ }
+
+ src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
+ dst = ttm_kmap_obj_virtual(&ast->cache_kmap, &dst_isiomem);
+
+ if (src_isiomem)
+ DRM_ERROR("src cursor bo should be in main memory\n");
+ if (!dst_isiomem)
+ DRM_ERROR("dst bo should be in VRAM\n");
+
+ dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+
+ /* do data transfer to cursor cache */
+ csum = copy_cursor_image(src, dst, width, height);
+
+ /* write checksum + signature */
+ ttm_bo_kunmap(&uobj_map);
+ ast_bo_unreserve(bo);
+ {
+ u8 *dst = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+
+ /* set pattern offset */
+ gpu_addr = ast->cursor_cache_gpu_addr;
+ gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+ gpu_addr >>= 3;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
+ }
+ ast_crtc->cursor_width = width;
+ ast_crtc->cursor_height = height;
+ ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
+ ast_crtc->offset_y = AST_MAX_HWC_HEIGHT - height;
+
+ ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM;
+
+ ast_show_cursor(crtc);
+
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
+fail:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
+static int ast_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct ast_private *ast = crtc->dev->dev_private;
+ int x_offset, y_offset;
+ u8 *sig;
+
+ sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ writel(x, sig + AST_HWC_SIGNATURE_X);
+ writel(y, sig + AST_HWC_SIGNATURE_Y);
+
+ x_offset = ast_crtc->offset_x;
+ y_offset = ast_crtc->offset_y;
+ if (x < 0) {
+ x_offset = (-x) + ast_crtc->offset_x;
+ x = 0;
+ }
+
+ if (y < 0) {
+ y_offset = (-y) + ast_crtc->offset_y;
+ y = 0;
+ }
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, (x & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, ((x >> 8) & 0x0f));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, (y & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
+
+ /* dummy write to fire HWC */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
new file mode 100644
index 000000000000..6edbee63b0cb
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -0,0 +1,1780 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include "drmP.h"
+#include "ast_drv.h"
+
+#include "ast_dram_tables.h"
+
+static void ast_init_dram_2300(struct drm_device *dev);
+
+static void
+ast_enable_vga(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ ast_io_write8(ast, 0x43, 0x01);
+ ast_io_write8(ast, 0x42, 0x01);
+}
+
+#if 0 /* will use later */
+static bool
+ast_is_vga_enabled(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 ch;
+
+ if (ast->chip == AST1180) {
+ /* TODO 1180 */
+ } else {
+ ch = ast_io_read8(ast, 0x43);
+ if (ch) {
+ ast_open_key(ast);
+ ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
+ return ch & 0x04;
+ }
+ }
+ return 0;
+}
+#endif
+
+static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
+
+static void
+ast_set_def_ext_reg(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x8f; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
+
+ if (ast->chip == AST2300) {
+ if (dev->pdev->revision >= 0x20)
+ ext_reg_info = extreginfo_ast2300;
+ else
+ ext_reg_info = extreginfo_ast2300a0;
+ } else
+ ext_reg_info = extreginfo;
+
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+ /* ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ if (ast->chip == AST2300)
+ reg |= 0x20;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
+}
+
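+/*
+ * mindwm/moutdwm go through the PCI-to-AHB window: 0xf004 selects the
+ * 64k SoC register segment, 0xf000 enables it, and the register then
+ * appears in the 0x10000 aperture.
+ */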
+static inline u32 mindwm(struct ast_private *ast, u32 r)
+{
+ ast_write32(ast, 0xf004, r & 0xffff0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+ return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
+}
+
+static inline void moutdwm(struct ast_private *ast, u32 r, u32 v)
+{
+ ast_write32(ast, 0xf004, r & 0xffff0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
+}
+
+/*
+ * AST2100/2150 DLL CBR Setting
+ */
+#define CBR_SIZE_AST2150 ((16 << 10) - 1)
+#define CBR_PASSNUM_AST2150 5
+#define CBR_THRESHOLD_AST2150 10
+#define CBR_THRESHOLD2_AST2150 10
+#define TIMEOUT_AST2150 5000000
+
+#define CBR_PATNUM_AST2150 8
+
+static const u32 pattern_AST2150[14] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0xFFFE0001,
+ 0x683501FE,
+ 0x0F1929B0,
+ 0x2D0B4346,
+ 0x60767F02,
+ 0x6FBE36A6,
+ 0x3A253035,
+ 0x3019686D,
+ 0x41C6167E,
+ 0x620152BF,
+ 0x20F050E0
+};
+
+static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+#if 0 /* unused in DDX driver - here for completeness */
+static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+#endif
+
+static int cbrtest_ast2150(struct ast_private *ast)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (mmctestburst2_ast2150(ast, i))
+ return 0;
+ return 1;
+}
+
+static int cbrscan_ast2150(struct ast_private *ast, int busw)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
+ moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
+ if (cbrtest_ast2150(ast))
+ break;
+ }
+ if (loop == CBR_PASSNUM_AST2150)
+ return 0;
+ }
+ return 1;
+}
+
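+/*
+ * Sweep the DQS input DLL delay, record the window in which the CBR
+ * test patterns read back cleanly, and settle on a point inside it.
+ */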
+static void cbrdlli_ast2150(struct ast_private *ast, int busw)
+{
+ u32 dll_min[4], dll_max[4], dlli, data, passcnt;
+
+cbr_start:
+ dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
+ dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
+ passcnt = 0;
+
+ for (dlli = 0; dlli < 100; dlli++) {
+ moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+ data = cbrscan_ast2150(ast, busw);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dll_min[0] > dlli)
+ dll_min[0] = dlli;
+ if (dll_max[0] < dlli)
+ dll_max[0] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+ }
+ if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+
+ dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
+ moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+}
+
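+/*
+ * Load the generation-specific DRAM register table, run the AST2100/2150
+ * DLL calibration where applicable, then wait for the controller to
+ * report ready.
+ */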
+static void ast_init_dram_reg(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 j;
+ u32 data, temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+
+ j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ if (ast->chip == AST2000) {
+ dram_reg_info = ast2000_dram_table_data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10100, 0xa8);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10100) != 0xa8);
+ } else {/* AST2100/1100 */
+ if (ast->chip == AST2100 || ast->chip == AST2200)
+ dram_reg_info = ast2100_dram_table_data;
+ else
+ dram_reg_info = ast1100_dram_table_data;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688A8A8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x01);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+ }
+
+ while (dram_reg_info->index != 0xffff) {
+ if (dram_reg_info->index == 0xff00) {/* delay fn */
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) {
+ data = dram_reg_info->data;
+ if (ast->dram_type == AST_DRAM_1Gx16)
+ data = 0x00000d89;
+ else if (ast->dram_type == AST_DRAM_1Gx32)
+ data = 0x00000c8d;
+
+ temp = ast_read32(ast, 0x12070);
+ temp &= 0xc;
+ temp <<= 2;
+ ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
+ } else
+ ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
+ dram_reg_info++;
+ }
+
+ /* AST 2100/2150 DRAM calibration */
+ data = ast_read32(ast, 0x10120);
+ if (data == 0x5061) { /* 266Mhz */
+ data = ast_read32(ast, 0x10004);
+ if (data & 0x40)
+ cbrdlli_ast2150(ast, 16); /* 16 bits */
+ else
+ cbrdlli_ast2150(ast, 32); /* 32 bits */
+ }
+
+ switch (ast->chip) {
+ case AST2000:
+ temp = ast_read32(ast, 0x10140);
+ ast_write32(ast, 0x10140, temp | 0x40);
+ break;
+ case AST1100:
+ case AST2100:
+ case AST2200:
+ case AST2150:
+ temp = ast_read32(ast, 0x1200c);
+ ast_write32(ast, 0x1200c, temp & 0xfffffffd);
+ temp = ast_read32(ast, 0x12040);
+ ast_write32(ast, 0x12040, temp | 0x40);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
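+/* Minimal POST: enable PCI I/O and memory decode, unlock the chip, then bring up DRAM. */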
+void ast_post_gpu(struct drm_device *dev)
+{
+ u32 reg;
+ struct ast_private *ast = dev->dev_private;
+
+ pci_read_config_dword(ast->dev->pdev, 0x04, &reg);
+ reg |= 0x3;
+ pci_write_config_dword(ast->dev->pdev, 0x04, reg);
+
+ ast_enable_vga(dev);
+ ast_open_key(ast);
+ ast_set_def_ext_reg(dev);
+
+ if (ast->chip == AST2300)
+ ast_init_dram_2300(dev);
+ else
+ ast_init_dram_reg(dev);
+}
+
+/* AST 2300 DRAM settings */
+#define AST_DDR3 0
+#define AST_DDR2 1
+
+struct ast2300_dram_param {
+ u32 dram_type;
+ u32 dram_chipid;
+ u32 dram_freq;
+ u32 vram_size;
+ u32 odt;
+ u32 wodt;
+ u32 rodt;
+ u32 dram_config;
+ u32 reg_PERIOD;
+ u32 reg_MADJ;
+ u32 reg_SADJ;
+ u32 reg_MRS;
+ u32 reg_EMRS;
+ u32 reg_AC1;
+ u32 reg_AC2;
+ u32 reg_DQSIC;
+ u32 reg_DRV;
+ u32 reg_IOZ;
+ u32 reg_DQIDLY;
+ u32 reg_FREQ;
+ u32 madj_max;
+ u32 dll2_finetune_step;
+};
+
+/*
+ * DQSI DLL CBR Setting
+ */
+#define CBR_SIZE1 ((4 << 10) - 1)
+#define CBR_SIZE2 ((64 << 10) - 1)
+#define CBR_PASSNUM 5
+#define CBR_PASSNUM2 5
+#define CBR_THRESHOLD 10
+#define CBR_THRESHOLD2 10
+#define TIMEOUT 5000000
+#define CBR_PATNUM 8
+
+static const u32 pattern[8] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0x88778877,
+ 0x92CC4D6E,
+ 0x543D3CDE,
+ 0xF1E843C7,
+ 0x7C61D253
+};
+
+#if 0 /* unused in DDX, included for completeness */
+static int mmc_test_burst(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x3000;
+ if (data & 0x2000) {
+ return 0;
+ }
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0;
+ }
+ } while (!data);
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 1;
+}
+#endif
+
+static int mmc_test_burst2(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return -1;
+ }
+ } while (!data);
+ data = mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return data;
+}
+
+#if 0 /* unused in DDX - here for completeness */
+static int mmc_test_single(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x3000;
+ if (data & 0x2000)
+ return 0;
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return 0;
+ }
+ } while (!data);
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return 1;
+}
+#endif
+
+static int mmc_test_single2(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return -1;
+ }
+ } while (!data);
+ data = mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return data;
+}
+
+static int cbr_test(struct ast_private *ast)
+{
+ u32 data;
+ int i;
+ data = mmc_test_single2(ast, 0);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ for (i = 0; i < 8; i++) {
+ data = mmc_test_burst2(ast, i);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ }
+ if (!data)
+ return 3;
+ else if (data & 0xff)
+ return 2;
+ return 1;
+}
+
+static int cbr_scan(struct ast_private *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 3;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ if ((data = cbr_test(ast)) != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static u32 cbr_test2(struct ast_private *ast)
+{
+ u32 data;
+
+ data = mmc_test_burst2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+ data |= mmc_test_single2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+
+ return ~data & 0xffff;
+}
+
+static u32 cbr_scan2(struct ast_private *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 0xffff;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ if ((data = cbr_test2(ast)) != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+#if 0 /* unused in DDX - added for completeness */
+static void finetuneDQI(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
+
+ gold_sadj[0] = (mindwm(ast, 0x1E6E0024) >> 16) & 0xffff;
+ gold_sadj[1] = gold_sadj[0] >> 8;
+ gold_sadj[0] = gold_sadj[0] & 0xff;
+ gold_sadj[0] = (gold_sadj[0] + gold_sadj[1]) >> 1;
+ gold_sadj[1] = gold_sadj[0];
+
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[0] >= dlli) {
+ dlli = (gold_sadj[0] - dlli) >> 1;
+ if (dlli > 3) {
+ dlli = 3;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[0]) >> 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[1] >= dlli) {
+ dlli = (gold_sadj[1] - dlli) >> 1;
+ if (dlli > 3) {
+ dlli = 3;
+ } else {
+ dlli = (dlli - 1) & 0x7;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[1]) >> 1;
+ dlli += 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI */
+#endif
+
+static void finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
+
+FINETUNE_START:
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ passcnt = 0;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ gold_sadj[0] += dllmin[cnt];
+ passcnt++;
+ }
+ }
+ if (passcnt != 16) {
+ goto FINETUNE_START;
+ }
+ gold_sadj[0] = gold_sadj[0] >> 4;
+ gold_sadj[1] = gold_sadj[0];
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[0] >= dlli) {
+ dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
+ if (dlli > 3) {
+ dlli = 3;
+ }
+ } else {
+ dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[1] >= dlli) {
+ dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
+ if (dlli > 3) {
+ dlli = 3;
+ } else {
+ dlli = (dlli - 1) & 0x7;
+ }
+ } else {
+ dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
+ dlli += 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI_L */
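
The nudge packed per byte lane above is a 3-bit code: the distance from the
golden sample point is scaled by 19/32 (roughly 0.6 of a DLL step) and
clamped, and negative offsets are stored as (8 - n) & 7, i.e. 3-bit two's
complement. A standalone sketch of the encoding used in the first loop (the
function name is illustrative, not a driver symbol):

static u32 encode_lane_adj(u32 gold, u32 lane_min)
{
	u32 d;

	if (gold >= lane_min) {
		d = ((gold - lane_min) * 19) >> 5;	/* scale by ~0.59 */
		if (d > 3)
			d = 3;				/* clamp positive nudge */
	} else {
		d = ((lane_min - gold) * 19) >> 5;
		if (d > 4)
			d = 4;				/* clamp negative nudge */
		d = (8 - d) & 0x7;			/* 3-bit negative code */
	}
	/* the caller shifts its accumulator right 3 bits per lane,
	 * then ORs each code into bits 23:21 */
	return d;
}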
+
+static void finetuneDQI_L2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, data2;
+
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ gold_sadj[1] = 0xFF;
+ for (cnt = 0; cnt < 8; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ if (gold_sadj[0] < dllmin[cnt]) {
+ gold_sadj[0] = dllmin[cnt];
+ }
+ if (gold_sadj[1] > dllmax[cnt]) {
+ gold_sadj[1] = dllmax[cnt];
+ }
+ }
+ }
+ gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
+ gold_sadj[1] = mindwm(ast, 0x1E6E0080);
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ data2 = gold_sadj[1] & 0x7;
+ gold_sadj[1] >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[0] >= dlli) {
+ dlli = (gold_sadj[0] - dlli) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 3) {
+ data2 = (data2 + dlli) & 0x7;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[0]) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 4) {
+ data2 = (data2 - dlli) & 0x7;
+ }
+ }
+ }
+ data |= data2 << 21;
+ }
+ moutdwm(ast, 0x1E6E0080, data);
+
+ gold_sadj[0] = 0x0;
+ gold_sadj[1] = 0xFF;
+ for (cnt = 8; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ if (gold_sadj[0] < dllmin[cnt]) {
+ gold_sadj[0] = dllmin[cnt];
+ }
+ if (gold_sadj[1] > dllmax[cnt]) {
+ gold_sadj[1] = dllmax[cnt];
+ }
+ }
+ }
+ gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
+ gold_sadj[1] = mindwm(ast, 0x1E6E0084);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ data2 = gold_sadj[1] & 0x7;
+ gold_sadj[1] >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[0] >= dlli) {
+ dlli = (gold_sadj[0] - dlli) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 3) {
+ data2 = (data2 + dlli) & 0x7;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[0]) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 4) {
+ data2 = (data2 - dlli) & 0x7;
+ }
+ }
+ }
+ data |= data2 << 21;
+ }
+ moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI_L2 */
+
+static void cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 dllmin[2], dllmax[2], dlli, data, data2, passcnt;
+
+ finetuneDQI_L(ast, param);
+ finetuneDQI_L2(ast, param);
+
+CBR_START2:
+ dllmin[0] = dllmin[1] = 0xff;
+ dllmax[0] = dllmax[1] = 0x0;
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan(ast);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dllmin[0] > dlli) {
+ dllmin[0] = dlli;
+ }
+ if (dllmax[0] < dlli) {
+ dllmax[0] = dlli;
+ }
+ }
+ if (data & 0x2) {
+ if (dllmin[1] > dlli) {
+ dllmin[1] = dlli;
+ }
+ if (dllmax[1] < dlli) {
+ dllmax[1] = dlli;
+ }
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
+ goto CBR_START2;
+ }
+ if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
+ goto CBR_START2;
+ }
+ dlli = (dllmin[1] + dllmax[1]) >> 1;
+ dlli <<= 8;
+ dlli += (dllmin[0] + dllmax[0]) >> 1;
+ moutdwm(ast, 0x1E6E0068, (mindwm(ast, 0x1E6E0068) & 0xFFFF) | (dlli << 16));
+
+ data = (mindwm(ast, 0x1E6E0080) >> 24) & 0x1F;
+ data2 = (mindwm(ast, 0x1E6E0018) & 0xff80ffff) | (data << 16);
+ moutdwm(ast, 0x1E6E0018, data2);
+ moutdwm(ast, 0x1E6E0024, 0x8001 | (data << 1) | (param->dll2_finetune_step << 8));
+
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+} /* CBRDLL2 */
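
Every calibration pass above follows the same pattern: sweep the DQSI DLL
setting, run the CBR pattern test at each step, record the passing window,
and finally program the window centre. A stripped-down skeleton of that loop
(helper names here are illustrative, not driver functions; the real code
keeps one window per lane and restarts on a window that is too narrow):

	for (dlli = 0; dlli < 76; dlli++) {
		program_dll(dlli);		/* write 0x1E6E0068 */
		wait_dqsi_calibration();	/* poll 0x1E6E0070 bit 12 */
		if (pattern_passes())		/* cbr_scan()/cbr_scan2() */
			update_window(&min, &max, dlli);
	}
	program_dll((min + max) >> 1);		/* centre of the passing eye */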
+
+static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = 0x00020000 + (trap << 16);
+ trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
+ trap_MRS = 0x00000010 + (trap << 4);
+ trap_MRS |= ((trap & 0x2) << 18);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 336:
+ moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 0;
+ param->reg_AC1 = 0x22202725;
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x04001400 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ break;
+ default:
+ case 396:
+ moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+ break;
+
+ case 408:
+ moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xCD44961A;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00081830;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 4;
+ break;
+ case 504:
+ moutdwm(ast, 0x1E6E2020, 0x0270);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xDE44A61D;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x070000BB;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 4;
+ break;
+ case 528:
+ moutdwm(ast, 0x1E6E2020, 0x0290);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xEF44B61E;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000088;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302A37;
+ param->reg_AC2 = 0xEF56B61E;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 136;
+ param->dll2_finetune_step = 3;
+ break;
+ case 600:
+ moutdwm(ast, 0x1E6E2020, 0x02E1);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xDF56B61F;
+ param->reg_DQSIC = 0x0000014D;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000058C0;
+ param->madj_max = 132;
+ param->dll2_finetune_step = 3;
+ break;
+ case 624:
+ moutdwm(ast, 0x1E6E2020, 0x0160);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xEF56B621;
+ param->reg_DQSIC = 0x0000015A;
+ param->reg_MRS = 0x02101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000059C0;
+ param->madj_max = 128;
+ param->dll2_finetune_step = 3;
+ break;
+ } /* switch freq */
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x130;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x131;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x132;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x133;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case AST_VIDMEM_SIZE_8M:
+ param->dram_config |= 0x00;
+ break;
+ case AST_VIDMEM_SIZE_16M:
+ param->dram_config |= 0x04;
+ break;
+ case AST_VIDMEM_SIZE_32M:
+ param->dram_config |= 0x08;
+ break;
+ case AST_VIDMEM_SIZE_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+
+}
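
The two trailing switches compose param->dram_config out of independent
fields: the low byte selects the DDR3 chip organisation (0x130..0x133) and
bits 2..3 encode the video-memory aperture. As a worked example,
AST_DRAM_2Gx16 chips with a 32 MB aperture give:

	param->dram_config = 0x132;	/* 2Gx16 organisation */
	param->dram_config |= 0x08;	/* 32 MB of VRAM */
	/* 0x13A is then written to 0x1E6E0004 by ddr3_init() */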
+
+static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2;
+
+ moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ moutdwm(ast, 0x1E6E0018, 0x00000100);
+ moutdwm(ast, 0x1E6E0024, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ moutdwm(ast, 0x1E6E0004, param->dram_config);
+ moutdwm(ast, 0x1E6E0008, 0x90040f);
+ moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ moutdwm(ast, 0x1E6E0080, 0x00000000);
+ moutdwm(ast, 0x1E6E0084, 0x00000000);
+ moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ moutdwm(ast, 0x1E6E0018, 0x4040A170);
+ moutdwm(ast, 0x1E6E0018, 0x20402370);
+ moutdwm(ast, 0x1E6E0038, 0x00000000);
+ moutdwm(ast, 0x1E6E0040, 0xFF444444);
+ moutdwm(ast, 0x1E6E0044, 0x22222222);
+ moutdwm(ast, 0x1E6E0048, 0x22222222);
+ moutdwm(ast, 0x1E6E004C, 0x00000002);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+ moutdwm(ast, 0x1E6E0054, 0);
+ moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0074, 0x00000000);
+ moutdwm(ast, 0x1E6E0078, 0x00000000);
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
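+ /* DLL status still bad: bump MADJ by 4 and relock, giving up once madj_max is exceeded */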
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max) {
+ break;
+ }
+ moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000) {
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ } else {
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ }
+ data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ data = mindwm(ast, 0x1E6E0018) | 0xC00;
+ moutdwm(ast, 0x1E6E0018, data);
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00000040);
+ udelay(50);
+ /* Mode Register Setting */
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000005);
+ moutdwm(ast, 0x1E6E0028, 0x00000007);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ data = 0;
+ if (param->wodt) {
+ data = 0x300;
+ }
+ if (param->rodt) {
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ }
+ moutdwm(ast, 0x1E6E0034, data | 0x3);
+
+ /* Wait DQI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0080);
+ } while (!(data & 0x40000000));
+ /* Wait DQSI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0020);
+ } while (!(data & 0x00000800));
+ /* Calibrate the DQSI delay */
+ cbr_dll2(ast, param);
+
+ moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+ /* ECC Memory Initialization */
+#ifdef ECC
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+ moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+
+}
+
+static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = (trap << 20) | (trap << 16);
+ trap_AC2 += 0x00110000;
+ trap_MRS = 0x00000040 | (trap << 4);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 264:
+ moutdwm(ast, 0x1E6E2020, 0x0130);
+ param->wodt = 0;
+ param->reg_AC1 = 0x11101513;
+ param->reg_AC2 = 0x78117011;
+ param->reg_DQSIC = 0x00000092;
+ param->reg_MRS = 0x00000842;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x000000F0;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x0000005A;
+ param->reg_FREQ = 0x00004AC0;
+ param->madj_max = 138;
+ param->dll2_finetune_step = 3;
+ break;
+ case 336:
+ moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 1;
+ param->reg_AC1 = 0x22202613;
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x00000A02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ break;
+ default:
+ case 396:
+ moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+
+ case 408:
+ moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xCD44B01E;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 3;
+ break;
+ case 504:
+ moutdwm(ast, 0x1E6E2020, 0x0261);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xDE44C022;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 3;
+ break;
+ case 528:
+ moutdwm(ast, 0x1E6E2020, 0x0120);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xEF44D024;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F9;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A7;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 552:
+ moutdwm(ast, 0x1E6E2020, 0x02A1);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E025;
+ param->reg_DQSIC = 0x00000132;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000AD;
+ param->reg_FREQ = 0x000056C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E027;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000B3;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ }
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x100;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x121;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x122;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x123;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case AST_VIDMEM_SIZE_8M:
+ param->dram_config |= 0x00;
+ break;
+ case AST_VIDMEM_SIZE_16M:
+ param->dram_config |= 0x04;
+ break;
+ case AST_VIDMEM_SIZE_32M:
+ param->dram_config |= 0x08;
+ break;
+ case AST_VIDMEM_SIZE_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
+static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2;
+
+ moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ moutdwm(ast, 0x1E6E0018, 0x00000100);
+ moutdwm(ast, 0x1E6E0024, 0x00000000);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ moutdwm(ast, 0x1E6E0004, param->dram_config);
+ moutdwm(ast, 0x1E6E0008, 0x90040f);
+ moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ moutdwm(ast, 0x1E6E0080, 0x00000000);
+ moutdwm(ast, 0x1E6E0084, 0x00000000);
+ moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ moutdwm(ast, 0x1E6E0018, 0x4040A130);
+ moutdwm(ast, 0x1E6E0018, 0x20402330);
+ moutdwm(ast, 0x1E6E0038, 0x00000000);
+ moutdwm(ast, 0x1E6E0040, 0xFF808000);
+ moutdwm(ast, 0x1E6E0044, 0x88848466);
+ moutdwm(ast, 0x1E6E0048, 0x44440008);
+ moutdwm(ast, 0x1E6E004C, 0x00000000);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+ moutdwm(ast, 0x1E6E0054, 0);
+ moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0074, 0x00000000);
+ moutdwm(ast, 0x1E6E0078, 0x00000000);
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max) {
+ break;
+ }
+ moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000) {
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ } else {
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ }
+ data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ data = mindwm(ast, 0x1E6E0018) | 0xC00;
+ moutdwm(ast, 0x1E6E0018, data);
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ udelay(50);
+ /* Mode Register Setting */
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000005);
+ moutdwm(ast, 0x1E6E0028, 0x00000007);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+
+ moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ data = 0;
+ if (param->wodt) {
+ data = 0x500;
+ }
+ if (param->rodt) {
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ }
+ moutdwm(ast, 0x1E6E0034, data | 0x3);
+ moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+
+ /* Wait DQI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0080);
+ } while (!(data & 0x40000000));
+ /* Wait DQSI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0020);
+ } while (!(data & 0x00000800));
+ /* Calibrate the DQSI delay */
+ cbr_dll2(ast, param);
+
+ /* ECC Memory Initialization */
+#ifdef ECC
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+ moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+
+}
+
+static void ast_init_dram_2300(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast2300_dram_param param;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ if ((reg & 0x80) == 0) { /* vga only */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+ while (ast_read32(ast, 0x12000) != 0x1)
+ ;
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ while (ast_read32(ast, 0x10000) != 0x1)
+ ;
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
+ param.dram_type = AST_DDR3;
+ if (temp & 0x01000000)
+ param.dram_type = AST_DDR2;
+ param.dram_chipid = ast->dram_type;
+ param.dram_freq = ast->mclk;
+ param.vram_size = ast->vram_size;
+
+ if (param.dram_type == AST_DDR3) {
+ get_ddr3_info(ast, &param);
+ ddr3_init(ast, &param);
+ } else {
+ get_ddr2_info(ast, &param);
+ ddr2_init(ast, &param);
+ }
+
+ temp = mindwm(ast, 0x1e6e2040);
+ moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
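
In outline, ast_init_dram_2300() reprograms the memory controller only when
CRTC index 0xd0 has bit 7 clear (a VGA-only configuration), and in either
case spins until bit 6 signals that DRAM init has completed. A hedged sketch
of that final handshake as a bounded poll (the retry count is illustrative;
the committed code loops without a timeout):

static int ast_wait_dram_ready(struct ast_private *ast)
{
	unsigned int tries = 100000;	/* arbitrary bound, not from the driver */
	u8 reg;

	do {
		reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
		if (reg & 0x40)		/* DRAM init complete flag */
			return 0;
	} while (--tries);
	return -ETIMEDOUT;
}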
+
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
new file mode 100644
index 000000000000..95fa6aba26bc
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. The authors makes no representations
+ * about the suitability of this software for any purpose. It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Ported from xf86-video-ast driver */
+
+#ifndef AST_TABLES_H
+#define AST_TABLES_H
+
+/* Std. Table Index Definition */
+#define TextModeIndex 0
+#define EGAModeIndex 1
+#define VGAModeIndex 2
+#define HiCModeIndex 3
+#define TrueCModeIndex 4
+
+#define Charx8Dot 0x00000001
+#define HalfDCLK 0x00000002
+#define DoubleScanMode 0x00000004
+#define LineCompareOff 0x00000008
+#define SyncPP 0x00000000
+#define SyncPN 0x00000040
+#define SyncNP 0x00000080
+#define SyncNN 0x000000C0
+#define HBorder 0x00000020
+#define VBorder 0x00000010
+#define WideScreenMode 0x00000100
+
+
+/* DCLK Index */
+#define VCLK25_175 0x00
+#define VCLK28_322 0x01
+#define VCLK31_5 0x02
+#define VCLK36 0x03
+#define VCLK40 0x04
+#define VCLK49_5 0x05
+#define VCLK50 0x06
+#define VCLK56_25 0x07
+#define VCLK65 0x08
+#define VCLK75 0x09
+#define VCLK78_75 0x0A
+#define VCLK94_5 0x0B
+#define VCLK108 0x0C
+#define VCLK135 0x0D
+#define VCLK157_5 0x0E
+#define VCLK162 0x0F
+/* #define VCLK193_25 0x10 */
+#define VCLK154 0x10
+#define VCLK83_5 0x11
+#define VCLK106_5 0x12
+#define VCLK146_25 0x13
+#define VCLK148_5 0x14
+
+static struct ast_vbios_dclk_info dclk_table[] = {
+ {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
+ {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
+ {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
+ {0x76, 0x63, 0x01}, /* 03: VCLK36 */
+ {0xEE, 0x67, 0x01}, /* 04: VCLK40 */
+ {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
+ {0xC6, 0x64, 0x01}, /* 06: VCLK50 */
+ {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
+ {0x80, 0x64, 0x00}, /* 08: VCLK65 */
+ {0x7B, 0x63, 0x00}, /* 09: VCLK75 */
+ {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
+ {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
+ {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
+ {0x85, 0x24, 0x00}, /* 0D: VCLK135 */
+ {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
+ {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
+ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
+ {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */
+ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
+ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
+ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
+};
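
Each dclk_table row holds the PLL parameters for the VCLK frequency of the
same index, so callers that start from a pixel clock in kHz need their own
nearest-match lookup. A minimal sketch (the kHz values are transcribed from
the VCLK names above; the helper itself is illustrative, not part of the
driver):

static const u32 vclk_khz[] = {
	25175, 28322, 31500, 36000, 40000, 49500, 50000, 56250,
	65000, 75000, 78750, 94500, 108000, 135000, 157500, 162000,
	154000, 83500, 106500, 146250, 148500,
};

static int nearest_vclk_index(u32 clock_khz)
{
	u32 best_diff = ~0u;
	int i, best = 0;

	for (i = 0; i < ARRAY_SIZE(vclk_khz); i++) {
		u32 diff = clock_khz > vclk_khz[i] ?
			clock_khz - vclk_khz[i] : vclk_khz[i] - clock_khz;
		if (diff < best_diff) {
			best_diff = diff;
			best = i;	/* index into dclk_table[] */
		}
	}
	return best;
}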
+
+static struct ast_vbios_stdtable vbios_stdtable[] = {
+ /* MD_2_3_400 */
+ {
+ 0x67,
+ {0x00,0x03,0x00,0x02},
+ {0x5f,0x4f,0x50,0x82,0x55,0x81,0xbf,0x1f,
+ 0x00,0x4f,0x0d,0x0e,0x00,0x00,0x00,0x00,
+ 0x9c,0x8e,0x8f,0x28,0x1f,0x96,0xb9,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+ 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+ 0x0c,0x00,0x0f,0x08},
+ {0x00,0x00,0x00,0x00,0x00,0x10,0x0e,0x00,
+ 0xff}
+ },
+ /* Mode12/ExtEGATable */
+ {
+ 0xe3,
+ {0x01,0x0f,0x00,0x06},
+ {0x5f,0x4f,0x50,0x82,0x55,0x81,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xe9,0x8b,0xdf,0x28,0x00,0xe7,0x04,0xe3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+ 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+ 0x01,0x00,0x0f,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtVGATable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x40,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtHiCTable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtTrueCTable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+};
+
+static struct ast_vbios_enhtable res_640x480[] = {
+ { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E },
+ { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E },
+ { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */
+ (SyncNN | Charx8Dot) , 75, 3, 0x2E },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */
+ (SyncNN | Charx8Dot) , 85, 4, 0x2E },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */
+ (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E },
+};
+
+static struct ast_vbios_enhtable res_800x600[] = {
+ {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */
+ (SyncPP | Charx8Dot), 56, 1, 0x30 },
+ {1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 2, 0x30 },
+ {1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */
+ (SyncPP | Charx8Dot), 72, 3, 0x30 },
+ {1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 4, 0x30 },
+ {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */
+ (SyncPP | Charx8Dot), 84, 5, 0x30 },
+ {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 5, 0x30 },
+};
+
+
+static struct ast_vbios_enhtable res_1024x768[] = {
+ {1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */
+ (SyncNN | Charx8Dot), 60, 1, 0x31 },
+ {1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */
+ (SyncNN | Charx8Dot), 70, 2, 0x31 },
+ {1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 3, 0x31 },
+ {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85Hz */
+ (SyncPP | Charx8Dot), 84, 4, 0x31 },
+ {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 4, 0x31 },
+};
+
+static struct ast_vbios_enhtable res_1280x1024[] = {
+ {1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x32 },
+ {1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 2, 0x32 },
+ {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */
+ (SyncPP | Charx8Dot), 85, 3, 0x32 },
+ {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 3, 0x32 },
+};
+
+static struct ast_vbios_enhtable res_1600x1200[] = {
+ {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x33 },
+ {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
+};
+
+static struct ast_vbios_enhtable res_1920x1200[] = {
+ {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
+ (SyncNP | Charx8Dot), 60, 1, 0x34 },
+ {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* end */
+ (SyncNP | Charx8Dot), 0xFF, 1, 0x34 },
+};
+
+/* 16:10 */
+static struct ast_vbios_enhtable res_1280x800[] = {
+ {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x35 },
+ {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* end */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x35 },
+};
+
+static struct ast_vbios_enhtable res_1440x900[] = {
+ {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x36 },
+ {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* end */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x36 },
+};
+
+static struct ast_vbios_enhtable res_1680x1050[] = {
+ {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x37 },
+ {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* end */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x37 },
+};
+
+/* HDTV */
+static struct ast_vbios_enhtable res_1920x1080[] = {
+ {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x38 },
+ {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* end */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x38 },
+};
+#endif
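
Each res_* table above ends with a sentinel row whose refresh-rate field is
0xFF; the sentinel doubles as the fallback timing set. A sketch of how such
a table can be walked (field names assume the struct ast_vbios_enhtable
layout from ast_drv.h; the helper is illustrative):

static const struct ast_vbios_enhtable *
ast_find_refresh(const struct ast_vbios_enhtable *tbl, u32 refresh_rate)
{
	while (tbl->refresh_rate != 0xff) {
		if (tbl->refresh_rate == refresh_rate)
			return tbl;	/* exact refresh-rate match */
		tbl++;
	}
	return tbl;	/* sentinel row: the table's default timings */
}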
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
new file mode 100644
index 000000000000..6cf2adea66bc
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "ast_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct ast_private *
+ast_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct ast_private, ttm.bdev);
+}
+
+static int
+ast_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+ast_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int ast_ttm_global_init(struct ast_private *ast)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &ast->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &ast_ttm_mem_global_init;
+ global_ref->release = &ast_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ ast->ttm.bo_global_ref.mem_glob =
+ ast->ttm.mem_global_ref.object;
+ global_ref = &ast->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&ast->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
+
+void
+ast_ttm_global_release(struct ast_private *ast)
+{
+ if (ast->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&ast->ttm.mem_global_ref);
+ ast->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct ast_bo *bo;
+
+ bo = container_of(tbo, struct ast_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &ast_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct ast_bo *astbo = ast_bo(bo);
+
+ if (!ast_ttm_bo_is_ast_bo(bo))
+ return;
+
+ ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
+ *pl = astbo->placement;
+}
+
+static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct ast_private *ast = ast_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int ast_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ int r;
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void ast_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func ast_tt_backend_func = {
+ .destroy = &ast_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &ast_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int ast_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver ast_bo_driver = {
+ .ttm_tt_create = ast_ttm_tt_create,
+ .ttm_tt_populate = ast_ttm_tt_populate,
+ .ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
+ .init_mem_type = ast_bo_init_mem_type,
+ .evict_flags = ast_bo_evict_flags,
+ .move = ast_bo_move,
+ .verify_access = ast_bo_verify_access,
+ .io_mem_reserve = &ast_ttm_io_mem_reserve,
+ .io_mem_free = &ast_ttm_io_mem_free,
+};
+
+int ast_mm_init(struct ast_private *ast)
+{
+ int ret;
+ struct drm_device *dev = ast->dev;
+ struct ttm_bo_device *bdev = &ast->ttm.bdev;
+
+ ret = ast_ttm_global_init(ast);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&ast->ttm.bdev,
+ ast->ttm.bo_global_ref.ref.object,
+ &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ ast->vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ ast->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+
+ return 0;
+}
+
+void ast_mm_fini(struct ast_private *ast)
+{
+ struct drm_device *dev = ast->dev;
+ ttm_bo_device_release(&ast->ttm.bdev);
+
+ ast_ttm_global_release(ast);
+
+ if (ast->fb_mtrr >= 0) {
+ drm_mtrr_del(ast->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+ ast->fb_mtrr = -1;
+ }
+}
+
+void ast_ttm_placement(struct ast_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+void ast_bo_unreserve(struct ast_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct ast_bo **pastbo)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_bo *astbo;
+ size_t acc_size;
+ int ret;
+
+ astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
+ if (!astbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &astbo->gem, size);
+ if (ret) {
+ kfree(astbo);
+ return ret;
+ }
+
+ astbo->gem.driver_private = NULL;
+ astbo->bo.bdev = &ast->ttm.bdev;
+
+ ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
+ sizeof(struct ast_bo));
+
+ ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
+ ttm_bo_type_device, &astbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ NULL, ast_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pastbo = astbo;
+ return 0;
+}
+
+static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = ast_bo_gpu_offset(bo);
+ return 0;
+ }
+
+ ast_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = ast_bo_gpu_offset(bo);
+ return 0;
+}
+
+int ast_bo_unpin(struct ast_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int ast_bo_push_sysram(struct ast_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int ast_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct ast_private *ast;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ ast = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
+}
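
Taken together, these helpers give the driver a small pin/unpin protocol on
top of TTM, where any placement change happens under a reservation. A hedged
usage sketch (error handling trimmed; the size and flag values are
illustrative):

	struct ast_bo *bo;
	u64 gpu_addr;

	ast_bo_create(dev, size, 0, 0, &bo);	/* TTM-backed GEM object */

	ast_bo_reserve(bo, false);		/* lock the object */
	ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
	ast_bo_unreserve(bo);

	/* ... scan out from gpu_addr ... */

	ast_bo_reserve(bo, false);
	ast_bo_unpin(bo);			/* drops the pin refcount */
	ast_bo_unreserve(bo);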
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
new file mode 100644
index 000000000000..fc154dd75296
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -0,0 +1,12 @@
+config DRM_CIRRUS_QEMU
+ tristate "Cirrus driver for QEMU emulated device"
+ depends on DRM && PCI && EXPERIMENTAL
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ This is a KMS driver for the emulated cirrus device in qemu.
+ It is *NOT* intended for real cirrus devices. It requires
+ the modesetting userspace X.org driver.
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
new file mode 100644
index 000000000000..69ffe7006d55
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+cirrus-y := cirrus_main.o cirrus_mode.o \
+ cirrus_drv.o cirrus_fbdev.o cirrus_ttm.o
+
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
new file mode 100644
index 000000000000..d7038230b71e
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat <mjg@redhat.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "cirrus_drv.h"
+
+int cirrus_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, cirrus_modeset, int, 0400);
+
+/*
+ * This is the generic driver code. This binds the driver to the drm core,
+ * which then performs further device association and calls our graphics init
+ * functions.
+ */
+
+static struct drm_driver driver;
+
+/* only bind to the cirrus chip in qemu */
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
+ 0, 0 },
+ {0,}
+};
+
+static int __devinit
+cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void cirrus_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static const struct file_operations cirrus_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = cirrus_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+};
+static struct drm_driver driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR,
+ .load = cirrus_driver_load,
+ .unload = cirrus_driver_unload,
+ .fops = &cirrus_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+ .gem_init_object = cirrus_gem_init_object,
+ .gem_free_object = cirrus_gem_free_object,
+ .dumb_create = cirrus_dumb_create,
+ .dumb_map_offset = cirrus_dumb_mmap_offset,
+ .dumb_destroy = cirrus_dumb_destroy,
+};
+
+static struct pci_driver cirrus_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = cirrus_pci_probe,
+ .remove = cirrus_pci_remove,
+};
+
+static int __init cirrus_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && cirrus_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (cirrus_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &cirrus_pci_driver);
+}
+
+static void __exit cirrus_exit(void)
+{
+ drm_pci_exit(&driver, &cirrus_pci_driver);
+}
+
+module_init(cirrus_init);
+module_exit(cirrus_exit);
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
new file mode 100644
index 000000000000..21bdfa8836f7
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#ifndef __CIRRUS_DRV_H__
+#define __CIRRUS_DRV_H__
+
+#include <video/vga.h>
+
+#include <drm/drm_fb_helper.h>
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#define DRIVER_AUTHOR "Matthew Garrett"
+
+#define DRIVER_NAME "cirrus"
+#define DRIVER_DESC "qemu Cirrus emulation"
+#define DRIVER_DATE "20110418"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define CIRRUSFB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
+
+#define SEQ_INDEX 4
+#define SEQ_DATA 5
+
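+/*
+ * The usual indexed VGA access pattern: write a register number to the
+ * index port, then its value to the data port. The offsets above appear to
+ * be relative to legacy port 0x3c0 within the register MMIO BAR, so
+ * SEQ_INDEX 4 is port 0x3c4, CRT_INDEX 0x14 is 0x3d4, and so on.
+ */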
+#define WREG_SEQ(reg, v) \
+ do { \
+ WREG8(SEQ_INDEX, reg); \
+ WREG8(SEQ_DATA, v); \
+	} while (0)
+
+#define CRT_INDEX 0x14
+#define CRT_DATA 0x15
+
+#define WREG_CRT(reg, v) \
+ do { \
+ WREG8(CRT_INDEX, reg); \
+ WREG8(CRT_DATA, v); \
+	} while (0)
+
+#define GFX_INDEX 0xe
+#define GFX_DATA 0xf
+
+#define WREG_GFX(reg, v) \
+ do { \
+ WREG8(GFX_INDEX, reg); \
+ WREG8(GFX_DATA, v); \
+	} while (0)
+
+/*
+ * Cirrus has a "hidden" DAC register that can be accessed by writing to
+ * the pixel mask register to reset the state, then reading from the register
+ * four times. The next write will then pass to the DAC
+ */
+#define VGA_DAC_MASK 0x6
+
+#define WREG_HDR(v) \
+ do { \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ WREG8(VGA_DAC_MASK, v); \
+	} while (0)
+
+
+#define CIRRUS_MAX_FB_HEIGHT 4096
+#define CIRRUS_MAX_FB_WIDTH 4096
+
+#define CIRRUS_DPMS_CLEARED (-1)
+
+#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
+#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
+#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
+
+struct cirrus_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int last_dpms;
+ bool enabled;
+};
+
+struct cirrus_fbdev;
+struct cirrus_mode_info {
+ bool mode_config_initialized;
+ struct cirrus_crtc *crtc;
+ /* pointer to fbdev info structure */
+ struct cirrus_fbdev *gfbdev;
+};
+
+struct cirrus_encoder {
+ struct drm_encoder base;
+ int last_dpms;
+};
+
+struct cirrus_connector {
+ struct drm_connector base;
+};
+
+struct cirrus_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct cirrus_mc {
+ resource_size_t vram_size;
+ resource_size_t vram_base;
+};
+
+struct cirrus_device {
+ struct drm_device *dev;
+ unsigned long flags;
+
+ resource_size_t rmmio_base;
+ resource_size_t rmmio_size;
+ void __iomem *rmmio;
+
+ struct cirrus_mc mc;
+ struct cirrus_mode_info mode_info;
+
+ int num_crtc;
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ } ttm;
+
+};
+
+
+struct cirrus_fbdev {
+ struct drm_fb_helper helper;
+ struct cirrus_framebuffer gfb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+};
+
+struct cirrus_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
+
+static inline struct cirrus_bo *
+cirrus_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct cirrus_bo, bo);
+}
+
+
+#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
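+
+/*
+ * TTM mmap offsets start at 4GB so they can never collide with the legacy
+ * DRM map offsets below them; cirrus_mmap() relies on this split to decide
+ * whether to hand a mapping to drm_mmap() or to TTM.
+ */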
+
+ /* cirrus_mode.c */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+
+ /* cirrus_main.c */
+int cirrus_device_init(struct cirrus_device *cdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev,
+ uint32_t flags);
+void cirrus_device_fini(struct cirrus_device *cdev);
+int cirrus_gem_init_object(struct drm_gem_object *obj);
+void cirrus_gem_free_object(struct drm_gem_object *obj);
+int cirrus_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+int cirrus_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int cirrus_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int cirrus_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+ struct cirrus_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+	/* cirrus_mode.c */
+int cirrus_modeset_init(struct cirrus_device *cdev);
+void cirrus_modeset_fini(struct cirrus_device *cdev);
+
+ /* cirrus_fbdev.c */
+int cirrus_fbdev_init(struct cirrus_device *cdev);
+void cirrus_fbdev_fini(struct cirrus_device *cdev);
+
+
+
+ /* cirrus_irq.c */
+void cirrus_driver_irq_preinstall(struct drm_device *dev);
+int cirrus_driver_irq_postinstall(struct drm_device *dev);
+void cirrus_driver_irq_uninstall(struct drm_device *dev);
+irqreturn_t cirrus_driver_irq_handler(DRM_IRQ_ARGS);
+
+ /* cirrus_kms.c */
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
+int cirrus_driver_unload(struct drm_device *dev);
+extern struct drm_ioctl_desc cirrus_ioctls[];
+extern int cirrus_max_ioctl;
+
+int cirrus_mm_init(struct cirrus_device *cirrus);
+void cirrus_mm_fini(struct cirrus_device *cirrus);
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct cirrus_bo **pcirrusbo);
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
+int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait);
+void cirrus_bo_unreserve(struct cirrus_bo *bo);
+int cirrus_bo_push_sysram(struct cirrus_bo *bo);
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
new file mode 100644
index 000000000000..9a276a536992
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_fb_helper.h"
+
+#include <linux/fb.h>
+
+#include "cirrus_drv.h"
+
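+/*
+ * fbcon draws into a vmalloc()ed shadow (gfbdev->sysram); the fb_ops below
+ * then copy just the touched scanlines into the VRAM object with
+ * memcpy_toio(), presumably to keep read-modify-write cycles out of slow
+ * VRAM.
+ */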
+static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct cirrus_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
+
+ obj = afbdev->gfb.obj;
+ bo = gem_to_cirrus_bo(obj);
+
+ ret = cirrus_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("failed to reserve fb bo\n");
+ return;
+ }
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ cirrus_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ cirrus_bo_unreserve(bo);
+}
+
+static void cirrus_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_fillrect(info, rect);
+ cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void cirrus_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_copyarea(info, area);
+ cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void cirrus_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_imageblit(info, image);
+ cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+
+static struct fb_ops cirrusfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cirrus_fillrect,
+ .fb_copyarea = cirrus_copyarea,
+ .fb_imageblit = cirrus_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ if (bpp > 24)
+ return -EINVAL;
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = cirrus_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int cirrusfb_create(struct cirrus_fbdev *gfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = gfbdev->helper.dev;
+ struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ void *sysram;
+ struct drm_gem_object *gobj = NULL;
+ struct cirrus_bo *bo = NULL;
+ int size, ret;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+
+ bo = gem_to_cirrus_bo(gobj);
+
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+	info = framebuffer_alloc(0, device);
+	if (info == NULL) {
+		vfree(sysram);
+		return -ENOMEM;
+	}
+
+	info->par = gfbdev;
+
+	ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
+	if (ret) {
+		framebuffer_release(info);
+		vfree(sysram);
+		return ret;
+	}
+
+ gfbdev->sysram = sysram;
+ gfbdev->size = size;
+
+	fb = &gfbdev->gfb.base;
+
+ /* setup helper */
+ gfbdev->helper.fb = fb;
+ gfbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "cirrusdrmfb");
+
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &cirrusfb_ops;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+ info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = cdev->mc.vram_size;
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
+ DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
+ DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
+ DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO(" pitch is %d\n", fb->pitches[0]);
+
+ return 0;
+out_iounmap:
+ return ret;
+}
+
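+/*
+ * drm_fb_helper's ->fb_probe hook: per the helper contract we return 1 if
+ * a new framebuffer was created, 0 if an existing one was reused, or a
+ * negative error code.
+ */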
+static int cirrus_fb_find_or_create_single(struct drm_fb_helper *helper,
+					   struct drm_fb_helper_surface_size *sizes)
+{
+ struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = cirrusfb_create(gfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static int cirrus_fbdev_destroy(struct drm_device *dev,
+ struct cirrus_fbdev *gfbdev)
+{
+ struct fb_info *info;
+ struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+
+ if (gfbdev->helper.fbdev) {
+ info = gfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (gfb->obj) {
+ drm_gem_object_unreference_unlocked(gfb->obj);
+ gfb->obj = NULL;
+ }
+
+ vfree(gfbdev->sysram);
+ drm_fb_helper_fini(&gfbdev->helper);
+ drm_framebuffer_cleanup(&gfb->base);
+
+ return 0;
+}
+
+static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
+ .gamma_set = cirrus_crtc_fb_gamma_set,
+ .gamma_get = cirrus_crtc_fb_gamma_get,
+ .fb_probe = cirrus_fb_find_or_create_single,
+};
+
+int cirrus_fbdev_init(struct cirrus_device *cdev)
+{
+ struct cirrus_fbdev *gfbdev;
+ int ret;
+ int bpp_sel = 24;
+
+ gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
+ if (!gfbdev)
+ return -ENOMEM;
+
+ cdev->mode_info.gfbdev = gfbdev;
+ gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
+ cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
+ if (ret) {
+ kfree(gfbdev);
+ return ret;
+ }
+ drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+ drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
+
+ return 0;
+}
+
+void cirrus_fbdev_fini(struct cirrus_device *cdev)
+{
+ if (!cdev->mode_info.gfbdev)
+ return;
+
+ cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
+ kfree(cdev->mode_info.gfbdev);
+ cdev->mode_info.gfbdev = NULL;
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
new file mode 100644
index 000000000000..e3c122578417
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "cirrus_drv.h"
+
+
+static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
+ if (cirrus_fb->obj)
+ drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static int cirrus_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
+ .destroy = cirrus_user_framebuffer_destroy,
+ .create_handle = cirrus_user_framebuffer_create_handle,
+};
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+ struct cirrus_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ return 0;
+}
+
+static struct drm_framebuffer *
+cirrus_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct cirrus_framebuffer *cirrus_fb;
+ int ret;
+ u32 bpp, depth;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ /* cirrus can't handle > 24bpp framebuffers at all */
+ if (bpp > 24)
+ return ERR_PTR(-EINVAL);
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
+ if (!cirrus_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(cirrus_fb);
+ return ERR_PTR(ret);
+ }
+ return &cirrus_fb->base;
+}
+
+static const struct drm_mode_config_funcs cirrus_mode_funcs = {
+ .fb_create = cirrus_user_framebuffer_create,
+};
+
+/* Unmap the register MMIO and release the VRAM memory region */
+static void cirrus_vram_fini(struct cirrus_device *cdev)
+{
+ iounmap(cdev->rmmio);
+ cdev->rmmio = NULL;
+ if (cdev->mc.vram_base)
+ release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
+}
+
+/* Reserve the VRAM region and record its base/size for the rest of the driver */
+static int cirrus_vram_init(struct cirrus_device *cdev)
+{
+ /* BAR 0 is VRAM */
+ cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
+ /* We have 4MB of VRAM */
+ cdev->mc.vram_size = 4 * 1024 * 1024;
+
+ if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
+ "cirrusdrmfb_vram")) {
+ DRM_ERROR("can't reserve VRAM\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Our emulated hardware has two sets of memory. One is video RAM and can
+ * simply be used as a linear framebuffer - the other provides mmio access
+ * to the display registers. The latter can also be accessed via IO port
+ * access, but we map the range and use mmio to program them instead
+ */
+
+int cirrus_device_init(struct cirrus_device *cdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev, uint32_t flags)
+{
+ int ret;
+
+ cdev->dev = ddev;
+ cdev->flags = flags;
+
+ /* Hardcode the number of CRTCs to 1 */
+ cdev->num_crtc = 1;
+
+ /* BAR 0 is the framebuffer, BAR 1 contains registers */
+ cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
+ cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
+
+ if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
+ "cirrusdrmfb_mmio")) {
+ DRM_ERROR("can't reserve mmio registers\n");
+ return -ENOMEM;
+ }
+
+	cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
+	if (cdev->rmmio == NULL) {
+		release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+		return -ENOMEM;
+	}
+
+	ret = cirrus_vram_init(cdev);
+	if (ret) {
+		iounmap(cdev->rmmio);
+		release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+		return ret;
+	}
+
+ return 0;
+}
+
+void cirrus_device_fini(struct cirrus_device *cdev)
+{
+ release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+ cirrus_vram_fini(cdev);
+}
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device
+ */
+
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct cirrus_device *cdev;
+ int r;
+
+ cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
+ if (cdev == NULL)
+ return -ENOMEM;
+ dev->dev_private = (void *)cdev;
+
+ r = cirrus_device_init(cdev, dev, dev->pdev, flags);
+ if (r) {
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+ goto out;
+ }
+
+	r = cirrus_mm_init(cdev);
+	if (r) {
+		dev_err(&dev->pdev->dev, "Fatal error during mm init: %d\n", r);
+		goto out;
+	}
+
+	r = cirrus_modeset_init(cdev);
+	if (r) {
+		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+		goto out;
+	}
+
+ dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
+out:
+ if (r)
+ cirrus_driver_unload(dev);
+ return r;
+}
+
+int cirrus_driver_unload(struct drm_device *dev)
+{
+ struct cirrus_device *cdev = dev->dev_private;
+
+ if (cdev == NULL)
+ return 0;
+ cirrus_modeset_fini(cdev);
+ cirrus_mm_fini(cdev);
+ cirrus_device_fini(cdev);
+ kfree(cdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int cirrus_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct cirrus_bo *cirrusbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &cirrusbo->gem;
+ return 0;
+}
+
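+/*
+ * Dumb-buffer hooks: the generic KMS ioctls for allocating unaccelerated
+ * scanout memory. Userspace passes only a size/bpp and gets back a GEM
+ * handle and the pitch we chose, with no driver-specific ioctls needed.
+ */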
+int cirrus_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = cirrus_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+int cirrus_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int cirrus_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+ return 0;
+}
+
+void cirrus_bo_unref(struct cirrus_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+
+}
+
+void cirrus_gem_free_object(struct drm_gem_object *obj)
+{
+ struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
+
+ if (!cirrus_bo)
+ return;
+ cirrus_bo_unref(&cirrus_bo);
+}
+
+
+static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
+{
+ return bo->bo.addr_space_offset;
+}
+
+int
+cirrus_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct cirrus_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_cirrus_bo(obj);
+ *offset = cirrus_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
new file mode 100644
index 000000000000..100f6308c509
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -0,0 +1,629 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ *
+ * Portions of this code derived from cirrusfb.c:
+ * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
+ *
+ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include <video/cirrus.h>
+
+#include "cirrus_drv.h"
+
+#define CIRRUS_LUT_SIZE 256
+
+#define PALETTE_INDEX 0x8
+#define PALETTE_DATA 0x9
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ /* VGA registers */
+ WREG8(PALETTE_INDEX, i);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_r[i]);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_g[i]);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_b[i]);
+ }
+}
+
+/*
+ * The DRM core requires DPMS functions. We implement them by toggling the
+ * screen-off bit in SR01 and what appear to be the sync-suspend bits in
+ * GR0E, so blanking does take effect on this hardware
+ */
+
+static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ u8 sr01, gr0e;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ sr01 = 0x00;
+ gr0e = 0x00;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ sr01 = 0x20;
+ gr0e = 0x02;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ sr01 = 0x20;
+ gr0e = 0x04;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ sr01 = 0x20;
+ gr0e = 0x06;
+ break;
+ default:
+ return;
+ }
+
+ WREG8(SEQ_INDEX, 0x1);
+ sr01 |= RREG8(SEQ_DATA) & ~0x20;
+ WREG_SEQ(0x1, sr01);
+
+ WREG8(GFX_INDEX, 0xe);
+ gr0e |= RREG8(GFX_DATA) & ~0x06;
+ WREG_GFX(0xe, gr0e);
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
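+/*
+ * The 20-bit display start address (in dwords) is scattered over several
+ * CRTC registers, as programmed below:
+ *   CR0D = addr[7:0], CR0C = addr[15:8],
+ *   CR1B[0] = addr[16], CR1B[3:2] = addr[18:17], CR1D[7] = addr[19]
+ */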
+void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+ struct cirrus_device *cdev = crtc->dev->dev_private;
+ u32 addr;
+ u8 tmp;
+
+ addr = offset >> 2;
+ WREG_CRT(0x0c, (u8)((addr >> 8) & 0xff));
+ WREG_CRT(0x0d, (u8)(addr & 0xff));
+
+ WREG8(CRT_INDEX, 0x1b);
+ tmp = RREG8(CRT_DATA);
+ tmp &= 0xf2;
+ tmp |= (addr >> 16) & 0x01;
+ tmp |= (addr >> 15) & 0x0c;
+ WREG_CRT(0x1b, tmp);
+ WREG8(CRT_INDEX, 0x1d);
+ tmp = RREG8(CRT_DATA);
+ tmp &= 0x7f;
+ tmp |= (addr >> 12) & 0x80;
+ WREG_CRT(0x1d, tmp);
+}
+
+/* cirrus is different - with only 4MB of VRAM we force-move the old
+ * framebuffer out of VRAM before pinning the new one */
+static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct cirrus_device *cdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct cirrus_framebuffer *cirrus_fb;
+ struct cirrus_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ cirrus_fb = to_cirrus_framebuffer(fb);
+ obj = cirrus_fb->obj;
+ bo = gem_to_cirrus_bo(obj);
+ ret = cirrus_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ cirrus_bo_push_sysram(bo);
+ cirrus_bo_unreserve(bo);
+ }
+
+ cirrus_fb = to_cirrus_framebuffer(crtc->fb);
+ obj = cirrus_fb->obj;
+ bo = gem_to_cirrus_bo(obj);
+
+ ret = cirrus_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ cirrus_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+		/* if pushing the console framebuffer in, kmap it for fbcon */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+ }
+ cirrus_bo_unreserve(bo);
+
+ cirrus_set_start_address(crtc, (u32)gpu_addr);
+ return 0;
+}
+
+static int cirrus_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+/*
+ * The meat of this driver. The core passes us a mode and we have to program
+ * it. The modesetting here is the bare minimum required to satisfy the qemu
+ * emulation of this hardware, and running this against a real device is
+ * likely to result in an inadequately programmed mode. We've already had
+ * the opportunity to modify the mode, so whatever we receive here should
+ * be something that can be correctly programmed and displayed
+ */
+static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ int hsyncstart, hsyncend, htotal, hdispend;
+ int vtotal, vdispend;
+ int tmp;
+ int sr07 = 0, hdr = 0;
+
+ htotal = mode->htotal / 8;
+ hsyncend = mode->hsync_end / 8;
+ hsyncstart = mode->hsync_start / 8;
+ hdispend = mode->hdisplay / 8;
+
+ vtotal = mode->vtotal;
+ vdispend = mode->vdisplay;
+
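+	/*
+	 * The VGA CRTC holds these values with fixed offsets baked in
+	 * (horizontal counts in character clocks minus a constant, vertical
+	 * counts minus one or two lines), so adjust before programming.
+	 */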
+ vdispend -= 1;
+ vtotal -= 2;
+
+ htotal -= 5;
+ hdispend -= 1;
+ hsyncstart += 1;
+ hsyncend += 1;
+
+ WREG_CRT(VGA_CRTC_V_SYNC_END, 0x20);
+ WREG_CRT(VGA_CRTC_H_TOTAL, htotal);
+ WREG_CRT(VGA_CRTC_H_DISP, hdispend);
+ WREG_CRT(VGA_CRTC_H_SYNC_START, hsyncstart);
+ WREG_CRT(VGA_CRTC_H_SYNC_END, hsyncend);
+ WREG_CRT(VGA_CRTC_V_TOTAL, vtotal & 0xff);
+ WREG_CRT(VGA_CRTC_V_DISP_END, vdispend & 0xff);
+
+ tmp = 0x40;
+ if ((vdispend + 1) & 512)
+ tmp |= 0x20;
+ WREG_CRT(VGA_CRTC_MAX_SCAN, tmp);
+
+ /*
+ * Overflow bits for values that don't fit in the standard registers
+ */
+ tmp = 16;
+ if (vtotal & 256)
+ tmp |= 1;
+ if (vdispend & 256)
+ tmp |= 2;
+ if ((vdispend + 1) & 256)
+ tmp |= 8;
+ if (vtotal & 512)
+ tmp |= 32;
+ if (vdispend & 512)
+ tmp |= 64;
+ WREG_CRT(VGA_CRTC_OVERFLOW, tmp);
+
+ tmp = 0;
+
+ /* More overflow bits */
+
+ if ((htotal + 5) & 64)
+ tmp |= 16;
+ if ((htotal + 5) & 128)
+ tmp |= 32;
+ if (vtotal & 256)
+ tmp |= 64;
+ if (vtotal & 512)
+ tmp |= 128;
+
+ WREG_CRT(CL_CRT1A, tmp);
+
+ /* Disable Hercules/CGA compatibility */
+ WREG_CRT(VGA_CRTC_MODE, 0x03);
+
+ WREG8(SEQ_INDEX, 0x7);
+ sr07 = RREG8(SEQ_DATA);
+ sr07 &= 0xe0;
+ hdr = 0;
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ sr07 |= 0x11;
+ break;
+ case 16:
+ sr07 |= 0xc1;
+ hdr = 0xc0;
+ break;
+ case 24:
+ sr07 |= 0x15;
+ hdr = 0xc5;
+ break;
+ case 32:
+ sr07 |= 0x19;
+ hdr = 0xc5;
+ break;
+ default:
+		return -EINVAL;
+ }
+
+ WREG_SEQ(0x7, sr07);
+
+ /* Program the pitch */
+ tmp = crtc->fb->pitches[0] / 8;
+ WREG_CRT(VGA_CRTC_OFFSET, tmp);
+
+ /* Enable extended blanking and pitch bits, and enable full memory */
+ tmp = 0x22;
+ tmp |= (crtc->fb->pitches[0] >> 7) & 0x10;
+ tmp |= (crtc->fb->pitches[0] >> 6) & 0x40;
+ WREG_CRT(0x1b, tmp);
+
+ /* Enable high-colour modes */
+ WREG_GFX(VGA_GFX_MODE, 0x40);
+
+ /* And set graphics mode */
+ WREG_GFX(VGA_GFX_MISC, 0x01);
+
+ WREG_HDR(hdr);
+ cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ return 0;
+}
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * enable DPMS during the programming to avoid seeing intermediate stages,
+ * but that's not relevant to us
+ */
+static void cirrus_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void cirrus_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+ int i;
+
+ if (size != CIRRUS_LUT_SIZE)
+ return;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ cirrus_crtc->lut_r[i] = red[i];
+ cirrus_crtc->lut_g[i] = green[i];
+ cirrus_crtc->lut_b[i] = blue[i];
+ }
+ cirrus_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void cirrus_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(cirrus_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs cirrus_crtc_funcs = {
+ .gamma_set = cirrus_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = cirrus_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
+ .dpms = cirrus_crtc_dpms,
+ .mode_fixup = cirrus_crtc_mode_fixup,
+ .mode_set = cirrus_crtc_mode_set,
+ .mode_set_base = cirrus_crtc_mode_set_base,
+ .prepare = cirrus_crtc_prepare,
+ .commit = cirrus_crtc_commit,
+ .load_lut = cirrus_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void cirrus_crtc_init(struct drm_device *dev)
+{
+ struct cirrus_device *cdev = dev->dev_private;
+ struct cirrus_crtc *cirrus_crtc;
+ int i;
+
+ cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
+ (CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+
+ if (cirrus_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &cirrus_crtc->base, &cirrus_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
+ cdev->mode_info.crtc = cirrus_crtc;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ cirrus_crtc->lut_r[i] = i;
+ cirrus_crtc->lut_g[i] = i;
+ cirrus_crtc->lut_b[i] = i;
+ }
+
+ drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ cirrus_crtc->lut_r[regno] = red;
+ cirrus_crtc->lut_g[regno] = green;
+ cirrus_crtc->lut_b[regno] = blue;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ *red = cirrus_crtc->lut_r[regno];
+ *green = cirrus_crtc->lut_g[regno];
+ *blue = cirrus_crtc->lut_b[regno];
+}
+
+
+static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void cirrus_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+}
+
+static void cirrus_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void cirrus_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+void cirrus_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(cirrus_encoder);
+}
+
+static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
+ .dpms = cirrus_encoder_dpms,
+ .mode_fixup = cirrus_encoder_mode_fixup,
+ .mode_set = cirrus_encoder_mode_set,
+ .prepare = cirrus_encoder_prepare,
+ .commit = cirrus_encoder_commit,
+};
+
+static const struct drm_encoder_funcs cirrus_encoder_encoder_funcs = {
+ .destroy = cirrus_encoder_destroy,
+};
+
+static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct cirrus_encoder *cirrus_encoder;
+
+ cirrus_encoder = kzalloc(sizeof(struct cirrus_encoder), GFP_KERNEL);
+ if (!cirrus_encoder)
+ return NULL;
+
+ encoder = &cirrus_encoder->base;
+ encoder->possible_crtcs = 0x1;
+
+ drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
+
+ return encoder;
+}
+
+
+int cirrus_vga_get_modes(struct drm_connector *connector)
+{
+ /* Just add a static list of modes */
+ drm_add_modes_noedid(connector, 640, 480);
+ drm_add_modes_noedid(connector, 800, 600);
+ drm_add_modes_noedid(connector, 1024, 768);
+ drm_add_modes_noedid(connector, 1280, 1024);
+
+ return 4;
+}
+
+static int cirrus_vga_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* Any mode we've added is valid */
+ return MODE_OK;
+}
+
+struct drm_encoder *
+cirrus_connector_best_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	/* pick the first encoder id */
+	if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, enc_id,
+					   DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			return NULL;
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
+}
+
+static enum drm_connector_status
+cirrus_vga_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void cirrus_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
+ .get_modes = cirrus_vga_get_modes,
+ .mode_valid = cirrus_vga_mode_valid,
+ .best_encoder = cirrus_connector_best_encoder,
+};
+
+struct drm_connector_funcs cirrus_vga_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = cirrus_vga_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = cirrus_connector_destroy,
+};
+
+static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct cirrus_connector *cirrus_connector;
+
+ cirrus_connector = kzalloc(sizeof(struct cirrus_connector), GFP_KERNEL);
+ if (!cirrus_connector)
+ return NULL;
+
+ connector = &cirrus_connector->base;
+
+ drm_connector_init(dev, connector,
+ &cirrus_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
+
+ return connector;
+}
+
+
+int cirrus_modeset_init(struct cirrus_device *cdev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ drm_mode_config_init(cdev->dev);
+ cdev->mode_info.mode_config_initialized = true;
+
+ cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH;
+ cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
+
+ cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
+ cdev->dev->mode_config.preferred_depth = 24;
+ /* don't prefer a shadow on virt GPU */
+ cdev->dev->mode_config.prefer_shadow = 0;
+
+ cirrus_crtc_init(cdev->dev);
+
+ encoder = cirrus_encoder_init(cdev->dev);
+ if (!encoder) {
+ DRM_ERROR("cirrus_encoder_init failed\n");
+		return -ENOMEM;
+ }
+
+ connector = cirrus_vga_init(cdev->dev);
+ if (!connector) {
+ DRM_ERROR("cirrus_vga_init failed\n");
+		return -ENOMEM;
+ }
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ret = cirrus_fbdev_init(cdev);
+ if (ret) {
+ DRM_ERROR("cirrus_fbdev_init failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void cirrus_modeset_fini(struct cirrus_device *cdev)
+{
+ cirrus_fbdev_fini(cdev);
+
+ if (cdev->mode_info.mode_config_initialized) {
+ drm_mode_config_cleanup(cdev->dev);
+ cdev->mode_info.mode_config_initialized = false;
+ }
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
new file mode 100644
index 000000000000..2ebcd11a5023
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "cirrus_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct cirrus_device *
+cirrus_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct cirrus_device, ttm.bdev);
+}
+
+static int
+cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &cirrus->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &cirrus_ttm_mem_global_init;
+ global_ref->release = &cirrus_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ cirrus->ttm.bo_global_ref.mem_glob =
+ cirrus->ttm.mem_global_ref.object;
+ global_ref = &cirrus->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
+
+void
+cirrus_ttm_global_release(struct cirrus_device *cirrus)
+{
+ if (cirrus->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+ cirrus->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct cirrus_bo *bo;
+
+ bo = container_of(tbo, struct cirrus_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &cirrus_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+cirrus_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct cirrus_bo *cirrusbo = cirrus_bo(bo);
+
+ if (!cirrus_ttm_bo_is_cirrus_bo(bo))
+ return;
+
+ cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_SYSTEM);
+ *pl = cirrusbo->placement;
+}
+
+static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
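+/*
+ * Tell TTM how CPU mappings of each memory type work: system pages need no
+ * aperture, while VRAM pages are reached through PCI BAR 0 at
+ * bus.base + bus.offset.
+ */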
+static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct cirrus_device *cirrus = cirrus_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(cirrus->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+		return -EINVAL;
+ }
+ return 0;
+}
+
+static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int cirrus_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ int r;
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func cirrus_tt_backend_func = {
+ .destroy = &cirrus_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &cirrus_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver cirrus_bo_driver = {
+ .ttm_tt_create = cirrus_ttm_tt_create,
+ .ttm_tt_populate = cirrus_ttm_tt_populate,
+ .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
+ .init_mem_type = cirrus_bo_init_mem_type,
+ .evict_flags = cirrus_bo_evict_flags,
+ .move = cirrus_bo_move,
+ .verify_access = cirrus_bo_verify_access,
+ .io_mem_reserve = &cirrus_ttm_io_mem_reserve,
+ .io_mem_free = &cirrus_ttm_io_mem_free,
+};
+
+int cirrus_mm_init(struct cirrus_device *cirrus)
+{
+ int ret;
+ struct drm_device *dev = cirrus->dev;
+ struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
+
+ ret = cirrus_ttm_global_init(cirrus);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&cirrus->ttm.bdev,
+ cirrus->ttm.bo_global_ref.ref.object,
+ &cirrus_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ cirrus->mc.vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ cirrus->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+
+ return 0;
+}
+
+void cirrus_mm_fini(struct cirrus_device *cirrus)
+{
+ struct drm_device *dev = cirrus->dev;
+ ttm_bo_device_release(&cirrus->ttm.bdev);
+
+ cirrus_ttm_global_release(cirrus);
+
+ if (cirrus->fb_mtrr >= 0) {
+ drm_mtrr_del(cirrus->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+ cirrus->fb_mtrr = -1;
+ }
+}
+
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+void cirrus_bo_unreserve(struct cirrus_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct cirrus_bo **pcirrusbo)
+{
+ struct cirrus_device *cirrus = dev->dev_private;
+ struct cirrus_bo *cirrusbo;
+ size_t acc_size;
+ int ret;
+
+ cirrusbo = kzalloc(sizeof(struct cirrus_bo), GFP_KERNEL);
+ if (!cirrusbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &cirrusbo->gem, size);
+ if (ret) {
+ kfree(cirrusbo);
+ return ret;
+ }
+
+ cirrusbo->gem.driver_private = NULL;
+ cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+
+ cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size,
+ sizeof(struct cirrus_bo));
+
+ ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
+ ttm_bo_type_device, &cirrusbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ NULL, cirrus_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pcirrusbo = cirrusbo;
+ return 0;
+}
+
+static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = cirrus_bo_gpu_offset(bo);
+		return 0;
+	}
+
+ cirrus_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = cirrus_bo_gpu_offset(bo);
+ return 0;
+}
+
+int cirrus_bo_unpin(struct cirrus_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int cirrus_bo_push_sysram(struct cirrus_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct cirrus_device *cirrus;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ cirrus = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
+}
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 4b8653b932f9..08758e061478 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -98,3 +98,26 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
+
+void
+drm_clflush_virt_range(char *addr, unsigned long length)
+{
+#if defined(CONFIG_X86)
+ if (cpu_has_clflush) {
+ char *end = addr + length;
+ mb();
+ for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
+ clflush(addr);
+ clflush(end - 1);
+ mb();
+ return;
+ }
+
+ if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+ printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+ WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_virt_range);
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 325365f6d355..affa629589ac 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -85,11 +85,12 @@ again:
mutex_lock(&dev->struct_mutex);
ret = idr_get_new_above(&dev->ctx_idr, NULL,
DRM_RESERVED_CONTEXTS, &new_id);
- if (ret == -EAGAIN) {
- mutex_unlock(&dev->struct_mutex);
- goto again;
- }
mutex_unlock(&dev->struct_mutex);
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ return ret;
+
return new_id;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index c79870a75c2f..92cea9d77ec9 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -227,7 +227,7 @@ static int drm_mode_object_get(struct drm_device *dev,
again:
if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Ran out memory getting a mode number\n");
- return -EINVAL;
+ return -ENOMEM;
}
mutex_lock(&dev->mode_config.idr_mutex);
@@ -235,6 +235,8 @@ again:
mutex_unlock(&dev->mode_config.idr_mutex);
if (ret == -EAGAIN)
goto again;
+ else if (ret)
+ return ret;
obj->id = new_id;
obj->type = obj_type;
@@ -361,7 +363,7 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
* @funcs: callbacks for the new CRTC
*
* LOCKING:
- * Caller must hold mode config lock.
+ * Takes mode_config lock.
*
* Inits a new object created as base part of an driver crtc object.
*
@@ -382,6 +384,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
if (ret)
goto out;
+ crtc->base.properties = &crtc->properties;
+
list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
dev->mode_config.num_crtc++;
@@ -481,6 +485,7 @@ int drm_connector_init(struct drm_device *dev,
if (ret)
goto out;
+ connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
connector->connector_type = connector_type;
@@ -603,6 +608,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (ret)
goto out;
+ plane->base.properties = &plane->properties;
plane->dev = dev;
plane->funcs = funcs;
plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
@@ -1422,11 +1428,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
}
connector = obj_to_connector(obj);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- props_count++;
- }
- }
+ props_count = connector->properties.count;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
@@ -1479,21 +1481,19 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
copied = 0;
prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- if (put_user(connector->property_ids[i],
- prop_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
+ for (i = 0; i < connector->properties.count; i++) {
+ if (put_user(connector->properties.ids[i],
+ prop_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
- if (put_user(connector->property_values[i],
- prop_values + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+ if (put_user(connector->properties.values[i],
+ prop_values + copied)) {
+ ret = -EFAULT;
+ goto out;
}
+ copied++;
}
}
out_resp->count_props = props_count;
@@ -1830,7 +1830,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_display_mode *mode = NULL;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
- int ret = 0;
+ int ret;
int i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -2102,7 +2102,7 @@ int drm_mode_addfb(struct drm_device *dev,
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
if (IS_ERR(fb)) {
- DRM_ERROR("could not create framebuffer\n");
+ DRM_DEBUG_KMS("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
}
@@ -2185,6 +2185,47 @@ static int format_check(struct drm_mode_fb_cmd2 *r)
}
}
+static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
+{
+ int ret, hsub, vsub, num_planes, i;
+
+ ret = format_check(r);
+ if (ret) {
+ DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
+ return ret;
+ }
+
+ hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+ num_planes = drm_format_num_planes(r->pixel_format);
+
+ if (r->width == 0 || r->width % hsub) {
+ DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
+ return -EINVAL;
+ }
+
+ if (r->height == 0 || r->height % vsub) {
+ DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = r->width / (i != 0 ? hsub : 1);
+
+ if (!r->handles[i]) {
+ DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+ return -EINVAL;
+ }
+
+ if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
+ DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
* @inode: inode from the ioctl
@@ -2208,33 +2249,31 @@ int drm_mode_addfb2(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
+ DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
return -EINVAL;
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
+ DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
r->height, config->min_height, config->max_height);
return -EINVAL;
}
- ret = format_check(r);
- if (ret) {
- DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
+ ret = framebuffer_check(r);
+ if (ret)
return ret;
- }
mutex_lock(&dev->mode_config.mutex);
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
- DRM_ERROR("could not create framebuffer\n");
+ DRM_DEBUG_KMS("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
}
@@ -2365,7 +2404,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -2564,7 +2603,7 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
struct drm_display_mode *mode;
struct drm_mode_object *obj;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -2618,7 +2657,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
struct drm_connector *connector;
struct drm_display_mode mode;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -2710,6 +2749,34 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
}
EXPORT_SYMBOL(drm_property_create_enum);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
+{
+ struct drm_property *property;
+ int i, ret;
+
+ flags |= DRM_MODE_PROP_BITMASK;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+EXPORT_SYMBOL(drm_property_create_bitmask);
+
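
A hypothetical driver-side sketch of the new helper (the property name and
enum entries are invented for illustration); each entry's .type is a bit
position, so legal property values are ORed masks of those bits:

static const struct drm_prop_enum_list hypothetical_caps[] = {
	{ 0, "feature-a" },	/* bit 0 -> mask 0x1 */
	{ 1, "feature-b" },	/* bit 1 -> mask 0x2 */
	{ 2, "feature-c" },	/* bit 2 -> mask 0x4 */
};

struct drm_property *prop;

prop = drm_property_create_bitmask(dev, 0, "hypothetical-caps",
				   hypothetical_caps,
				   ARRAY_SIZE(hypothetical_caps));
if (prop)
	/* initial value: feature-a and feature-b enabled */
	drm_object_attach_property(&plane->base, prop, 0x1 | 0x2);
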
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max)
@@ -2734,7 +2801,14 @@ int drm_property_add_enum(struct drm_property *property, int index,
{
struct drm_property_enum *prop_enum;
- if (!(property->flags & DRM_MODE_PROP_ENUM))
+ if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
+ return -EINVAL;
+
+	/*
+	 * Bitmask enum properties have the additional constraint that the
+	 * enum value (the bit position) must lie in the range 0 to 63.
+	 */
+ if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
return -EINVAL;
if (!list_empty(&property->enum_blob_list)) {
@@ -2778,60 +2852,78 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
-int drm_connector_attach_property(struct drm_connector *connector,
+void drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val)
{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == 0) {
- connector->property_ids[i] = property->base.id;
- connector->property_values[i] = init_val;
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ drm_object_attach_property(&connector->base, property, init_val);
}
EXPORT_SYMBOL(drm_connector_attach_property);
int drm_connector_property_set_value(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
+ return drm_object_property_set_value(&connector->base, property, value);
+}
+EXPORT_SYMBOL(drm_connector_property_set_value);
+
+int drm_connector_property_get_value(struct drm_connector *connector,
+ struct drm_property *property, uint64_t *val)
+{
+ return drm_object_property_get_value(&connector->base, property, val);
+}
+EXPORT_SYMBOL(drm_connector_property_get_value);
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val)
+{
+ int count = obj->properties->count;
+
+ if (count == DRM_OBJECT_MAX_PROPERTY) {
+ WARN(1, "Failed to attach object property (type: 0x%x). Please "
+ "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+ "you see this message on the same object type.\n",
+ obj->type);
+ return;
+ }
+
+ obj->properties->ids[count] = property->base.id;
+ obj->properties->values[count] = init_val;
+ obj->properties->count++;
+}
+EXPORT_SYMBOL(drm_object_attach_property);
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t val)
+{
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- connector->property_values[i] = value;
- break;
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ obj->properties->values[i] = val;
+ return 0;
}
}
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL(drm_connector_property_set_value);
+EXPORT_SYMBOL(drm_object_property_set_value);
-int drm_connector_property_get_value(struct drm_connector *connector,
+int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- *val = connector->property_values[i];
- break;
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ *val = obj->properties->values[i];
+ return 0;
}
}
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL(drm_connector_property_get_value);
+EXPORT_SYMBOL(drm_object_property_get_value);
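
With the storage moved into the mode object itself, the connector wrappers
above become one-liners and any object carrying a properties block can use
the generic calls directly. A hedged sketch, assuming prop was attached to
the CRTC earlier:

uint64_t val;

drm_object_property_set_value(&crtc->base, prop, 1);
if (!drm_object_property_get_value(&crtc->base, prop, &val))
	DRM_DEBUG_KMS("property reads back as %llu\n",
		      (unsigned long long)val);
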
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -2862,7 +2954,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
}
property = obj_to_property(obj);
- if (property->flags & DRM_MODE_PROP_ENUM) {
+ if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
} else if (property->flags & DRM_MODE_PROP_BLOB) {
@@ -2887,7 +2979,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
}
out_resp->count_values = value_count;
- if (property->flags & DRM_MODE_PROP_ENUM) {
+ if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
@@ -3009,7 +3101,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
struct drm_device *dev = connector->dev;
- int ret = 0, size;
+ int ret, size;
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -3033,75 +3125,202 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+static bool drm_property_change_is_valid(struct drm_property *property,
+ __u64 value)
+{
+ if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ return false;
+ if (property->flags & DRM_MODE_PROP_RANGE) {
+ if (value < property->values[0] || value > property->values[1])
+ return false;
+ return true;
+ } else if (property->flags & DRM_MODE_PROP_BITMASK) {
+ int i;
+ __u64 valid_mask = 0;
+ for (i = 0; i < property->num_values; i++)
+ valid_mask |= (1ULL << property->values[i]);
+ return !(value & ~valid_mask);
+ } else {
+ int i;
+ for (i = 0; i < property->num_values; i++)
+ if (property->values[i] == value)
+ return true;
+ return false;
+ }
+}
+
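
As a worked example of the bitmask branch: a property registered with bit
positions {0, 1, 4} yields valid_mask 0x13, so a requested value of 0x11
is accepted while 0x08 is rejected:

__u64 bits[] = { 0, 1, 4 };
__u64 valid_mask = 0;
int i;

for (i = 0; i < 3; i++)
	valid_mask |= 1ULL << bits[i];	/* valid_mask == 0x13 */

/* (0x11 & ~0x13) == 0    -> accepted (bits 0 and 4 are registered)
 * (0x08 & ~0x13) == 0x08 -> rejected (bit 3 was never registered) */
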
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_connector_set_property *out_resp = data;
- struct drm_mode_object *obj;
- struct drm_property *property;
- struct drm_connector *connector;
+ struct drm_mode_connector_set_property *conn_set_prop = data;
+ struct drm_mode_obj_set_property obj_set_prop = {
+ .value = conn_set_prop->value,
+ .prop_id = conn_set_prop->prop_id,
+ .obj_id = conn_set_prop->connector_id,
+ .obj_type = DRM_MODE_OBJECT_CONNECTOR
+ };
+
+ /* It does all the locking and checking we need */
+ return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
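
From userspace the legacy connector ioctl and the generic one are now
interchangeable. A hypothetical libdrm-style sketch, where fd,
connector_id and dpms_prop_id are assumed to have been obtained already:

struct drm_mode_obj_set_property arg = {
	.value = DRM_MODE_DPMS_OFF,
	.prop_id = dpms_prop_id,
	.obj_id = connector_id,
	.obj_type = DRM_MODE_OBJECT_CONNECTOR,
};

drmIoctl(fd, DRM_IOCTL_MODE_OBJ_SETPROPERTY, &arg);
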
+static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
int ret = -EINVAL;
+ struct drm_connector *connector = obj_to_connector(obj);
+
+ /* Do DPMS ourselves */
+ if (property == connector->dev->mode_config.dpms_property) {
+ if (connector->funcs->dpms)
+ (*connector->funcs->dpms)(connector, (int)value);
+ ret = 0;
+ } else if (connector->funcs->set_property)
+ ret = connector->funcs->set_property(connector, property, value);
+
+ /* store the property value if successful */
+ if (!ret)
+ drm_connector_property_set_value(connector, property, value);
+ return ret;
+}
+
+static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->set_property)
+ ret = crtc->funcs->set_property(crtc, property, value);
+ if (!ret)
+ drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_plane *plane = obj_to_plane(obj);
+
+ if (plane->funcs->set_property)
+ ret = plane->funcs->set_property(plane, property, value);
+ if (!ret)
+ drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_get_properties *arg = data;
+ struct drm_mode_object *obj;
+ int ret = 0;
int i;
+ int copied = 0;
+ int props_count = 0;
+ uint32_t __user *props_ptr;
+ uint64_t __user *prop_values_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!obj->properties) {
+ ret = -EINVAL;
goto out;
}
- connector = obj_to_connector(obj);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == out_resp->prop_id)
- break;
+ props_count = obj->properties->count;
+
+	/* This ioctl is called twice, once to determine how much space is
+	 * needed, and a second time to fill it. */
+ if ((arg->count_props >= props_count) && props_count) {
+ copied = 0;
+ props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+ prop_values_ptr = (uint64_t __user *)(unsigned long)
+ (arg->prop_values_ptr);
+ for (i = 0; i < props_count; i++) {
+ if (put_user(obj->properties->ids[i],
+ props_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (put_user(obj->properties->values[i],
+ prop_values_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
}
+ arg->count_props = props_count;
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
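
A hypothetical userspace sketch of the two-pass pattern the comment
describes (error handling elided; fd and crtc_id assumed to be known):

struct drm_mode_obj_get_properties arg = {
	.obj_id = crtc_id,
	.obj_type = DRM_MODE_OBJECT_CRTC,
};
uint32_t *ids;
uint64_t *values;

/* first pass: arrays are NULL, count_props comes back filled in */
drmIoctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg);

ids = calloc(arg.count_props, sizeof(*ids));
values = calloc(arg.count_props, sizeof(*values));
arg.props_ptr = (uintptr_t)ids;
arg.prop_values_ptr = (uintptr_t)values;

/* second pass: both arrays get filled */
drmIoctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg);
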
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_set_property *arg = data;
+ struct drm_mode_object *arg_obj;
+ struct drm_mode_object *prop_obj;
+ struct drm_property *property;
+ int ret = -EINVAL;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
- if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+ arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!arg_obj)
+ goto out;
+ if (!arg_obj->properties)
goto out;
- }
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
+ for (i = 0; i < arg_obj->properties->count; i++)
+ if (arg_obj->properties->ids[i] == arg->prop_id)
+ break;
+
+ if (i == arg_obj->properties->count)
goto out;
- }
- property = obj_to_property(obj);
- if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ prop_obj = drm_mode_object_find(dev, arg->prop_id,
+ DRM_MODE_OBJECT_PROPERTY);
+ if (!prop_obj)
goto out;
+ property = obj_to_property(prop_obj);
- if (property->flags & DRM_MODE_PROP_RANGE) {
- if (out_resp->value < property->values[0])
- goto out;
+ if (!drm_property_change_is_valid(property, arg->value))
+ goto out;
- if (out_resp->value > property->values[1])
- goto out;
- } else {
- int found = 0;
- for (i = 0; i < property->num_values; i++) {
- if (property->values[i] == out_resp->value) {
- found = 1;
- break;
- }
- }
- if (!found) {
- goto out;
- }
+ switch (arg_obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR:
+ ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+ arg->value);
+ break;
+ case DRM_MODE_OBJECT_CRTC:
+ ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+ break;
+ case DRM_MODE_OBJECT_PLANE:
+ ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+ break;
}
- /* Do DPMS ourselves */
- if (property == connector->dev->mode_config.dpms_property) {
- if (connector->funcs->dpms)
- (*connector->funcs->dpms)(connector, (int) out_resp->value);
- ret = 0;
- } else if (connector->funcs->set_property)
- ret = connector->funcs->set_property(connector, property, out_resp->value);
-
- /* store the property value if successful */
- if (!ret)
- drm_connector_property_set_value(connector, property, out_resp->value);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
@@ -3173,6 +3392,11 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
}
crtc = obj_to_crtc(obj);
+ if (crtc->funcs->gamma_set == NULL) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
ret = -EINVAL;
@@ -3468,3 +3692,140 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
}
}
EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * RETURNS:
+ * The bytes per pixel value for the specified plane.
+ */
+int drm_format_plane_cpp(uint32_t format, int plane)
+{
+ unsigned int depth;
+ int bpp;
+
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return 2;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return plane ? 2 : 1;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 1;
+ default:
+ drm_fb_get_bpp_depth(format, &depth, &bpp);
+ return bpp >> 3;
+ }
+}
+EXPORT_SYMBOL(drm_format_plane_cpp);
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_horz_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_vert_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
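
Taken together, the four helpers let core code size a multi-planar buffer
without per-format special casing. A sketch for a tightly packed 1920x1080
YUV420 allocation (no pitch alignment applied):

uint32_t format = DRM_FORMAT_YUV420;
int hsub = drm_format_horz_chroma_subsampling(format);	/* 2 */
int vsub = drm_format_vert_chroma_subsampling(format);	/* 2 */
int num_planes = drm_format_num_planes(format);		/* 3 */
unsigned int size = 0;
int i;

for (i = 0; i < num_planes; i++) {
	unsigned int w = 1920 / (i ? hsub : 1);
	unsigned int h = 1080 / (i ? vsub : 1);

	size += w * h * drm_format_plane_cpp(format, i);
}
/* size == 1920*1080 + 2 * 960*540 == 3110400 bytes */
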
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 81118893264c..3252e7067d8b 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -518,7 +518,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_mode_set save_set;
- int ret = 0;
+ int ret;
int i;
DRM_DEBUG_KMS("\n");
@@ -1023,36 +1023,3 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
-
-
-/**
- * drm_format_num_planes - get the number of planes for format
- * @format: pixel format (DRM_FORMAT_*)
- *
- * RETURNS:
- * The number of planes used by the specified pixel format.
- */
-int drm_format_num_planes(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 3;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- return 2;
- default:
- return 1;
- }
-}
-EXPORT_SYMBOL(drm_format_num_planes);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6116e3b75393..8a9d0792e4ec 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -163,7 +163,9 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5a18b0df8285..608bddfc7e35 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -81,7 +81,7 @@ struct detailed_mode_closure {
#define LEVEL_CVT 3
static struct edid_quirk {
- char *vendor;
+ char vendor[4];
int product_id;
u32 quirks;
} edid_quirk_list[] = {
@@ -149,13 +149,13 @@ EXPORT_SYMBOL(drm_edid_header_is_valid);
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
*/
-bool drm_edid_block_valid(u8 *raw_edid)
+bool drm_edid_block_valid(u8 *raw_edid, int block)
{
int i;
u8 csum = 0;
struct edid *edid = (struct edid *)raw_edid;
- if (raw_edid[0] == 0x00) {
+ if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
else if (score >= 6) {
@@ -219,7 +219,7 @@ bool drm_edid_is_valid(struct edid *edid)
return false;
for (i = 0; i <= edid->extensions; i++)
- if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i))
return false;
return true;
@@ -299,7 +299,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block))
+ if (drm_edid_block_valid(block, 0))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
@@ -324,7 +324,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+ if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) {
valid_extensions++;
break;
}
@@ -486,23 +486,47 @@ static void edid_fixup_preferred(struct drm_connector *connector,
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
+static bool
+mode_is_rb(const struct drm_display_mode *mode)
+{
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
+
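
The four constants are the fixed CVT reduced-blanking signature: a
160-pixel horizontal blank, hsync ending 80 pixels after active, a
32-pixel hsync, and a 3-line vertical front porch. Checking them against
the 1920x1200@60Hz RB entry added to drm_dmt_modes in this series:

/* 1920x1200@60Hz RB: 1920, 1968, 2000, 2080 / 1200, 1203, 1209, 1235
 *   htotal      - hdisplay    = 2080 - 1920 = 160
 *   hsync_end   - hdisplay    = 2000 - 1920 = 80
 *   hsync_end   - hsync_start = 2000 - 1968 = 32
 *   vsync_start - vdisplay    = 1203 - 1200 = 3
 * so mode_is_rb() reports true for it. */
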
+/*
+ * drm_mode_find_dmt - Create a copy of a mode if present in DMT
+ * @dev: Device to duplicate against
+ * @hsize: Mode width
+ * @vsize: Mode height
+ * @fresh: Mode refresh rate
+ * @rb: Whether the mode uses CVT reduced blanking
+ *
+ * Walk the DMT mode list looking for a match for the given parameters.
+ * Return a newly allocated copy of the mode, or NULL if not found.
+ */
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh)
+ int hsize, int vsize, int fresh,
+ bool rb)
{
- struct drm_display_mode *mode = NULL;
int i;
for (i = 0; i < drm_num_dmt_modes; i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
- if (hsize == ptr->hdisplay &&
- vsize == ptr->vdisplay &&
- fresh == drm_mode_vrefresh(ptr)) {
- /* get the expected default mode */
- mode = drm_mode_duplicate(dev, ptr);
- break;
- }
+ if (hsize != ptr->hdisplay)
+ continue;
+ if (vsize != ptr->vdisplay)
+ continue;
+ if (fresh != drm_mode_vrefresh(ptr))
+ continue;
+ if (rb != mode_is_rb(ptr))
+ continue;
+
+ return drm_mode_duplicate(dev, ptr);
}
- return mode;
+
+ return NULL;
}
EXPORT_SYMBOL(drm_mode_find_dmt);
@@ -731,10 +755,17 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
}
/* check whether it can be found in default mode table */
- mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
+ if (drm_monitor_supports_rb(edid)) {
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
+ true);
+ if (mode)
+ return mode;
+ }
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
if (mode)
return mode;
+ /* okay, generate it */
switch (timing_level) {
case LEVEL_DMT:
break;
@@ -748,6 +779,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
* secondary GTF curve. Please don't do that.
*/
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (!mode)
+ return NULL;
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
drm_mode_destroy(dev, mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
@@ -909,15 +942,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
}
static bool
-mode_is_rb(const struct drm_display_mode *mode)
-{
- return (mode->htotal - mode->hdisplay == 160) &&
- (mode->hsync_end - mode->hdisplay == 80) &&
- (mode->hsync_end - mode->hsync_start == 32) &&
- (mode->vsync_start - mode->vdisplay == 3);
-}
-
-static bool
mode_in_hsync_range(const struct drm_display_mode *mode,
struct edid *edid, u8 *t)
{
@@ -994,12 +1018,8 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
return true;
}
-/*
- * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
- * need to account for them.
- */
static int
-drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
@@ -1019,17 +1039,110 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
}
+/* Fix up the 1366x768 mode from 1368x768;
+ * GTF/CVT can't express a width of 1366, which isn't divisible by 8.
+ */
+static void fixup_mode_1366x768(struct drm_display_mode *mode)
+{
+ if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
+ mode->hdisplay = 1366;
+ mode->hsync_start--;
+ mode->hsync_end--;
+ drm_mode_set_name(mode);
+ }
+}
+
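
For a GTF/CVT-generated 1368x768 mode this shrinks the active width by two
pixels and pulls both sync edges in by one, leaving the sync width and
htotal untouched. A worked sketch with illustrative (not spec-derived)
timings:

/* before: hdisplay 1368, hsync_start 1436, hsync_end 1500, htotal 1632
 * after:  hdisplay 1366, hsync_start 1435, hsync_end 1499, htotal 1632
 * (sync width stays 64, front porch grows from 68 to 69) */
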
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < num_extra_modes; i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
+static int
+drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ bool rb = drm_monitor_supports_rb(edid);
+
+ for (i = 0; i < num_extra_modes; i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
static void
do_inferred_modes(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
- int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+ struct detailed_data_monitor_range *range = &data->data.range;
+
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ return;
- if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
+ closure->modes += drm_dmt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+
+ if (!version_greater(closure->edid, 1, 1))
+ return; /* GTF not defined yet */
+
+ switch (range->flags) {
+ case 0x02: /* secondary gtf, XXX could do more */
+ case 0x00: /* default gtf */
closure->modes += drm_gtf_modes_for_range(closure->connector,
closure->edid,
timing);
+ break;
+ case 0x04: /* cvt, only in 1.4+ */
+ if (!version_greater(closure->edid, 1, 3))
+ break;
+
+ closure->modes += drm_cvt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+ break;
+ case 0x01: /* just the ranges, no formula */
+ default:
+ break;
+ }
}
static int
@@ -1062,8 +1175,8 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
mode = drm_mode_find_dmt(connector->dev,
est3_modes[m].w,
est3_modes[m].h,
- est3_modes[m].r
- /*, est3_modes[m].rb */);
+ est3_modes[m].r,
+ est3_modes[m].rb);
if (mode) {
drm_mode_probed_add(connector, mode);
modes++;
@@ -1312,6 +1425,8 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
+#define EDID_CEA_YCRCB444 (1 << 5)
+#define EDID_CEA_YCRCB422 (1 << 4)
/**
* Search EDID for CEA extension block.
@@ -1666,13 +1781,29 @@ static void drm_add_display_info(struct edid *edid,
info->bpc = 0;
info->color_formats = 0;
- /* Only defined for 1.4 with digital displays */
- if (edid->revision < 4)
+ if (edid->revision < 3)
return;
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
return;
+ /* Get data from CEA blocks if present */
+ edid_ext = drm_find_cea_extension(edid);
+ if (edid_ext) {
+ info->cea_rev = edid_ext[1];
+
+ /* The existence of a CEA block should imply RGB support */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ }
+
+ /* Only defined for 1.4 with digital displays */
+ if (edid->revision < 4)
+ return;
+
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
info->bpc = 6;
@@ -1698,18 +1829,11 @@ static void drm_add_display_info(struct edid *edid,
break;
}
- info->color_formats = DRM_COLOR_FORMAT_RGB444;
- if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB444)
- info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
- if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
- info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
-
- /* Get data from CEA blocks if present */
- edid_ext = drm_find_cea_extension(edid);
- if (!edid_ext)
- return;
-
- info->cea_rev = edid_ext[1];
+ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
/**
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index da9acba2dd6c..66d4a28ad5a2 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -173,7 +173,7 @@ static int edid_load(struct drm_connector *connector, char *name,
}
memcpy(edid, fwdata, fwsize);
- if (!drm_edid_block_valid(edid)) {
+ if (!drm_edid_block_valid(edid, 0)) {
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
name);
kfree(edid);
@@ -185,7 +185,7 @@ static int edid_load(struct drm_connector *connector, char *name,
if (i != valid_extensions + 1)
memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
edid + i * EDID_LENGTH, EDID_LENGTH);
- if (drm_edid_block_valid(edid + i * EDID_LENGTH))
+ if (drm_edid_block_valid(edid + i * EDID_LENGTH, i))
valid_extensions++;
}
@@ -220,18 +220,18 @@ int drm_load_edid_firmware(struct drm_connector *connector)
{
char *connector_name = drm_get_connector_name(connector);
char *edidname = edid_firmware, *last, *colon;
- int ret = 0;
+ int ret;
if (*edidname == '\0')
- return ret;
+ return 0;
colon = strchr(edidname, ':');
if (colon != NULL) {
if (strncmp(connector_name, edidname, colon - edidname))
- return ret;
+ return 0;
edidname = colon + 1;
if (*edidname == '\0')
- return ret;
+ return 0;
}
last = edidname + strlen(edidname) - 1;
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index a91ffb117220..ff98a7eb38dd 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -30,7 +30,6 @@
/*
* Autogenerated from the DMT spec.
* This table is copied from xfree86/modes/xf86EdidModes.c.
- * But the mode with Reduced blank feature is deleted.
*/
static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
@@ -81,6 +80,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
896, 1048, 0, 600, 601, 604, 631, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@120Hz RB */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
+ 880, 960, 0, 600, 603, 607, 636, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 848x480@60Hz */
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
976, 1088, 0, 480, 486, 494, 517, 0,
@@ -106,10 +109,18 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@120Hz RB */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
+ 1104, 1184, 0, 768, 771, 775, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 790, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
@@ -122,6 +133,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
1496, 1712, 0, 768, 771, 778, 809, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@120Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 823, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
@@ -134,6 +153,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
1496, 1712, 0, 800, 803, 809, 843, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@120Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 847, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
@@ -142,6 +165,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
1504, 1728, 0, 960, 961, 964, 1011, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@120Hz RB */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
+ 1360, 1440, 0, 960, 963, 967, 1017, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
@@ -154,22 +181,42 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@120Hz RB */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
+ 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@60Hz */
+ /* 1360x768@120Hz RB */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
+ 1440, 1520, 0, 768, 771, 776, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@75Hz */
+ /* 1400x1050@75Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@85Hz */
+ /* 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@120Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 926, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
@@ -182,6 +229,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
1696, 1952, 0, 900, 903, 909, 948, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@120Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 953, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
@@ -202,6 +253,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@120Hz RB */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
+ 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
@@ -214,15 +273,23 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@120Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1729x1344@75Hz */
+ /* 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1853x1392@60Hz */
+ /* 1792x1344@120Hz RB */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
+ 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
@@ -230,6 +297,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@120Hz RB */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
+ 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
@@ -242,6 +317,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@120Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
@@ -250,6 +329,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@120Hz RB */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
+ 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
@@ -262,6 +349,11 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@120Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
};
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
@@ -320,12 +412,14 @@ static const struct drm_display_mode edid_est_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
-static const struct {
+struct minimode {
short w;
short h;
short r;
short rb;
-} est3_modes[] = {
+};
+
+static const struct minimode est3_modes[] = {
/* byte 6 */
{ 640, 350, 85, 0 },
{ 640, 400, 85, 0 },
@@ -377,288 +471,304 @@ static const struct {
{ 1920, 1440, 60, 0 },
{ 1920, 1440, 75, 0 },
};
-static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+static const int num_est3_modes = ARRAY_SIZE(est3_modes);
+
+static const struct minimode extra_modes[] = {
+ { 1024, 576, 60, 0 },
+ { 1366, 768, 60, 0 },
+ { 1600, 900, 60, 0 },
+ { 1680, 945, 60, 0 },
+ { 1920, 1080, 60, 0 },
+ { 2048, 1152, 60, 0 },
+ { 2048, 1536, 60, 0 },
+};
+static const int num_extra_modes = ARRAY_SIZE(extra_modes);
/*
* Probably taken from CEA-861 spec.
* This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
*/
static const struct drm_display_mode edid_cea_modes[] = {
- /* 640x480@60Hz */
+ /* 1 - 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x480@60Hz */
+ /* 2 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x480@60Hz */
+ /* 3 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x720@60Hz */
+ /* 4 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080i@60Hz */
+ /* 5 - 1920x1080i@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1440x480i@60Hz */
+ /* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x480i@60Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x240@60Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x240@60Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x480i@60Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 10 - 2880x480i@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 2880x480i@60Hz */
+ /* 11 - 2880x480i@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 2880x240@60Hz */
+ /* 12 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x240@60Hz */
+ /* 13 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x480@60Hz */
+ /* 14 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x480@60Hz */
+ /* 15 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1080@60Hz */
+ /* 16 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x576@50Hz */
+ /* 17 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x576@50Hz */
+ /* 18 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x720@50Hz */
+ /* 19 - 1280x720@50Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080i@50Hz */
+ /* 20 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1440x576i@50Hz */
+ /* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x576i@50Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x288@50Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x288@50Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x576i@50Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 25 - 2880x576i@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 2880x576i@50Hz */
+ /* 26 - 2880x576i@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 2880x288@50Hz */
+ /* 27 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x288@50Hz */
+ /* 28 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576@50Hz */
+ /* 29 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576@50Hz */
+ /* 30 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1080@50Hz */
+ /* 31 - 1920x1080@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@24Hz */
+ /* 32 - 1920x1080@24Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@25Hz */
+ /* 33 - 1920x1080@25Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@30Hz */
+ /* 34 - 1920x1080@30Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2880x480@60Hz */
+ /* 35 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x480@60Hz */
+ /* 36 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x576@50Hz */
+ /* 37 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x576@50Hz */
+ /* 38 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1080i@50Hz */
+ /* 39 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1920x1080i@100Hz */
+ /* 40 - 1920x1080i@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1280x720@100Hz */
+ /* 41 - 1280x720@100Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x576@100Hz */
+ /* 42 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x576@100Hz */
+ /* 43 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576i@100Hz */
+ /* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576i@100Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1080i@120Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1280x720@120Hz */
+ /* 47 - 1280x720@120Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x480@120Hz */
+ /* 48 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x480@120Hz */
+ /* 49 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x480i@120Hz */
+ /* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x480i@120Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 720x576@200Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 52 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x576@200Hz */
+ /* 53 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576i@200Hz */
+ /* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x576i@200Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 720x480@240Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 56 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x480@240Hz */
+ /* 57 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x480i@240 */
+	/* 58 - 1440x480i@240Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x480i@240 */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 59 - 1440x480i@240Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1280x720@24Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 60 - 1280x720@24Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x720@25Hz */
+ /* 61 - 1280x720@25Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x720@30Hz */
+ /* 62 - 1280x720@30Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@120Hz */
+ /* 63 - 1920x1080@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@100Hz */
+ /* 64 - 1920x1080@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
-static const int drm_num_cea_modes =
- sizeof (edid_cea_modes) / sizeof (edid_cea_modes[0]);
+static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a0d6e894d97c..5683b7fdd746 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -136,6 +136,9 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
{
uint16_t *r_base, *g_base, *b_base;
+ if (crtc->funcs->gamma_set == NULL)
+ return;
+
r_base = crtc->gamma_store;
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
@@ -383,7 +386,6 @@ int drm_fb_helper_init(struct drm_device *dev,
int crtc_count, int max_conn_count)
{
struct drm_crtc *crtc;
- int ret = 0;
int i;
fb_helper->dev = dev;
@@ -408,10 +410,8 @@ int drm_fb_helper_init(struct drm_device *dev,
sizeof(struct drm_connector *),
GFP_KERNEL);
- if (!fb_helper->crtc_info[i].mode_set.connectors) {
- ret = -ENOMEM;
+ if (!fb_helper->crtc_info[i].mode_set.connectors)
goto out_free;
- }
fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
@@ -1083,7 +1083,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
/* try and find a 1024x768 mode on each connector */
can_clone = true;
- dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
for (i = 0; i < fb_helper->connector_count; i++) {
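The extra `false` argument reflects a signature change made elsewhere in this series: drm_mode_find_dmt() apparently grew a trailing bool that selects reduced-blanking DMT variants. A hedged sketch of a caller under that assumption:

#include "drmP.h"	/* drm_mode_find_dmt(); signature assumed from this series */

/* Sketch: look up the classic 1024x768@60 DMT mode; passing false keeps
 * the pre-change behaviour of matching only full-blanking timings. */
static struct drm_display_mode *find_fallback_mode(struct drm_device *dev)
{
	return drm_mode_find_dmt(dev, 1024, 768, 60, false /* no reduced blanking */);
}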
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 83114b5e3cee..d58e69da1fb5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -201,6 +201,19 @@ free:
}
EXPORT_SYMBOL(drm_gem_object_alloc);
+static void
+drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+{
+ if (obj->import_attach) {
+ drm_prime_remove_imported_buf_handle(&filp->prime,
+ obj->import_attach->dmabuf);
+ }
+ if (obj->export_dma_buf) {
+ drm_prime_remove_imported_buf_handle(&filp->prime,
+ obj->export_dma_buf);
+ }
+}
+
/**
* Removes the mapping from handle to filp for this object.
*/
@@ -233,9 +246,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
- if (obj->import_attach)
- drm_prime_remove_imported_buf_handle(&filp->prime,
- obj->import_attach->dmabuf);
+ drm_gem_remove_prime_handles(obj, filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
@@ -272,8 +283,7 @@ again:
spin_unlock(&file_priv->table_lock);
if (ret == -EAGAIN)
goto again;
-
- if (ret != 0)
+ else if (ret)
return ret;
drm_gem_object_handle_reference(obj);
@@ -329,7 +339,7 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list;
struct drm_local_map *map;
- int ret = 0;
+ int ret;
/* Set the object up for mmap'ing */
list = &obj->map_list;
@@ -456,8 +466,7 @@ again:
if (ret == -EAGAIN)
goto again;
-
- if (ret != 0)
+ else if (ret)
goto err;
/* Allocate a reference for the name table. */
@@ -532,9 +541,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
- if (obj->import_attach)
- drm_prime_remove_imported_buf_handle(&file_priv->prime,
- obj->import_attach->dmabuf);
+ drm_gem_remove_prime_handles(obj, file_priv);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
@@ -628,7 +635,7 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
drm_gem_object_reference(obj);
mutex_lock(&obj->dev->struct_mutex);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(obj->dev, vma);
mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
@@ -639,7 +646,7 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
struct drm_device *dev = obj->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(vma);
+ drm_vm_close_locked(obj->dev, vma);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
}
@@ -712,7 +719,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
*/
drm_gem_object_reference(obj);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
out_unlock:
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index cf85155da2a0..64a62c697313 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -283,6 +283,10 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_DUMB_PREFER_SHADOW:
req->value = dev->mode_config.prefer_shadow;
break;
+ case DRM_CAP_PRIME:
+ req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
+ req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+ break;
default:
return -EINVAL;
}
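With DRM_CAP_PRIME, userspace can probe import/export support before attempting any PRIME ioctl. A small sketch using libdrm's drmGetCap(); the device node path and the presence of these capability flags in the installed headers are assumptions:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>	/* drmGetCap(); pulls in the DRM_CAP_ definitions */

int main(void)
{
	uint64_t value = 0;
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;
	if (drmGetCap(fd, DRM_CAP_PRIME, &value) == 0) {
		printf("PRIME import: %s\n",
		       value & DRM_PRIME_CAP_IMPORT ? "yes" : "no");
		printf("PRIME export: %s\n",
		       value & DRM_PRIME_CAP_EXPORT ? "yes" : "no");
	}
	close(fd);
	return 0;
}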
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c869436e238a..c798eeae0a03 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -189,7 +189,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
if (dev->num_crtcs == 0)
return;
- del_timer(&dev->vblank_disable_timer);
+ del_timer_sync(&dev->vblank_disable_timer);
vblank_disable_fn((unsigned long)dev);
@@ -310,7 +310,7 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
*/
int drm_irq_install(struct drm_device *dev)
{
- int ret = 0;
+ int ret;
unsigned long sh_flags = 0;
char *irqname;
@@ -731,7 +731,7 @@ EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags)
{
- int ret = 0;
+ int ret;
/* Define requested maximum error on timestamps (nanoseconds). */
int max_error = (int) drm_timestamp_precision * 1000;
@@ -1031,18 +1031,15 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int ret = 0;
unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
- goto out;
+ return 0;
crtc = modeset->crtc;
- if (crtc >= dev->num_crtcs) {
- ret = -EINVAL;
- goto out;
- }
+ if (crtc >= dev->num_crtcs)
+ return -EINVAL;
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
@@ -1052,12 +1049,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
drm_vblank_post_modeset(dev, crtc);
break;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
@@ -1154,7 +1149,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_wait_vblank *vblwait = data;
- int ret = 0;
+ int ret;
unsigned int flags, seq, crtc, high_crtc;
if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
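The del_timer_sync() change is about teardown ordering: del_timer() only deactivates a pending timer, while del_timer_sync() also waits for a handler that is already running on another CPU. A kernel-style sketch of the pattern, with illustrative types:

#include <linux/timer.h>
#include <linux/slab.h>

struct my_state {			/* illustrative only */
	struct timer_list disable_timer;
	void *vblank_data;
};

static void my_cleanup(struct my_state *s)
{
	/* Wait for a concurrently running handler too; the data it touches
	 * is freed immediately afterwards, so plain del_timer() would leave
	 * a use-after-free window. */
	del_timer_sync(&s->disable_timer);
	kfree(s->vblank_data);
}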
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index c79c713eeba0..521152041691 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -331,7 +331,7 @@ static int drm_notifier(void *priv)
void drm_idlelock_take(struct drm_lock_data *lock_data)
{
- int ret = 0;
+ int ret;
spin_lock_bh(&lock_data->spinlock);
lock_data->kernel_waiters++;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1bdf2b54eaf6..f546ff98a114 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -68,6 +68,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
{
struct drm_gem_object *obj;
void *buf;
+ int ret;
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj)
@@ -100,6 +101,17 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
obj->export_dma_buf = buf;
*prime_fd = dma_buf_fd(buf, flags);
}
+	/* if we've exported this buffer then cheat and add it to the import list
+ * so we get the correct handle back
+ */
+ ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+ obj->export_dma_buf, handle);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ mutex_unlock(&file_priv->prime.lock);
+ return ret;
+ }
+
mutex_unlock(&file_priv->prime.lock);
return 0;
}
@@ -227,6 +239,42 @@ out:
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
+/*
+ * Export an sg table into an array of pages and addresses. This is
+ * currently required by the TTM driver in order to do correct fault
+ * handling.
+ */
+int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+ dma_addr_t *addrs, int max_pages)
+{
+ unsigned count;
+ struct scatterlist *sg;
+ struct page *page;
+ u32 len, offset;
+ int pg_index;
+ dma_addr_t addr;
+
+ pg_index = 0;
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ len = sg->length;
+ offset = sg->offset;
+ page = sg_page(sg);
+ addr = sg_dma_address(sg);
+
+ while (len > 0) {
+ if (WARN_ON(pg_index >= max_pages))
+ return -1;
+ pages[pg_index] = page;
+ if (addrs)
+ addrs[pg_index] = addr;
+
+ page++;
+ addr += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ pg_index++;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
/* helper function to cleanup a GEM/prime object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
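A hedged sketch of how a TTM-backed driver might use the new helper to flatten an imported sg table into the page and DMA-address arrays its fault handler works from; the allocation sizing and error handling are illustrative, and the declaration is assumed to live in drmP.h in this series:

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "drmP.h"	/* drm_prime_sg_to_page_addr_arrays() */

static int flatten_import(struct sg_table *sgt, unsigned long num_pages,
			  struct page ***pages_out, dma_addr_t **addrs_out)
{
	struct page **pages;
	dma_addr_t *addrs;
	int ret;

	pages = kcalloc(num_pages, sizeof(*pages), GFP_KERNEL);
	addrs = kcalloc(num_pages, sizeof(*addrs), GFP_KERNEL);
	if (!pages || !addrs) {
		ret = -ENOMEM;
		goto err;
	}

	/* Fails (returns -1) if the table expands to more than num_pages. */
	if (drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs, num_pages)) {
		ret = -EINVAL;
		goto err;
	}

	*pages_out = pages;
	*addrs_out = addrs;
	return 0;

err:
	kfree(pages);
	kfree(addrs);
	return ret;
}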
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index aa454f80e109..21bcd4a555d8 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -122,11 +122,10 @@ again:
ret = idr_get_new_above(&drm_minors_idr, NULL,
base, &new_id);
mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN) {
+ if (ret == -EAGAIN)
goto again;
- } else if (ret) {
+ else if (ret)
return ret;
- }
if (new_id >= limit) {
idr_remove(&drm_minors_idr, new_id);
@@ -211,7 +210,7 @@ EXPORT_SYMBOL(drm_master_put);
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- int ret = 0;
+ int ret;
if (file_priv->is_master)
return 0;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 5a7bd51fc3d8..45cf1dd3eb9c 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -347,17 +347,17 @@ static struct bin_attribute edid_attr = {
};
/**
- * drm_sysfs_connector_add - add an connector to sysfs
+ * drm_sysfs_connector_add - add a connector to sysfs
* @connector: connector to add
*
- * Create an connector device in sysfs, along with its associated connector
+ * Create a connector device in sysfs, along with its associated connector
* properties (so far, connection status, dpms, mode list & edid) and
* generate a hotplug event so userspace knows there's a new connector
* available.
*
* Note:
- * This routine should only be called *once* for each DRM minor registered.
- * A second call for an already registered device will trigger the BUG_ON
+ * This routine should only be called *once* for each registered connector.
+ * A second call for an already registered connector will trigger the BUG_ON
* below.
*/
int drm_sysfs_connector_add(struct drm_connector *connector)
@@ -366,7 +366,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
int attr_cnt = 0;
int opt_cnt = 0;
int i;
- int ret = 0;
+ int ret;
/* We shouldn't get called more than once for the same connector */
BUG_ON(device_is_registered(&connector->kdev));
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 149561818349..961ee08927fe 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -406,10 +406,9 @@ static const struct vm_operations_struct drm_vm_sg_ops = {
* Create a new drm_vma_entry structure as the \p vma private data entry and
* add it to drm_device::vmalist.
*/
-void drm_vm_open_locked(struct vm_area_struct *vma)
+void drm_vm_open_locked(struct drm_device *dev,
+ struct vm_area_struct *vma)
{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *vma_entry;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -430,14 +429,13 @@ static void drm_vm_open(struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
-void drm_vm_close_locked(struct vm_area_struct *vma)
+void drm_vm_close_locked(struct drm_device *dev,
+ struct vm_area_struct *vma)
{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -467,7 +465,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(vma);
+ drm_vm_close_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
@@ -519,7 +517,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_flags |= VM_DONTEXPAND;
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
return 0;
}
@@ -670,7 +668,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_flags |= VM_DONTEXPAND;
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 3343ac437fe5..7f5096763b7d 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
+config DRM_EXYNOS_DMABUF
+ bool "EXYNOS DRM DMABUF"
+ depends on DRM_EXYNOS
+ help
+	  Choose this option if you want to use the DMABUF feature for DRM.
+
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
depends on DRM_EXYNOS && !FB_S3C
@@ -27,3 +33,9 @@ config DRM_EXYNOS_VIDI
depends on DRM_EXYNOS
help
Choose this option if you want to use Exynos VIDI for DRM.
+
+config DRM_EXYNOS_G2D
+ bool "Exynos DRM G2D"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 9e0bff8badf9..eb651ca8e2a8 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,10 +8,12 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
exynos_ddc.o exynos_hdmiphy.o \
exynos_drm_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index de8d2090bce3..b3cb0a69fbf2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -35,7 +35,7 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
dma_addr_t start_addr;
- unsigned int npages, page_size, i = 0;
+ unsigned int npages, i = 0;
struct scatterlist *sgl;
int ret = 0;
@@ -53,13 +53,13 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
if (buf->size >= SZ_1M) {
npages = buf->size >> SECTION_SHIFT;
- page_size = SECTION_SIZE;
+ buf->page_size = SECTION_SIZE;
} else if (buf->size >= SZ_64K) {
npages = buf->size >> 16;
- page_size = SZ_64K;
+ buf->page_size = SZ_64K;
} else {
npages = buf->size >> PAGE_SHIFT;
- page_size = PAGE_SIZE;
+ buf->page_size = PAGE_SIZE;
}
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
@@ -96,9 +96,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
while (i < npages) {
buf->pages[i] = phys_to_page(start_addr);
- sg_set_page(sgl, buf->pages[i], page_size, 0);
+ sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
sg_dma_address(sgl) = start_addr;
- start_addr += page_size;
+ start_addr += buf->page_size;
sgl = sg_next(sgl);
i++;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 3486ffed0bf0..4afb625128d7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -105,6 +105,8 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
overlay->fb_y = pos->fb_y;
overlay->fb_width = fb->width;
overlay->fb_height = fb->height;
+ overlay->src_width = pos->src_w;
+ overlay->src_height = pos->src_h;
overlay->bpp = fb->bits_per_pixel;
overlay->pitch = fb->pitches[0];
overlay->pixel_format = fb->pixel_format;
@@ -153,6 +155,8 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
pos.crtc_y = 0;
pos.crtc_w = fb->width - crtc->x;
pos.crtc_h = fb->height - crtc->y;
+ pos.src_w = pos.crtc_w;
+ pos.src_h = pos.crtc_h;
return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 25f72a62cb88..16b8e2195a0d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -42,6 +42,8 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
* - the unit is screen coordinates.
* @fb_y: offset y on a framebuffer to be displayed
* - the unit is screen coordinates.
+ * @src_w: width of source area to be displayed from a framebuffer.
+ * @src_h: height of source area to be displayed from a framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
* @crtc_w: width of hardware screen.
@@ -50,6 +52,8 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
struct exynos_drm_crtc_pos {
unsigned int fb_x;
unsigned int fb_y;
+ unsigned int src_w;
+ unsigned int src_h;
unsigned int crtc_x;
unsigned int crtc_y;
unsigned int crtc_w;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
new file mode 100644
index 000000000000..274909271c36
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -0,0 +1,272 @@
+/* exynos_drm_dmabuf.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
+ unsigned int page_size)
+{
+ struct sg_table *sgt = NULL;
+ struct scatterlist *sgl;
+ int i, ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ goto out;
+
+ ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
+ if (ret)
+ goto err_free_sgt;
+
+ if (page_size < PAGE_SIZE)
+ page_size = PAGE_SIZE;
+
+ for_each_sg(sgt->sgl, sgl, nr_pages, i)
+ sg_set_page(sgl, pages[i], page_size, 0);
+
+ return sgt;
+
+err_free_sgt:
+ kfree(sgt);
+ sgt = NULL;
+out:
+ return NULL;
+}
+
+static struct sg_table *
+ exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
+ struct drm_device *dev = gem_obj->base.dev;
+ struct exynos_drm_gem_buf *buf;
+ struct sg_table *sgt = NULL;
+ unsigned int npages;
+ int nents;
+
+ DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+ mutex_lock(&dev->struct_mutex);
+
+ buf = gem_obj->buffer;
+
+ /* there should always be pages allocated. */
+ if (!buf->pages) {
+ DRM_ERROR("pages is null.\n");
+ goto err_unlock;
+ }
+
+ npages = buf->size / buf->page_size;
+
+	sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
+	if (!sgt)
+		goto err_unlock;
+	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+ DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
+ npages, buf->size, buf->page_size);
+
+err_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return sgt;
+}
+
+static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = NULL;
+}
+
+static void exynos_dmabuf_release(struct dma_buf *dmabuf)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
+
+ DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+	/*
+	 * A call to exynos_dmabuf_release() means the dma-buf file object's
+	 * f_count has reached 0, so drop the reference that was taken in
+	 * drm_prime_handle_to_fd() when the buffer was exported.
+	 */
+ if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
+ exynos_gem_obj->base.export_dma_buf = NULL;
+
+ /*
+ * drop this gem object refcount to release allocated buffer
+ * and resources.
+ */
+ drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
+ }
+}
+
+static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ /* TODO */
+
+ return NULL;
+}
+
+static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num,
+ void *addr)
+{
+ /* TODO */
+}
+
+static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ /* TODO */
+
+ return NULL;
+}
+
+static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+ /* TODO */
+}
+
+static struct dma_buf_ops exynos_dmabuf_ops = {
+ .map_dma_buf = exynos_gem_map_dma_buf,
+ .unmap_dma_buf = exynos_gem_unmap_dma_buf,
+ .kmap = exynos_gem_dmabuf_kmap,
+ .kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
+ .kunmap = exynos_gem_dmabuf_kunmap,
+ .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
+ .release = exynos_dmabuf_release,
+};
+
+struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
+ struct drm_gem_object *obj, int flags)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
+ exynos_gem_obj->base.size, 0600);
+}
+
+struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct scatterlist *sgl;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buffer;
+	int ret, i = 0;
+
+ DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+ /* is this one of own objects? */
+ if (dma_buf->ops == &exynos_dmabuf_ops) {
+ struct drm_gem_object *obj;
+
+ exynos_gem_obj = dma_buf->priv;
+ obj = &exynos_gem_obj->base;
+
+ /* is it from our device? */
+ if (obj->dev == drm_dev) {
+ drm_gem_object_reference(obj);
+ return obj;
+ }
+ }
+
+ attach = dma_buf_attach(dma_buf, drm_dev->dev);
+ if (IS_ERR(attach))
+ return ERR_PTR(-EINVAL);
+
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_buf_detach;
+ }
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
+ ret = -ENOMEM;
+ goto err_unmap_attach;
+ }
+
+	buffer->pages = kcalloc(sgt->nents, sizeof(*buffer->pages), GFP_KERNEL);
+ if (!buffer->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ ret = -ENOMEM;
+ goto err_free_buffer;
+ }
+
+ exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
+ if (!exynos_gem_obj) {
+ ret = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ sgl = sgt->sgl;
+ buffer->dma_addr = sg_dma_address(sgl);
+
+ while (i < sgt->nents) {
+ buffer->pages[i] = sg_page(sgl);
+ buffer->size += sg_dma_len(sgl);
+ sgl = sg_next(sgl);
+ i++;
+ }
+
+ exynos_gem_obj->buffer = buffer;
+ buffer->sgt = sgt;
+ exynos_gem_obj->base.import_attach = attach;
+
+ DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+ buffer->size);
+
+ return &exynos_gem_obj->base;
+
+err_free_pages:
+ kfree(buffer->pages);
+ buffer->pages = NULL;
+err_free_buffer:
+ kfree(buffer);
+ buffer = NULL;
+err_unmap_attach:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+err_buf_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
+MODULE_LICENSE("GPL");
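End to end, the export/import pair above is what backs the userspace PRIME flow. A sketch using libdrm's wrappers; the handle is assumed to name an existing GEM object on the exporting fd, and the two fds may belong to different processes if the prime fd is passed over a unix socket first:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>	/* drmPrimeHandleToFD(), drmPrimeFDToHandle() */

static int share_buffer(int exporter_fd, int importer_fd, uint32_t handle,
			 uint32_t *imported_handle)
{
	int prime_fd = -1;
	int ret;

	ret = drmPrimeHandleToFD(exporter_fd, handle, DRM_CLOEXEC, &prime_fd);
	if (ret)
		return ret;

	/* prime_fd is an ordinary file descriptor backed by the dma-buf. */
	ret = drmPrimeFDToHandle(importer_fd, prime_fd, imported_handle);
	close(prime_fd);
	return ret;
}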
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
new file mode 100644
index 000000000000..662a8f98ccdb
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
@@ -0,0 +1,39 @@
+/* exynos_drm_dmabuf.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_DMABUF_H_
+#define _EXYNOS_DRM_DMABUF_H_
+
+#ifdef CONFIG_DRM_EXYNOS_DMABUF
+struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
+ struct drm_gem_object *obj, int flags);
+
+struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
+ struct dma_buf *dma_buf);
+#else
+#define exynos_dmabuf_prime_export NULL
+#define exynos_dmabuf_prime_import NULL
+#endif
+#endif
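The header stubs the hooks out to NULL when the feature is compiled out; because they are consumed as entries in the drm_driver function-pointer table, no dummy implementation is needed. A generic sketch of that compile-out pattern, with all names illustrative:

#include <stddef.h>

struct export_ops {
	int (*export_buf)(unsigned int handle);	/* optional hook */
};

#ifdef CONFIG_FEATURE_X			/* hypothetical config symbol */
int feature_x_export_buf(unsigned int handle);
#else
#define feature_x_export_buf NULL	/* table entry, not a call site */
#endif

static const struct export_ops ops = {
	.export_buf = feature_x_export_buf,
};

static int try_export(unsigned int handle)
{
	/* Callers test the pointer, so a NULL stub disables the feature. */
	return ops.export_buf ? ops.export_buf(handle) : -1;
}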
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index a6819b5f8428..420953197d0a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -39,6 +39,8 @@
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
+#include "exynos_drm_dmabuf.h"
+#include "exynos_drm_g2d.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -147,8 +149,17 @@ static int exynos_drm_unload(struct drm_device *dev)
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
+ struct drm_exynos_file_private *file_priv;
+
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ drm_prime_init_file_private(&file->prime);
+ file->driver_priv = file_priv;
+
return exynos_drm_subdrv_open(dev, file);
}
@@ -170,6 +181,7 @@ static void exynos_drm_preclose(struct drm_device *dev,
e->base.destroy(&e->base);
}
}
+ drm_prime_destroy_file_private(&file->prime);
spin_unlock_irqrestore(&dev->event_lock, flags);
exynos_drm_subdrv_close(dev, file);
@@ -193,7 +205,7 @@ static void exynos_drm_lastclose(struct drm_device *dev)
exynos_drm_fbdev_restore_mode(dev);
}
-static struct vm_operations_struct exynos_drm_gem_vm_ops = {
+static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
.fault = exynos_drm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -207,10 +219,18 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
+ exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
+ exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST,
+ exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
+ exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -225,7 +245,7 @@ static const struct file_operations exynos_drm_driver_fops = {
static struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
- DRIVER_MODESET | DRIVER_GEM,
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = exynos_drm_load,
.unload = exynos_drm_unload,
.open = exynos_drm_open,
@@ -241,6 +261,10 @@ static struct drm_driver exynos_drm_driver = {
.dumb_create = exynos_drm_gem_dumb_create,
.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
.dumb_destroy = exynos_drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = exynos_dmabuf_prime_export,
+ .gem_prime_import = exynos_dmabuf_prime_import,
.ioctls = exynos_ioctls,
.fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
@@ -307,6 +331,12 @@ static int __init exynos_drm_init(void)
goto out_vidi;
#endif
+#ifdef CONFIG_DRM_EXYNOS_G2D
+ ret = platform_driver_register(&g2d_driver);
+ if (ret < 0)
+ goto out_g2d;
+#endif
+
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret < 0)
goto out;
@@ -314,6 +344,11 @@ static int __init exynos_drm_init(void)
return 0;
out:
+#ifdef CONFIG_DRM_EXYNOS_G2D
+ platform_driver_unregister(&g2d_driver);
+out_g2d:
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_VIDI
out_vidi:
platform_driver_unregister(&vidi_driver);
@@ -341,6 +376,10 @@ static void __exit exynos_drm_exit(void)
platform_driver_unregister(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_G2D
+ platform_driver_unregister(&g2d_driver);
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_HDMI
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
platform_driver_unregister(&mixer_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 1d814175cd49..c82c90c443e7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -77,6 +77,8 @@ struct exynos_drm_overlay_ops {
* - the unit is screen coordinates.
* @fb_width: width of a framebuffer.
* @fb_height: height of a framebuffer.
+ * @src_width: width of a partial image to be displayed from framebuffer.
+ * @src_height: height of a partial image to be displayed from framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
* @crtc_width: window width to be displayed (hardware screen).
@@ -108,6 +110,8 @@ struct exynos_drm_overlay {
unsigned int fb_y;
unsigned int fb_width;
unsigned int fb_height;
+ unsigned int src_width;
+ unsigned int src_height;
unsigned int crtc_x;
unsigned int crtc_y;
unsigned int crtc_width;
@@ -205,6 +209,18 @@ struct exynos_drm_manager {
struct exynos_drm_display_ops *display_ops;
};
+struct exynos_drm_g2d_private {
+ struct device *dev;
+ struct list_head inuse_cmdlist;
+ struct list_head event_list;
+ struct list_head gem_list;
+ unsigned int gem_nr;
+};
+
+struct drm_exynos_file_private {
+ struct exynos_drm_g2d_private *g2d_priv;
+};
+
/*
* Exynos drm private structure.
*/
@@ -287,4 +303,5 @@ extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
+extern struct platform_driver g2d_driver;
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c38c8f468fa3..f82a299553fb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -191,7 +191,7 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(fb_helper);
}
-static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
+static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
new file mode 100644
index 000000000000..d2d88f22a037
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -0,0 +1,937 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "drmP.h"
+#include "exynos_drm.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+
+#define G2D_HW_MAJOR_VER 4
+#define G2D_HW_MINOR_VER 1
+
+/* valid register range set from user: 0x0104 ~ 0x0880 */
+#define G2D_VALID_START 0x0104
+#define G2D_VALID_END 0x0880
+
+/* general registers */
+#define G2D_SOFT_RESET 0x0000
+#define G2D_INTEN 0x0004
+#define G2D_INTC_PEND 0x000C
+#define G2D_DMA_SFR_BASE_ADDR 0x0080
+#define G2D_DMA_COMMAND 0x0084
+#define G2D_DMA_STATUS 0x008C
+#define G2D_DMA_HOLD_CMD 0x0090
+
+/* command registers */
+#define G2D_BITBLT_START 0x0100
+
+/* registers for base address */
+#define G2D_SRC_BASE_ADDR 0x0304
+#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
+#define G2D_DST_BASE_ADDR 0x0404
+#define G2D_DST_PLANE2_BASE_ADDR 0x0418
+#define G2D_PAT_BASE_ADDR 0x0500
+#define G2D_MSK_BASE_ADDR 0x0520
+
+/* G2D_SOFT_RESET */
+#define G2D_SFRCLEAR (1 << 1)
+#define G2D_R (1 << 0)
+
+/* G2D_INTEN */
+#define G2D_INTEN_ACF (1 << 3)
+#define G2D_INTEN_UCF (1 << 2)
+#define G2D_INTEN_GCF (1 << 1)
+#define G2D_INTEN_SCF (1 << 0)
+
+/* G2D_INTC_PEND */
+#define G2D_INTP_ACMD_FIN (1 << 3)
+#define G2D_INTP_UCMD_FIN (1 << 2)
+#define G2D_INTP_GCMD_FIN (1 << 1)
+#define G2D_INTP_SCMD_FIN (1 << 0)
+
+/* G2D_DMA_COMMAND */
+#define G2D_DMA_HALT (1 << 2)
+#define G2D_DMA_CONTINUE (1 << 1)
+#define G2D_DMA_START (1 << 0)
+
+/* G2D_DMA_STATUS */
+#define G2D_DMA_LIST_DONE_COUNT (0xFF << 17)
+#define G2D_DMA_BITBLT_DONE_COUNT (0xFFFF << 1)
+#define G2D_DMA_DONE (1 << 0)
+#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
+
+/* G2D_DMA_HOLD_CMD */
+#define G2D_USET_HOLD (1 << 2)
+#define G2D_LIST_HOLD (1 << 1)
+#define G2D_BITBLT_HOLD (1 << 0)
+
+/* G2D_BITBLT_START */
+#define G2D_START_CASESEL (1 << 2)
+#define G2D_START_NHOLT (1 << 1)
+#define G2D_START_BITBLT (1 << 0)
+
+#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
+#define G2D_CMDLIST_NUM 64
+#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
+#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
+
+/* cmdlist data structure */
+struct g2d_cmdlist {
+ u32 head;
+ u32 data[G2D_CMDLIST_DATA_NUM];
+ u32 last; /* last data offset */
+};
+
+struct drm_exynos_pending_g2d_event {
+ struct drm_pending_event base;
+ struct drm_exynos_g2d_event event;
+};
+
+struct g2d_gem_node {
+ struct list_head list;
+ unsigned int handle;
+};
+
+struct g2d_cmdlist_node {
+ struct list_head list;
+ struct g2d_cmdlist *cmdlist;
+ unsigned int gem_nr;
+ dma_addr_t dma_addr;
+
+ struct drm_exynos_pending_g2d_event *event;
+};
+
+struct g2d_runqueue_node {
+ struct list_head list;
+ struct list_head run_cmdlist;
+ struct list_head event_list;
+ struct completion complete;
+ int async;
+};
+
+struct g2d_data {
+ struct device *dev;
+ struct clk *gate_clk;
+ struct resource *regs_res;
+ void __iomem *regs;
+ int irq;
+ struct workqueue_struct *g2d_workq;
+ struct work_struct runqueue_work;
+ struct exynos_drm_subdrv subdrv;
+ bool suspended;
+
+ /* cmdlist */
+ struct g2d_cmdlist_node *cmdlist_node;
+ struct list_head free_cmdlist;
+ struct mutex cmdlist_mutex;
+ dma_addr_t cmdlist_pool;
+ void *cmdlist_pool_virt;
+
+	/* runqueue */
+ struct g2d_runqueue_node *runqueue_node;
+ struct list_head runqueue;
+ struct mutex runqueue_mutex;
+ struct kmem_cache *runqueue_slab;
+};
+
+static int g2d_init_cmdlist(struct g2d_data *g2d)
+{
+ struct device *dev = g2d->dev;
+	struct g2d_cmdlist_node *node;
+ int nr;
+ int ret;
+
+ g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
+ &g2d->cmdlist_pool, GFP_KERNEL);
+ if (!g2d->cmdlist_pool_virt) {
+ dev_err(dev, "failed to allocate dma memory\n");
+ return -ENOMEM;
+ }
+
+	node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
+	if (!node) {
+		dev_err(dev, "failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	g2d->cmdlist_node = node;
+
+ for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
+ node[nr].cmdlist =
+ g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
+ node[nr].dma_addr =
+ g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
+
+ list_add_tail(&node[nr].list, &g2d->free_cmdlist);
+ }
+
+ return 0;
+
+err:
+ dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool);
+ return ret;
+}
+
+static void g2d_fini_cmdlist(struct g2d_data *g2d)
+{
+ struct device *dev = g2d->dev;
+
+ kfree(g2d->cmdlist_node);
+ dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool);
+}
+
+static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
+{
+ struct device *dev = g2d->dev;
+ struct g2d_cmdlist_node *node;
+
+ mutex_lock(&g2d->cmdlist_mutex);
+ if (list_empty(&g2d->free_cmdlist)) {
+ dev_err(dev, "there is no free cmdlist\n");
+ mutex_unlock(&g2d->cmdlist_mutex);
+ return NULL;
+ }
+
+ node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
+ list);
+ list_del_init(&node->list);
+ mutex_unlock(&g2d->cmdlist_mutex);
+
+ return node;
+}
+
+static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
+{
+ mutex_lock(&g2d->cmdlist_mutex);
+ list_move_tail(&node->list, &g2d->free_cmdlist);
+ mutex_unlock(&g2d->cmdlist_mutex);
+}
+
+static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
+ struct g2d_cmdlist_node *node)
+{
+ struct g2d_cmdlist_node *lnode;
+
+ if (list_empty(&g2d_priv->inuse_cmdlist))
+ goto add_to_list;
+
+ /* this links to base address of new cmdlist */
+ lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
+ struct g2d_cmdlist_node, list);
+ lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
+
+add_to_list:
+ list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
+
+ if (node->event)
+ list_add_tail(&node->event->base.link, &g2d_priv->event_list);
+}
+
+static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct g2d_cmdlist_node *node)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist *cmdlist = node->cmdlist;
+ dma_addr_t *addr;
+ int offset;
+ int i;
+
+ for (i = 0; i < node->gem_nr; i++) {
+ struct g2d_gem_node *gem_node;
+
+ gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
+ if (!gem_node) {
+ dev_err(g2d_priv->dev, "failed to allocate gem node\n");
+ return -ENOMEM;
+ }
+
+ offset = cmdlist->last - (i * 2 + 1);
+ gem_node->handle = cmdlist->data[offset];
+
+ addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
+ file);
+ if (IS_ERR(addr)) {
+ node->gem_nr = i;
+ kfree(gem_node);
+ return PTR_ERR(addr);
+ }
+
+ cmdlist->data[offset] = *addr;
+ list_add_tail(&gem_node->list, &g2d_priv->gem_list);
+ g2d_priv->gem_nr++;
+ }
+
+ return 0;
+}
+
+static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
+ struct drm_file *file,
+ unsigned int nr)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_gem_node *node, *n;
+
+ list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
+ if (!nr)
+ break;
+
+ exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
+ list_del_init(&node->list);
+ kfree(node);
+ nr--;
+ }
+}
+
+static void g2d_dma_start(struct g2d_data *g2d,
+ struct g2d_runqueue_node *runqueue_node)
+{
+ struct g2d_cmdlist_node *node =
+ list_first_entry(&runqueue_node->run_cmdlist,
+ struct g2d_cmdlist_node, list);
+
+ pm_runtime_get_sync(g2d->dev);
+ clk_enable(g2d->gate_clk);
+
+ /* interrupt enable */
+ writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
+ g2d->regs + G2D_INTEN);
+
+ writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
+ writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
+}
+
+static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
+{
+ struct g2d_runqueue_node *runqueue_node;
+
+ if (list_empty(&g2d->runqueue))
+ return NULL;
+
+ runqueue_node = list_first_entry(&g2d->runqueue,
+ struct g2d_runqueue_node, list);
+ list_del_init(&runqueue_node->list);
+ return runqueue_node;
+}
+
+static void g2d_free_runqueue_node(struct g2d_data *g2d,
+ struct g2d_runqueue_node *runqueue_node)
+{
+ if (!runqueue_node)
+ return;
+
+ mutex_lock(&g2d->cmdlist_mutex);
+ list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
+ mutex_unlock(&g2d->cmdlist_mutex);
+
+ kmem_cache_free(g2d->runqueue_slab, runqueue_node);
+}
+
+static void g2d_exec_runqueue(struct g2d_data *g2d)
+{
+ g2d->runqueue_node = g2d_get_runqueue_node(g2d);
+ if (g2d->runqueue_node)
+ g2d_dma_start(g2d, g2d->runqueue_node);
+}
+
+static void g2d_runqueue_worker(struct work_struct *work)
+{
+ struct g2d_data *g2d = container_of(work, struct g2d_data,
+ runqueue_work);
+
+
+ mutex_lock(&g2d->runqueue_mutex);
+ clk_disable(g2d->gate_clk);
+ pm_runtime_put_sync(g2d->dev);
+
+ complete(&g2d->runqueue_node->complete);
+ if (g2d->runqueue_node->async)
+ g2d_free_runqueue_node(g2d, g2d->runqueue_node);
+
+ if (g2d->suspended)
+ g2d->runqueue_node = NULL;
+ else
+ g2d_exec_runqueue(g2d);
+ mutex_unlock(&g2d->runqueue_mutex);
+}
+
+static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
+{
+ struct drm_device *drm_dev = g2d->subdrv.drm_dev;
+ struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
+ struct drm_exynos_pending_g2d_event *e;
+ struct timeval now;
+ unsigned long flags;
+
+ if (list_empty(&runqueue_node->event_list))
+ return;
+
+ e = list_first_entry(&runqueue_node->event_list,
+ struct drm_exynos_pending_g2d_event, base.link);
+
+ do_gettimeofday(&now);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ e->event.cmdlist_no = cmdlist_no;
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
+{
+ struct g2d_data *g2d = dev_id;
+ u32 pending;
+
+ pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
+ if (pending)
+ writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);
+
+ if (pending & G2D_INTP_GCMD_FIN) {
+ u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);
+
+ cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
+ G2D_DMA_LIST_DONE_COUNT_OFFSET;
+
+ g2d_finish_event(g2d, cmdlist_no);
+
+ writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
+ if (!(pending & G2D_INTP_ACMD_FIN)) {
+ writel_relaxed(G2D_DMA_CONTINUE,
+ g2d->regs + G2D_DMA_COMMAND);
+ }
+ }
+
+ if (pending & G2D_INTP_ACMD_FIN)
+ queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+
+ return IRQ_HANDLED;
+}
+
+static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+ int nr, bool for_addr)
+{
+ int reg_offset;
+ int index;
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ index = cmdlist->last - 2 * (i + 1);
+ reg_offset = cmdlist->data[index] & ~0xfffff000;
+
+ if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
+ goto err;
+ if (reg_offset % 4)
+ goto err;
+
+ switch (reg_offset) {
+ case G2D_SRC_BASE_ADDR:
+ case G2D_SRC_PLANE2_BASE_ADDR:
+ case G2D_DST_BASE_ADDR:
+ case G2D_DST_PLANE2_BASE_ADDR:
+ case G2D_PAT_BASE_ADDR:
+ case G2D_MSK_BASE_ADDR:
+ if (!for_addr)
+ goto err;
+ break;
+ default:
+ if (for_addr)
+ goto err;
+ break;
+ }
+ }
+
+ return 0;
+
+err:
+ dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+ return -EINVAL;
+}
+
+/* ioctl functions */
+int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_g2d_get_ver *ver = data;
+
+ ver->major = G2D_HW_MAJOR_VER;
+ ver->minor = G2D_HW_MINOR_VER;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
+
+int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct device *dev = g2d_priv->dev;
+ struct g2d_data *g2d;
+ struct drm_exynos_g2d_set_cmdlist *req = data;
+ struct drm_exynos_g2d_cmd *cmd;
+ struct drm_exynos_pending_g2d_event *e;
+ struct g2d_cmdlist_node *node;
+ struct g2d_cmdlist *cmdlist;
+ unsigned long flags;
+ int size;
+ int ret;
+
+ if (!dev)
+ return -ENODEV;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ node = g2d_get_cmdlist(g2d);
+ if (!node)
+ return -ENOMEM;
+
+ node->event = NULL;
+
+ if (req->event_type != G2D_EVENT_NOT) {
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ if (file->event_space < sizeof(e->event)) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ ret = -ENOMEM;
+ goto err;
+ }
+ file->event_space -= sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ e = kzalloc(sizeof(*node->event), GFP_KERNEL);
+ if (!e) {
+ dev_err(dev, "failed to allocate event\n");
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ e->event.base.type = DRM_EXYNOS_G2D_EVENT;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = req->user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+ node->event = e;
+ }
+
+ cmdlist = node->cmdlist;
+
+ cmdlist->last = 0;
+
+	/*
+	 * If the SFR registers are not cleared, this cmdlist inherits the
+	 * register values left over from the previous cmdlist. The G2D
+	 * hardware executes the SFR clear command and the command following
+	 * it at the same time, so that following command is ignored and
+	 * execution only resumes correctly one command later. A dummy
+	 * command is therefore inserted right after the SFR clear command.
+	 */
+ cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
+ cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
+ cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
+ cmdlist->data[cmdlist->last++] = 0;
+
+ if (node->event) {
+ cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
+ cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
+ }
+
+	/* Check the cmdlist size; the final two entries are reserved for G2D_BITBLT_START. */
+ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+ if (size > G2D_CMDLIST_DATA_NUM) {
+ dev_err(dev, "cmdlist size is too big\n");
+ ret = -EINVAL;
+ goto err_free_event;
+ }
+
+	cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
+
+ if (copy_from_user(cmdlist->data + cmdlist->last,
+ (void __user *)cmd,
+ sizeof(*cmd) * req->cmd_nr)) {
+ ret = -EFAULT;
+ goto err_free_event;
+ }
+ cmdlist->last += req->cmd_nr * 2;
+
+ ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+ if (ret < 0)
+ goto err_free_event;
+
+ node->gem_nr = req->cmd_gem_nr;
+ if (req->cmd_gem_nr) {
+ struct drm_exynos_g2d_cmd *cmd_gem;
+
+		cmd_gem = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd_gem;
+
+ if (copy_from_user(cmdlist->data + cmdlist->last,
+ (void __user *)cmd_gem,
+ sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+ ret = -EFAULT;
+ goto err_free_event;
+ }
+ cmdlist->last += req->cmd_gem_nr * 2;
+
+ ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+ if (ret < 0)
+ goto err_free_event;
+
+ ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+ if (ret < 0)
+ goto err_unmap;
+ }
+
+ cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
+ cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
+
+ /* head */
+ cmdlist->head = cmdlist->last / 2;
+
+ /* tail */
+ cmdlist->data[cmdlist->last] = 0;
+
+ g2d_add_cmdlist_to_inuse(g2d_priv, node);
+
+ return 0;
+
+err_unmap:
+ g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+err_free_event:
+ if (node->event) {
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ kfree(node->event);
+ }
+err:
+ g2d_put_cmdlist(g2d, node);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
+
+int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct device *dev = g2d_priv->dev;
+ struct g2d_data *g2d;
+ struct drm_exynos_g2d_exec *req = data;
+ struct g2d_runqueue_node *runqueue_node;
+ struct list_head *run_cmdlist;
+ struct list_head *event_list;
+
+ if (!dev)
+ return -ENODEV;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
+ if (!runqueue_node) {
+ dev_err(dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ run_cmdlist = &runqueue_node->run_cmdlist;
+ event_list = &runqueue_node->event_list;
+ INIT_LIST_HEAD(run_cmdlist);
+ INIT_LIST_HEAD(event_list);
+ init_completion(&runqueue_node->complete);
+ runqueue_node->async = req->async;
+
+ list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
+ list_splice_init(&g2d_priv->event_list, event_list);
+
+ if (list_empty(run_cmdlist)) {
+ dev_err(dev, "there is no inuse cmdlist\n");
+ kmem_cache_free(g2d->runqueue_slab, runqueue_node);
+ return -EPERM;
+ }
+
+ mutex_lock(&g2d->runqueue_mutex);
+ list_add_tail(&runqueue_node->list, &g2d->runqueue);
+ if (!g2d->runqueue_node)
+ g2d_exec_runqueue(g2d);
+ mutex_unlock(&g2d->runqueue_mutex);
+
+ if (runqueue_node->async)
+ goto out;
+
+ wait_for_completion(&runqueue_node->complete);
+ g2d_free_runqueue_node(g2d, runqueue_node);
+
+out:
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
+
+static int g2d_open(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv;
+
+ g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
+ if (!g2d_priv) {
+ dev_err(dev, "failed to allocate g2d private data\n");
+ return -ENOMEM;
+ }
+
+ g2d_priv->dev = dev;
+ file_priv->g2d_priv = g2d_priv;
+
+ INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
+ INIT_LIST_HEAD(&g2d_priv->event_list);
+ INIT_LIST_HEAD(&g2d_priv->gem_list);
+
+ return 0;
+}
+
+static void g2d_close(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_data *g2d;
+ struct g2d_cmdlist_node *node, *n;
+
+ if (!dev)
+ return;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return;
+
+ mutex_lock(&g2d->cmdlist_mutex);
+ list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+ list_move_tail(&node->list, &g2d->free_cmdlist);
+ mutex_unlock(&g2d->cmdlist_mutex);
+
+ g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+
+ kfree(file_priv->g2d_priv);
+}
+
+static int __devinit g2d_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct g2d_data *g2d;
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ g2d = kzalloc(sizeof(*g2d), GFP_KERNEL);
+ if (!g2d) {
+ dev_err(dev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
+ sizeof(struct g2d_runqueue_node), 0, 0, NULL);
+ if (!g2d->runqueue_slab) {
+ ret = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ g2d->dev = dev;
+
+ g2d->g2d_workq = create_singlethread_workqueue("g2d");
+ if (!g2d->g2d_workq) {
+ dev_err(dev, "failed to create workqueue\n");
+ ret = -EINVAL;
+ goto err_destroy_slab;
+ }
+
+ INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
+ INIT_LIST_HEAD(&g2d->free_cmdlist);
+ INIT_LIST_HEAD(&g2d->runqueue);
+
+ mutex_init(&g2d->cmdlist_mutex);
+ mutex_init(&g2d->runqueue_mutex);
+
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0)
+ goto err_destroy_workqueue;
+
+ g2d->gate_clk = clk_get(dev, "fimg2d");
+ if (IS_ERR(g2d->gate_clk)) {
+ dev_err(dev, "failed to get gate clock\n");
+ ret = PTR_ERR(g2d->gate_clk);
+ goto err_fini_cmdlist;
+ }
+
+ pm_runtime_enable(dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get I/O memory\n");
+ ret = -ENOENT;
+ goto err_put_clk;
+ }
+
+ g2d->regs_res = request_mem_region(res->start, resource_size(res),
+ dev_name(dev));
+ if (!g2d->regs_res) {
+ dev_err(dev, "failed to request I/O memory\n");
+ ret = -ENOENT;
+ goto err_put_clk;
+ }
+
+ g2d->regs = ioremap(res->start, resource_size(res));
+ if (!g2d->regs) {
+ dev_err(dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto err_release_res;
+ }
+
+ g2d->irq = platform_get_irq(pdev, 0);
+ if (g2d->irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ ret = g2d->irq;
+ goto err_unmap_base;
+ }
+
+ ret = request_irq(g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d);
+ if (ret < 0) {
+ dev_err(dev, "irq request failed\n");
+ goto err_unmap_base;
+ }
+
+ platform_set_drvdata(pdev, g2d);
+
+ subdrv = &g2d->subdrv;
+ subdrv->dev = dev;
+ subdrv->open = g2d_open;
+ subdrv->close = g2d_close;
+
+ ret = exynos_drm_subdrv_register(subdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm g2d device\n");
+ goto err_free_irq;
+ }
+
+ dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
+ G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
+
+ return 0;
+
+err_free_irq:
+ free_irq(g2d->irq, g2d);
+err_unmap_base:
+ iounmap(g2d->regs);
+err_release_res:
+ release_resource(g2d->regs_res);
+ kfree(g2d->regs_res);
+err_put_clk:
+ pm_runtime_disable(dev);
+ clk_put(g2d->gate_clk);
+err_fini_cmdlist:
+ g2d_fini_cmdlist(g2d);
+err_destroy_workqueue:
+ destroy_workqueue(g2d->g2d_workq);
+err_destroy_slab:
+ kmem_cache_destroy(g2d->runqueue_slab);
+err_free_mem:
+ kfree(g2d);
+ return ret;
+}
+
+static int __devexit g2d_remove(struct platform_device *pdev)
+{
+ struct g2d_data *g2d = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&g2d->runqueue_work);
+ exynos_drm_subdrv_unregister(&g2d->subdrv);
+ free_irq(g2d->irq, g2d);
+
+ while (g2d->runqueue_node) {
+ g2d_free_runqueue_node(g2d, g2d->runqueue_node);
+ g2d->runqueue_node = g2d_get_runqueue_node(g2d);
+ }
+
+ iounmap(g2d->regs);
+ release_resource(g2d->regs_res);
+ kfree(g2d->regs_res);
+
+ pm_runtime_disable(&pdev->dev);
+ clk_put(g2d->gate_clk);
+
+ g2d_fini_cmdlist(g2d);
+ destroy_workqueue(g2d->g2d_workq);
+ kmem_cache_destroy(g2d->runqueue_slab);
+ kfree(g2d);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int g2d_suspend(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+
+ mutex_lock(&g2d->runqueue_mutex);
+ g2d->suspended = true;
+ mutex_unlock(&g2d->runqueue_mutex);
+
+ while (g2d->runqueue_node)
+ /* FIXME: good range? */
+ usleep_range(500, 1000);
+
+ flush_work_sync(&g2d->runqueue_work);
+
+ return 0;
+}
+
+static int g2d_resume(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+
+ g2d->suspended = false;
+ g2d_exec_runqueue(g2d);
+
+ return 0;
+}
+#endif
+
+SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+
+struct platform_driver g2d_driver = {
+ .probe = g2d_probe,
+ .remove = __devexit_p(g2d_remove),
+ .driver = {
+ .name = "s5p-g2d",
+ .owner = THIS_MODULE,
+ .pm = &g2d_pm_ops,
+ },
+};
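Taken together, the three ioctls above give userspace a version query, cmdlist submission, and execution kick. A hedged sketch of that flow; the struct fields match the driver code above, but the DRM_IOCTL_EXYNOS_G2D_* macro names and the UAPI header path are assumed to come from the exynos_drm.h added by this series. Register offsets must lie in the 0x0104..0x0880 window enforced by g2d_check_reg_offset():

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>	/* drmIoctl() */
#include "exynos_drm.h"	/* UAPI header from this series; path assumed */

int main(void)
{
	struct drm_exynos_g2d_get_ver ver;
	struct drm_exynos_g2d_set_cmdlist cmdlist;
	struct drm_exynos_g2d_cmd cmd[1];
	struct drm_exynos_g2d_exec exec;
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;

	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_GET_VER, &ver) == 0)
		printf("G2D hw %d.%d\n", ver.major, ver.minor);

	/* One (register offset, value) pair; 0x0104 is the first valid SFR. */
	memset(&cmdlist, 0, sizeof(cmdlist));
	cmd[0].offset = 0x0104;
	cmd[0].data = 0;
	cmdlist.cmd = (uint64_t)(unsigned long)cmd;
	cmdlist.cmd_nr = 1;
	cmdlist.event_type = G2D_EVENT_NOT;	/* no completion event */
	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, &cmdlist))
		goto out;

	memset(&exec, 0, sizeof(exec));
	exec.async = 0;				/* block until the blit finishes */
	drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_EXEC, &exec);
out:
	close(fd);
	return 0;
}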
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
new file mode 100644
index 000000000000..1a9c7ca8c15b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_DRM_EXYNOS_G2D
+extern int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+#else
+static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+#endif
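The header relies on the usual Kconfig stub pattern: when CONFIG_DRM_EXYNOS_G2D is disabled, callers still compile against static inline stubs that simply fail with -ENODEV at run time, so no call site needs its own #ifdef. The same shape for a hypothetical CONFIG_FOO feature (foo_do_thing is illustrative):

#ifdef CONFIG_FOO
extern int foo_do_thing(struct drm_device *dev);
#else
static inline int foo_do_thing(struct drm_device *dev)
{
	/* feature compiled out; callers see a clean runtime error */
	return -ENODEV;
}
#endif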
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 1dffa8359f88..fc91293c4560 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -66,6 +66,22 @@ static int check_gem_flags(unsigned int flags)
return 0;
}
+static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
+ struct vm_area_struct *vma)
+{
+ DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+
+	/* non-cacheable by default. */
+ if (obj->flags & EXYNOS_BO_CACHABLE)
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ else if (obj->flags & EXYNOS_BO_WC)
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
+ vma->vm_page_prot =
+ pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+}
+
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
if (!IS_NONCONTIG_BUFFER(flags)) {
@@ -80,7 +96,7 @@ out:
return roundup(size, PAGE_SIZE);
}
-static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
+struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
struct inode *inode;
@@ -180,6 +196,7 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
}
npages = obj->size >> PAGE_SHIFT;
+ buf->page_size = PAGE_SIZE;
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!buf->sgt) {
@@ -262,24 +279,24 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
struct drm_gem_object *obj;
+ struct exynos_drm_gem_buf *buf;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (!exynos_gem_obj)
- return;
-
obj = &exynos_gem_obj->base;
+ buf = exynos_gem_obj->buffer;
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
- if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
- exynos_gem_obj->buffer->pages)
+ if (!buf->pages)
+ return;
+
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
exynos_drm_gem_put_pages(obj);
else
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
- exynos_gem_obj->buffer);
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
- exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
+ exynos_drm_fini_buf(obj->dev, buf);
exynos_gem_obj->buffer = NULL;
if (obj->map_list.map)
@@ -292,7 +309,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
exynos_gem_obj = NULL;
}
-static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -493,8 +510,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
vma->vm_flags |= (VM_IO | VM_RESERVED);
- /* in case of direct mapping, always having non-cachable attribute */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ update_vm_cache_attr(exynos_gem_obj, vma);
vm_size = usize = vma->vm_end - vma->vm_start;
@@ -588,6 +604,32 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
+int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_exynos_gem_info *args = data;
+ struct drm_gem_object *obj;
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ args->flags = exynos_gem_obj->flags;
+ args->size = exynos_gem_obj->size;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -597,8 +639,17 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buf;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+ buf = exynos_gem_obj->buffer;
+
+ if (obj->import_attach)
+ drm_prime_gem_destroy(obj, buf->sgt);
+
exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
@@ -724,6 +775,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -735,8 +788,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
+ obj = vma->vm_private_data;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_gem_flags(exynos_gem_obj->flags);
+ if (ret) {
+ drm_gem_vm_close(vma);
+ drm_gem_free_mmap_offset(obj);
+ return ret;
+ }
+
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
+ update_vm_cache_attr(exynos_gem_obj, vma);
+
return ret;
}
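update_vm_cache_attr() is what turns the userspace-visible EXYNOS_BO_* flags into an actual mapping attribute, both on the direct-mapping path and in the fault path above. A hedged userspace sketch of allocating a write-combined buffer, assuming the drm_exynos_gem_create ioctl layout from the exynos uapi header of this period; verify the names against your kernel's exynos_drm.h before relying on them:

#include <sys/ioctl.h>
#include <drm/exynos_drm.h>	/* uapi: struct drm_exynos_gem_create etc. */

/* assumption: DRM_IOCTL_EXYNOS_GEM_CREATE and EXYNOS_BO_WC exist under
 * these names in the uapi header of this era */
static int create_wc_bo(int drm_fd, unsigned long size, unsigned int *handle)
{
	struct drm_exynos_gem_create req = {
		.size	= size,
		.flags	= EXYNOS_BO_WC,	/* mmap gets a write-combined prot */
	};

	if (ioctl(drm_fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) < 0)
		return -1;

	*handle = req.handle;
	return 0;
}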
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 4ed842039505..14d038b6cb02 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -40,6 +40,7 @@
* device address with IOMMU.
* @sgt: sg table to transfer page data.
* @pages: contain all pages to allocated memory region.
+ * @page_size: could be 4K, 64K or 1MB.
* @size: size of allocated memory region.
*/
struct exynos_drm_gem_buf {
@@ -47,6 +48,7 @@ struct exynos_drm_gem_buf {
dma_addr_t dma_addr;
struct sg_table *sgt;
struct page **pages;
+ unsigned long page_size;
unsigned long size;
};
@@ -74,9 +76,15 @@ struct exynos_drm_gem_obj {
unsigned int flags;
};
+struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+
/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
+/* create a private gem object and initialize it. */
+struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+ unsigned long size);
+
/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
@@ -119,6 +127,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* get buffer information to memory region allocated by gem. */
+int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 3424463676e0..5d9d2c2f8f3f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -37,6 +37,8 @@ struct drm_hdmi_context {
struct exynos_drm_subdrv subdrv;
struct exynos_drm_hdmi_context *hdmi_ctx;
struct exynos_drm_hdmi_context *mixer_ctx;
+
+ bool enabled[MIXER_WIN_NR];
};
void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
@@ -189,23 +191,34 @@ static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
DRM_DEBUG_KMS("%s\n", __FILE__);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- if (hdmi_ops && hdmi_ops->disable)
- hdmi_ops->disable(ctx->hdmi_ctx->ctx);
- break;
- default:
- DRM_DEBUG_KMS("unkown dps mode: %d\n", mode);
- break;
+ if (mixer_ops && mixer_ops->dpms)
+ mixer_ops->dpms(ctx->mixer_ctx->ctx, mode);
+
+ if (hdmi_ops && hdmi_ops->dpms)
+ hdmi_ops->dpms(ctx->hdmi_ctx->ctx, mode);
+}
+
+static void drm_hdmi_apply(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ if (!ctx->enabled[i])
+ continue;
+ if (mixer_ops && mixer_ops->win_commit)
+ mixer_ops->win_commit(ctx->mixer_ctx->ctx, i);
}
+
+ if (hdmi_ops && hdmi_ops->commit)
+ hdmi_ops->commit(ctx->hdmi_ctx->ctx);
}
static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
.dpms = drm_hdmi_dpms,
+ .apply = drm_hdmi_apply,
.enable_vblank = drm_hdmi_enable_vblank,
.disable_vblank = drm_hdmi_disable_vblank,
.mode_fixup = drm_hdmi_mode_fixup,
@@ -228,21 +241,37 @@ static void drm_mixer_mode_set(struct device *subdrv_dev,
static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("%s\n", __FILE__);
+	if (win < 0 || win >= MIXER_WIN_NR) {
+ DRM_ERROR("mixer window[%d] is wrong\n", win);
+ return;
+ }
+
if (mixer_ops && mixer_ops->win_commit)
- mixer_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
+ mixer_ops->win_commit(ctx->mixer_ctx->ctx, win);
+
+ ctx->enabled[win] = true;
}
static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("%s\n", __FILE__);
+	if (win < 0 || win >= MIXER_WIN_NR) {
+ DRM_ERROR("mixer window[%d] is wrong\n", win);
+ return;
+ }
+
if (mixer_ops && mixer_ops->win_disable)
- mixer_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
+ mixer_ops->win_disable(ctx->mixer_ctx->ctx, win);
+
+ ctx->enabled[win] = false;
}
static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
@@ -335,25 +364,6 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
return 0;
}
-static int hdmi_runtime_suspend(struct device *dev)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
{
struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
@@ -372,6 +382,5 @@ struct platform_driver exynos_drm_common_hdmi_driver = {
.driver = {
.name = "exynos-drm-hdmi",
.owner = THIS_MODULE,
- .pm = &hdmi_pm_ops,
},
};
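The new enabled[] array exists so that window state survives a power transition: drm_mixer_commit()/drm_mixer_disable() record which mixer windows are live, and drm_hdmi_apply() replays the live ones once the hardware is powered again. The track-and-replay pattern in isolation, with illustrative names:

#define NR_WIN 3

struct win_state {
	bool enabled[NR_WIN];
};

static void win_commit(struct win_state *s, int win)
{
	/* program the hardware window registers here */
	s->enabled[win] = true;	/* remember it for the next power-up */
}

static void replay_committed(struct win_state *s)
{
	int i;

	/* register contents were lost while powered off; reprogram every
	 * window that was committed before the transition */
	for (i = 0; i < NR_WIN; i++)
		if (s->enabled[i])
			win_commit(s, i);
}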
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index f3ae192c8dcf..bd8126996e52 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -26,6 +26,9 @@
#ifndef _EXYNOS_DRM_HDMI_H_
#define _EXYNOS_DRM_HDMI_H_
+#define MIXER_WIN_NR 3
+#define MIXER_DEFAULT_WIN 0
+
/*
* exynos hdmi common context structure.
*
@@ -54,13 +57,14 @@ struct exynos_hdmi_ops {
void (*get_max_resol)(void *ctx, unsigned int *width,
unsigned int *height);
void (*commit)(void *ctx);
- void (*disable)(void *ctx);
+ void (*dpms)(void *ctx, int mode);
};
struct exynos_mixer_ops {
/* manager */
int (*enable_vblank)(void *ctx, int pipe);
void (*disable_vblank)(void *ctx);
+ void (*dpms)(void *ctx, int mode);
/* overlay */
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
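These two structures are small vtables: each backend fills one in at probe time and hands it to the common exynos-drm-hdmi glue, which then drives the hardware only through the hooks. A sketch of a backend registering itself; the my_* names are illustrative, not from the patch:

static int my_enable_vblank(void *ctx, int pipe)
{
	return 0;	/* enable the vblank interrupt for this pipe */
}

static void my_disable_vblank(void *ctx)
{
}

static void my_dpms(void *ctx, int mode)
{
}

static struct exynos_mixer_ops my_mixer_ops = {
	.enable_vblank	= my_enable_vblank,
	.disable_vblank	= my_disable_vblank,
	.dpms		= my_dpms,
	/* .win_mode_set, .win_commit, .win_disable filled in likewise */
};

static int my_probe(struct platform_device *pdev)
{
	/* hand the callbacks to the common glue once, at probe time */
	exynos_mixer_ops_register(&my_mixer_ops);
	return 0;
}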
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index f92fe4c6174a..c4c6525d4653 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -41,8 +41,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
container_of(plane, struct exynos_plane, base);
struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
struct exynos_drm_crtc_pos pos;
- unsigned int x = src_x >> 16;
- unsigned int y = src_y >> 16;
int ret;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -53,10 +51,12 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pos.crtc_w = crtc_w;
pos.crtc_h = crtc_h;
- pos.fb_x = x;
- pos.fb_y = y;
+ /* considering 16.16 fixed point of source values */
+ pos.fb_x = src_x >> 16;
+ pos.fb_y = src_y >> 16;
+ pos.src_w = src_w >> 16;
+ pos.src_h = src_h >> 16;
- /* TODO: scale feature */
ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
if (ret < 0)
return ret;
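The src_* values a DRM plane receives are 16.16 fixed point, so the integer pixel count lives in the high 16 bits; that is all the >> 16 above does. A small standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a 1920-pixel-wide source region, encoded as 16.16 fixed point */
	uint32_t src_w = 1920u << 16;		/* 0x07800000 */

	printf("raw = 0x%08x, pixels = %u\n", src_w, src_w >> 16);
	/* prints: raw = 0x07800000, pixels = 1920 */
	return 0;
}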
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index b00353876458..a137e9e39a33 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -57,18 +57,16 @@ struct hdmi_resources {
struct hdmi_context {
struct device *dev;
struct drm_device *drm_dev;
- struct fb_videomode *default_timing;
- unsigned int is_v13:1;
- unsigned int default_win;
- unsigned int default_bpp;
- bool hpd_handle;
- bool enabled;
+ bool hpd;
+ bool powered;
+ bool is_v13;
+ bool dvi_mode;
+ struct mutex hdmi_mutex;
struct resource *regs_res;
void __iomem *regs;
- unsigned int irq;
- struct workqueue_struct *wq;
- struct work_struct hotplug_work;
+	int external_irq;
+	int internal_irq;
struct i2c_client *ddc_port;
struct i2c_client *hdmiphy_port;
@@ -78,6 +76,9 @@ struct hdmi_context {
struct hdmi_resources res;
void *parent_ctx;
+
+ void (*cfg_hpd)(bool external);
+ int (*get_hpd)(void);
};
/* HDMI Version 1.3 */
@@ -361,6 +362,13 @@ static const u8 hdmiphy_conf27_027[32] = {
0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
};
+static const u8 hdmiphy_conf74_176[32] = {
+ 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x5b, 0xef, 0x08,
+ 0x81, 0xa0, 0xb9, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+};
+
static const u8 hdmiphy_conf74_25[32] = {
0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
@@ -750,6 +758,63 @@ static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
},
};
+static const struct hdmi_preset_conf hdmi_conf_1080p30 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v2_blank = {0x65, 0x04},
+ .v1_blank = {0x2d, 0x00},
+ .v_line = {0x65, 0x04},
+ .h_line = {0x98, 0x08},
+ .hsync_pol = {0x00},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f0 = {0xff, 0xff},
+ .v_blank_f1 = {0xff, 0xff},
+ .h_sync_start = {0x56, 0x00},
+ .h_sync_end = {0x82, 0x00},
+ .v_sync_line_bef_2 = {0x09, 0x00},
+ .v_sync_line_bef_1 = {0x04, 0x00},
+ .v_sync_line_aft_2 = {0xff, 0xff},
+ .v_sync_line_aft_1 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_2 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_1 = {0xff, 0xff},
+ .v_blank_f2 = {0xff, 0xff},
+ .v_blank_f3 = {0xff, 0xff},
+ .v_blank_f4 = {0xff, 0xff},
+ .v_blank_f5 = {0xff, 0xff},
+ .v_sync_line_aft_3 = {0xff, 0xff},
+ .v_sync_line_aft_4 = {0xff, 0xff},
+ .v_sync_line_aft_5 = {0xff, 0xff},
+ .v_sync_line_aft_6 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_3 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_4 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_5 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_6 = {0xff, 0xff},
+ .vact_space_1 = {0xff, 0xff},
+ .vact_space_2 = {0xff, 0xff},
+ .vact_space_3 = {0xff, 0xff},
+ .vact_space_4 = {0xff, 0xff},
+ .vact_space_5 = {0xff, 0xff},
+ .vact_space_6 = {0xff, 0xff},
+		/* others: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x18, 0x01, 0x80, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x00, 0x00, /* vact_st3 */
+ 0x00, 0x00, /* vact_st4 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ 0x00, /* 3d FP */
+ },
+};
+
static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
.core = {
.h_blank = {0xd0, 0x02},
@@ -864,6 +929,7 @@ static const struct hdmi_conf hdmi_confs[] = {
{ 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
{ 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
{ 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
{ 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
{ 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
@@ -1194,12 +1260,8 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
static bool hdmi_is_connected(void *ctx)
{
struct hdmi_context *hdata = ctx;
- u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
-
- if (val)
- return true;
- return false;
+ return hdata->hpd;
}
static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
@@ -1215,10 +1277,12 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
if (raw_edid) {
+ hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
* EDID_LENGTH, len));
- DRM_DEBUG_KMS("width[%d] x height[%d]\n",
- raw_edid->width_cm, raw_edid->height_cm);
+ DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
+ (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+ raw_edid->width_cm, raw_edid->height_cm);
} else {
return -ENODEV;
}
@@ -1289,28 +1353,6 @@ static int hdmi_check_timing(void *ctx, void *timing)
return hdmi_v14_check_timing(check_timing);
}
-static int hdmi_display_power_on(void *ctx, int mode)
-{
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- DRM_DEBUG_KMS("hdmi [on]\n");
- break;
- case DRM_MODE_DPMS_STANDBY:
- break;
- case DRM_MODE_DPMS_SUSPEND:
- break;
- case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("hdmi [off]\n");
- break;
- default:
- break;
- }
-
- return 0;
-}
-
static void hdmi_set_acr(u32 freq, u8 *acr)
{
u32 n, cts;
@@ -1463,10 +1505,7 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
{
- u32 mod;
-
- mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
- if (mod & HDMI_DVI_MODE_EN)
+ if (hdata->dvi_mode)
return;
hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0);
@@ -1478,9 +1517,6 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
{
u32 reg;
- /* disable hpd handle for drm */
- hdata->hpd_handle = false;
-
if (hdata->is_v13)
reg = HDMI_V13_CORE_RSTOUT;
else
@@ -1491,16 +1527,10 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
mdelay(10);
hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
mdelay(10);
-
- /* enable hpd handle for drm */
- hdata->hpd_handle = true;
}
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- /* disable hpd handle for drm */
- hdata->hpd_handle = false;
-
/* enable HPD interrupts */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1514,6 +1544,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
/* disable bluescreen */
hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+ if (hdata->dvi_mode) {
+ /* choose DVI mode */
+ hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
+ HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
+ hdmi_reg_writeb(hdata, HDMI_CON_2,
+ HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
+ }
+
if (hdata->is_v13) {
/* choose bluescreen (fecal) color */
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
@@ -1535,9 +1573,6 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
}
-
- /* enable hpd handle for drm */
- hdata->hpd_handle = true;
}
static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
@@ -1890,8 +1925,11 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
hdmiphy_conf_reset(hdata);
hdmiphy_conf_apply(hdata);
+ mutex_lock(&hdata->hdmi_mutex);
hdmi_conf_reset(hdata);
hdmi_conf_init(hdata);
+ mutex_unlock(&hdata->hdmi_mutex);
+
hdmi_audio_init(hdata);
/* setting core registers */
@@ -1971,20 +2009,86 @@ static void hdmi_commit(void *ctx)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
hdmi_conf_apply(hdata);
+}
+
+static void hdmi_poweron(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&hdata->hdmi_mutex);
+ if (hdata->powered) {
+ mutex_unlock(&hdata->hdmi_mutex);
+ return;
+ }
+
+ hdata->powered = true;
+
+ if (hdata->cfg_hpd)
+ hdata->cfg_hpd(true);
+ mutex_unlock(&hdata->hdmi_mutex);
+
+ pm_runtime_get_sync(hdata->dev);
+
+ regulator_bulk_enable(res->regul_count, res->regul_bulk);
+ clk_enable(res->hdmiphy);
+ clk_enable(res->hdmi);
+ clk_enable(res->sclk_hdmi);
+}
+
+static void hdmi_poweroff(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&hdata->hdmi_mutex);
+ if (!hdata->powered)
+ goto out;
+ mutex_unlock(&hdata->hdmi_mutex);
+
+	/*
+	 * The TV power domain can only be turned off once the hdmiphy is in
+	 * a known state, and the phy's reset state appears to satisfy that
+	 * condition.
+	 */
+ hdmiphy_conf_reset(hdata);
+
+ clk_disable(res->sclk_hdmi);
+ clk_disable(res->hdmi);
+ clk_disable(res->hdmiphy);
+ regulator_bulk_disable(res->regul_count, res->regul_bulk);
+
+ pm_runtime_put_sync(hdata->dev);
- hdata->enabled = true;
+ mutex_lock(&hdata->hdmi_mutex);
+ if (hdata->cfg_hpd)
+ hdata->cfg_hpd(false);
+
+ hdata->powered = false;
+
+out:
+ mutex_unlock(&hdata->hdmi_mutex);
}
-static void hdmi_disable(void *ctx)
+static void hdmi_dpms(void *ctx, int mode)
{
struct hdmi_context *hdata = ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (hdata->enabled) {
- hdmi_audio_control(hdata, false);
- hdmiphy_conf_reset(hdata);
- hdmi_conf_reset(hdata);
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ hdmi_poweron(hdata);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ hdmi_poweroff(hdata);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
}
}
@@ -1993,30 +2097,35 @@ static struct exynos_hdmi_ops hdmi_ops = {
.is_connected = hdmi_is_connected,
.get_edid = hdmi_get_edid,
.check_timing = hdmi_check_timing,
- .power_on = hdmi_display_power_on,
/* manager */
.mode_fixup = hdmi_mode_fixup,
.mode_set = hdmi_mode_set,
.get_max_resol = hdmi_get_max_resol,
.commit = hdmi_commit,
- .disable = hdmi_disable,
+ .dpms = hdmi_dpms,
};
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
-static void hdmi_hotplug_func(struct work_struct *work)
+static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
{
- struct hdmi_context *hdata =
- container_of(work, struct hdmi_context, hotplug_work);
- struct exynos_drm_hdmi_context *ctx =
- (struct exynos_drm_hdmi_context *)hdata->parent_ctx;
+ struct exynos_drm_hdmi_context *ctx = arg;
+ struct hdmi_context *hdata = ctx->ctx;
+
+ if (!hdata->get_hpd)
+ goto out;
+
+ mutex_lock(&hdata->hdmi_mutex);
+ hdata->hpd = hdata->get_hpd();
+ mutex_unlock(&hdata->hdmi_mutex);
- drm_helper_hpd_irq_event(ctx->drm_dev);
+ if (ctx->drm_dev)
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+
+out:
+ return IRQ_HANDLED;
}
-static irqreturn_t hdmi_irq_handler(int irq, void *arg)
+static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
{
struct exynos_drm_hdmi_context *ctx = arg;
struct hdmi_context *hdata = ctx->ctx;
@@ -2025,19 +2134,28 @@ static irqreturn_t hdmi_irq_handler(int irq, void *arg)
intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
/* clearing flags for HPD plug/unplug */
if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
- DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle);
+ DRM_DEBUG_KMS("unplugged\n");
hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
HDMI_INTC_FLAG_HPD_UNPLUG);
}
if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
- DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle);
+ DRM_DEBUG_KMS("plugged\n");
hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
HDMI_INTC_FLAG_HPD_PLUG);
}
- if (ctx->drm_dev && hdata->hpd_handle)
- queue_work(hdata->wq, &hdata->hotplug_work);
+ mutex_lock(&hdata->hdmi_mutex);
+ hdata->hpd = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
+ if (hdata->powered && hdata->hpd) {
+ mutex_unlock(&hdata->hdmi_mutex);
+ goto out;
+ }
+ mutex_unlock(&hdata->hdmi_mutex);
+
+ if (ctx->drm_dev)
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+out:
return IRQ_HANDLED;
}
@@ -2131,68 +2249,6 @@ static int hdmi_resources_cleanup(struct hdmi_context *hdata)
return 0;
}
-static void hdmi_resource_poweron(struct hdmi_context *hdata)
-{
- struct hdmi_resources *res = &hdata->res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- /* turn HDMI power on */
- regulator_bulk_enable(res->regul_count, res->regul_bulk);
- /* power-on hdmi physical interface */
- clk_enable(res->hdmiphy);
- /* turn clocks on */
- clk_enable(res->hdmi);
- clk_enable(res->sclk_hdmi);
-
- hdmiphy_conf_reset(hdata);
- hdmi_conf_reset(hdata);
- hdmi_conf_init(hdata);
- hdmi_audio_init(hdata);
-}
-
-static void hdmi_resource_poweroff(struct hdmi_context *hdata)
-{
- struct hdmi_resources *res = &hdata->res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- /* turn clocks off */
- clk_disable(res->sclk_hdmi);
- clk_disable(res->hdmi);
- /* power-off hdmiphy */
- clk_disable(res->hdmiphy);
- /* turn HDMI power off */
- regulator_bulk_disable(res->regul_count, res->regul_bulk);
-}
-
-static int hdmi_runtime_suspend(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
-
- DRM_DEBUG_KMS("%s\n", __func__);
-
- hdmi_resource_poweroff(ctx->ctx);
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
-
- DRM_DEBUG_KMS("%s\n", __func__);
-
- hdmi_resource_poweron(ctx->ctx);
-
- return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2237,15 +2293,16 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ mutex_init(&hdata->hdmi_mutex);
+
drm_hdmi_ctx->ctx = (void *)hdata;
hdata->parent_ctx = (void *)drm_hdmi_ctx;
platform_set_drvdata(pdev, drm_hdmi_ctx);
hdata->is_v13 = pdata->is_v13;
- hdata->default_win = pdata->default_win;
- hdata->default_timing = &pdata->timing;
- hdata->default_bpp = pdata->bpp;
+ hdata->cfg_hpd = pdata->cfg_hpd;
+ hdata->get_hpd = pdata->get_hpd;
hdata->dev = dev;
ret = hdmi_resources_init(hdata);
@@ -2294,41 +2351,49 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
hdata->hdmiphy_port = hdmi_hdmiphy;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- DRM_ERROR("get interrupt resource failed.\n");
- ret = -ENXIO;
+ hdata->external_irq = platform_get_irq_byname(pdev, "external_irq");
+ if (hdata->external_irq < 0) {
+		DRM_ERROR("failed to get platform external irq\n");
+ ret = hdata->external_irq;
goto err_hdmiphy;
}
- /* create workqueue and hotplug work */
- hdata->wq = alloc_workqueue("exynos-drm-hdmi",
- WQ_UNBOUND | WQ_NON_REENTRANT, 1);
- if (hdata->wq == NULL) {
- DRM_ERROR("Failed to create workqueue.\n");
- ret = -ENOMEM;
+ hdata->internal_irq = platform_get_irq_byname(pdev, "internal_irq");
+ if (hdata->internal_irq < 0) {
+ DRM_ERROR("failed to get platform internal irq\n");
+ ret = hdata->internal_irq;
goto err_hdmiphy;
}
- INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func);
- /* register hpd interrupt */
- ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi",
- drm_hdmi_ctx);
+ ret = request_threaded_irq(hdata->external_irq, NULL,
+ hdmi_external_irq_thread, IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "hdmi_external", drm_hdmi_ctx);
if (ret) {
- DRM_ERROR("request interrupt failed.\n");
- goto err_workqueue;
+		DRM_ERROR("failed to register hdmi external interrupt\n");
+ goto err_hdmiphy;
+ }
+
+ if (hdata->cfg_hpd)
+ hdata->cfg_hpd(false);
+
+ ret = request_threaded_irq(hdata->internal_irq, NULL,
+ hdmi_internal_irq_thread, IRQF_ONESHOT,
+ "hdmi_internal", drm_hdmi_ctx);
+ if (ret) {
+ DRM_ERROR("failed to register hdmi internal interrupt\n");
+ goto err_free_irq;
}
- hdata->irq = res->start;
/* register specific callbacks to common hdmi. */
exynos_hdmi_ops_register(&hdmi_ops);
- hdmi_resource_poweron(hdata);
+ pm_runtime_enable(dev);
return 0;
-err_workqueue:
- destroy_workqueue(hdata->wq);
+err_free_irq:
+ free_irq(hdata->external_irq, drm_hdmi_ctx);
err_hdmiphy:
i2c_del_driver(&hdmiphy_driver);
err_ddc:
@@ -2348,18 +2413,15 @@ err_data:
static int __devexit hdmi_remove(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
struct hdmi_context *hdata = ctx->ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- hdmi_resource_poweroff(hdata);
+ pm_runtime_disable(dev);
- disable_irq(hdata->irq);
- free_irq(hdata->irq, hdata);
-
- cancel_work_sync(&hdata->hotplug_work);
- destroy_workqueue(hdata->wq);
+	free_irq(hdata->internal_irq, ctx);
+	free_irq(hdata->external_irq, ctx);
hdmi_resources_cleanup(hdata);
@@ -2378,12 +2440,43 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int hdmi_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+
+ disable_irq(hdata->internal_irq);
+ disable_irq(hdata->external_irq);
+
+ hdata->hpd = false;
+ if (ctx->drm_dev)
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+
+ hdmi_poweroff(hdata);
+
+ return 0;
+}
+
+static int hdmi_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+
+ enable_irq(hdata->external_irq);
+ enable_irq(hdata->internal_irq);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
+
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
.remove = __devexit_p(hdmi_remove),
.driver = {
.name = "exynos4-hdmi",
.owner = THIS_MODULE,
- .pm = &hdmi_pm_ops,
+ .pm = &hdmi_pm_ops,
},
};
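The hotplug workqueue is gone because threaded interrupts already provide a sleepable context: with a NULL hard handler the core runs the handler in a kernel thread, where mutexes and I2C transfers are allowed, and IRQF_ONESHOT keeps the line masked until that thread returns. The idiom in isolation (my_* names are illustrative):

#include <linux/interrupt.h>

static irqreturn_t my_hpd_thread(int irq, void *arg)
{
	/* runs in a kernel thread: may take mutexes, do I2C transfers,
	 * call drm_helper_hpd_irq_event(), etc. */
	return IRQ_HANDLED;
}

static int my_setup_hpd_irq(int irq, void *ctx)
{
	/* NULL hard handler + IRQF_ONESHOT: the line stays masked until
	 * the threaded handler has finished */
	return request_threaded_irq(irq, NULL, my_hpd_thread,
				    IRQF_TRIGGER_RISING |
				    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				    "my_hpd", ctx);
}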
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e15438c01129..68ef01028375 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,9 +37,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
-#define MIXER_WIN_NR 3
-#define MIXER_DEFAULT_WIN 0
-
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
struct hdmi_win_data {
@@ -57,13 +54,14 @@ struct hdmi_win_data {
unsigned int fb_y;
unsigned int fb_width;
unsigned int fb_height;
+ unsigned int src_width;
+ unsigned int src_height;
unsigned int mode_width;
unsigned int mode_height;
unsigned int scan_flags;
};
struct mixer_resources {
- struct device *dev;
int irq;
void __iomem *mixer_regs;
void __iomem *vp_regs;
@@ -76,10 +74,13 @@ struct mixer_resources {
};
struct mixer_context {
- unsigned int irq;
+ struct device *dev;
int pipe;
bool interlace;
+ bool powered;
+ u32 int_en;
+ struct mutex mixer_mutex;
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
};
@@ -352,10 +353,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
struct hdmi_win_data *win_data;
- unsigned int full_width, full_height, width, height;
unsigned int x_ratio, y_ratio;
- unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
- unsigned int mode_width, mode_height;
unsigned int buf_num;
dma_addr_t luma_addr[2], chroma_addr[2];
bool tiled_mode = false;
@@ -382,21 +380,9 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
return;
}
- full_width = win_data->fb_width;
- full_height = win_data->fb_height;
- width = win_data->crtc_width;
- height = win_data->crtc_height;
- mode_width = win_data->mode_width;
- mode_height = win_data->mode_height;
-
/* scaling feature: (src << 16) / dst */
- x_ratio = (width << 16) / width;
- y_ratio = (height << 16) / height;
-
- src_x_offset = win_data->fb_x;
- src_y_offset = win_data->fb_y;
- dst_x_offset = win_data->crtc_x;
- dst_y_offset = win_data->crtc_y;
+ x_ratio = (win_data->src_width << 16) / win_data->crtc_width;
+ y_ratio = (win_data->src_height << 16) / win_data->crtc_height;
if (buf_num == 2) {
luma_addr[0] = win_data->dma_addr;
@@ -404,7 +390,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
} else {
luma_addr[0] = win_data->dma_addr;
chroma_addr[0] = win_data->dma_addr
- + (full_width * full_height);
+ + (win_data->fb_width * win_data->fb_height);
}
if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
@@ -413,8 +399,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
- luma_addr[1] = luma_addr[0] + full_width;
- chroma_addr[1] = chroma_addr[0] + full_width;
+ luma_addr[1] = luma_addr[0] + win_data->fb_width;
+ chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
}
} else {
ctx->interlace = false;
@@ -435,26 +421,26 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
- vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(full_width) |
- VP_IMG_VSIZE(full_height));
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
+ VP_IMG_VSIZE(win_data->fb_height));
 	/* chroma height has to be reduced by 2 to avoid chroma distortions */
- vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(full_width) |
- VP_IMG_VSIZE(full_height / 2));
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
+ VP_IMG_VSIZE(win_data->fb_height / 2));
- vp_reg_write(res, VP_SRC_WIDTH, width);
- vp_reg_write(res, VP_SRC_HEIGHT, height);
+ vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
+ vp_reg_write(res, VP_SRC_HEIGHT, win_data->src_height);
vp_reg_write(res, VP_SRC_H_POSITION,
- VP_SRC_H_POSITION_VAL(src_x_offset));
- vp_reg_write(res, VP_SRC_V_POSITION, src_y_offset);
+ VP_SRC_H_POSITION_VAL(win_data->fb_x));
+ vp_reg_write(res, VP_SRC_V_POSITION, win_data->fb_y);
- vp_reg_write(res, VP_DST_WIDTH, width);
- vp_reg_write(res, VP_DST_H_POSITION, dst_x_offset);
+ vp_reg_write(res, VP_DST_WIDTH, win_data->crtc_width);
+ vp_reg_write(res, VP_DST_H_POSITION, win_data->crtc_x);
if (ctx->interlace) {
- vp_reg_write(res, VP_DST_HEIGHT, height / 2);
- vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset / 2);
+ vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height / 2);
+ vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y / 2);
} else {
- vp_reg_write(res, VP_DST_HEIGHT, height);
- vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset);
+ vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height);
+ vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y);
}
vp_reg_write(res, VP_H_RATIO, x_ratio);
@@ -468,8 +454,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
- mixer_cfg_scan(ctx, mode_height);
- mixer_cfg_rgb_fmt(ctx, mode_height);
+ mixer_cfg_scan(ctx, win_data->mode_height);
+ mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
mixer_cfg_layer(ctx, win, true);
mixer_run(ctx);
@@ -484,10 +470,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
struct hdmi_win_data *win_data;
- unsigned int full_width, width, height;
unsigned int x_ratio, y_ratio;
unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
- unsigned int mode_width, mode_height;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
@@ -510,26 +494,17 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
fmt = ARGB8888;
}
- dma_addr = win_data->dma_addr;
- full_width = win_data->fb_width;
- width = win_data->crtc_width;
- height = win_data->crtc_height;
- mode_width = win_data->mode_width;
- mode_height = win_data->mode_height;
-
/* 2x scaling feature */
x_ratio = 0;
y_ratio = 0;
- src_x_offset = win_data->fb_x;
- src_y_offset = win_data->fb_y;
dst_x_offset = win_data->crtc_x;
dst_y_offset = win_data->crtc_y;
/* converting dma address base and source offset */
- dma_addr = dma_addr
- + (src_x_offset * win_data->bpp >> 3)
- + (src_y_offset * full_width * win_data->bpp >> 3);
+ dma_addr = win_data->dma_addr
+ + (win_data->fb_x * win_data->bpp >> 3)
+ + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
src_x_offset = 0;
src_y_offset = 0;
@@ -546,10 +521,10 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
/* setup geometry */
- mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), full_width);
+ mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
- val = MXR_GRP_WH_WIDTH(width);
- val |= MXR_GRP_WH_HEIGHT(height);
+ val = MXR_GRP_WH_WIDTH(win_data->crtc_width);
+ val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -567,8 +542,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* set buffer address to mixer */
mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
- mixer_cfg_scan(ctx, mode_height);
- mixer_cfg_rgb_fmt(ctx, mode_height);
+ mixer_cfg_scan(ctx, win_data->mode_height);
+ mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
mixer_cfg_layer(ctx, win, true);
mixer_run(ctx);
@@ -591,6 +566,116 @@ static void vp_win_reset(struct mixer_context *ctx)
WARN(tries == 0, "failed to reset Video Processor\n");
}
+static void mixer_win_reset(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ u32 val; /* value stored to register */
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
+
+ /* set output in RGB888 mode */
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
+
+ /* 16 beat burst in DMA */
+ mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
+ MXR_STATUS_BURST_MASK);
+
+ /* setting default layer priority: layer1 > layer0 > video
+ * because typical usage scenario would be
+ * layer1 - OSD
+ * layer0 - framebuffer
+ * video - video overlay
+ */
+ val = MXR_LAYER_CFG_GRP1_VAL(3);
+ val |= MXR_LAYER_CFG_GRP0_VAL(2);
+ val |= MXR_LAYER_CFG_VP_VAL(1);
+ mixer_reg_write(res, MXR_LAYER_CFG, val);
+
+ /* setting background color */
+ mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
+
+ /* setting graphical layers */
+
+ val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
+ val |= MXR_GRP_CFG_WIN_BLEND_EN;
+ val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+ /* the same configuration for both layers */
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
+
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+ val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+
+ /* configuration of Video Processor Registers */
+ vp_win_reset(ctx);
+ vp_default_filter(res);
+
+ /* disable all layers */
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static void mixer_poweron(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
+ ctx->powered = true;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ pm_runtime_get_sync(ctx->dev);
+
+ clk_enable(res->mixer);
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+
+ mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+ mixer_win_reset(ctx);
+}
+
+static void mixer_poweroff(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (!ctx->powered)
+ goto out;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+
+ clk_disable(res->mixer);
+ clk_disable(res->vp);
+ clk_disable(res->sclk_mixer);
+
+ pm_runtime_put_sync(ctx->dev);
+
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = false;
+
+out:
+ mutex_unlock(&ctx->mixer_mutex);
+}
+
static int mixer_enable_vblank(void *ctx, int pipe)
{
struct mixer_context *mixer_ctx = ctx;
@@ -618,6 +703,27 @@ static void mixer_disable_vblank(void *ctx)
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
+static void mixer_dpms(void *ctx, int mode)
+{
+ struct mixer_context *mixer_ctx = ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ mixer_poweron(mixer_ctx);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ mixer_poweroff(mixer_ctx);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
+ }
+}
+
static void mixer_win_mode_set(void *ctx,
struct exynos_drm_overlay *overlay)
{
@@ -643,7 +749,7 @@ static void mixer_win_mode_set(void *ctx,
win = MIXER_DEFAULT_WIN;
 	if (win < 0 || win >= MIXER_WIN_NR) {
- DRM_ERROR("overlay plane[%d] is wrong\n", win);
+ DRM_ERROR("mixer window[%d] is wrong\n", win);
return;
}
@@ -665,6 +771,8 @@ static void mixer_win_mode_set(void *ctx,
win_data->fb_y = overlay->fb_y;
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
+ win_data->src_width = overlay->src_width;
+ win_data->src_height = overlay->src_height;
win_data->mode_width = overlay->mode_width;
win_data->mode_height = overlay->mode_height;
@@ -672,44 +780,26 @@ static void mixer_win_mode_set(void *ctx,
win_data->scan_flags = overlay->scan_flag;
}
-static void mixer_win_commit(void *ctx, int zpos)
+static void mixer_win_commit(void *ctx, int win)
{
struct mixer_context *mixer_ctx = ctx;
- int win = zpos;
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
- if (win == DEFAULT_ZPOS)
- win = MIXER_DEFAULT_WIN;
-
- if (win < 0 || win > MIXER_WIN_NR) {
- DRM_ERROR("overlay plane[%d] is wrong\n", win);
- return;
- }
-
if (win > 1)
vp_video_buffer(mixer_ctx, win);
else
mixer_graph_buffer(mixer_ctx, win);
}
-static void mixer_win_disable(void *ctx, int zpos)
+static void mixer_win_disable(void *ctx, int win)
{
struct mixer_context *mixer_ctx = ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
unsigned long flags;
- int win = zpos;
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
- if (win == DEFAULT_ZPOS)
- win = MIXER_DEFAULT_WIN;
-
- if (win < 0 || win > MIXER_WIN_NR) {
- DRM_ERROR("overlay plane[%d] is wrong\n", win);
- return;
- }
-
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(mixer_ctx, false);
@@ -723,6 +813,7 @@ static struct exynos_mixer_ops mixer_ops = {
/* manager */
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
+ .dpms = mixer_dpms,
/* overlay */
.win_mode_set = mixer_win_mode_set,
@@ -773,7 +864,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
struct mixer_context *ctx = drm_hdmi_ctx->ctx;
struct mixer_resources *res = &ctx->mixer_res;
- u32 val, val_base;
+ u32 val, base, shadow;
spin_lock(&res->reg_slock);
@@ -784,12 +875,14 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
if (val & MXR_INT_STATUS_VSYNC) {
/* interlace scan need to check shadow register */
if (ctx->interlace) {
- val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
- if (ctx->win_data[0].dma_addr != val_base)
+ base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
+ shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+ if (base != shadow)
goto out;
- val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
- if (ctx->win_data[1].dma_addr != val_base)
+ base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
+ shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+ if (base != shadow)
goto out;
}
@@ -811,117 +904,6 @@ out:
return IRQ_HANDLED;
}
-static void mixer_win_reset(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
- unsigned long flags;
- u32 val; /* value stored to register */
-
- spin_lock_irqsave(&res->reg_slock, flags);
- mixer_vsync_set_update(ctx, false);
-
- mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
-
- /* set output in RGB888 mode */
- mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
-
- /* 16 beat burst in DMA */
- mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
- MXR_STATUS_BURST_MASK);
-
- /* setting default layer priority: layer1 > layer0 > video
- * because typical usage scenario would be
- * layer1 - OSD
- * layer0 - framebuffer
- * video - video overlay
- */
- val = MXR_LAYER_CFG_GRP1_VAL(3);
- val |= MXR_LAYER_CFG_GRP0_VAL(2);
- val |= MXR_LAYER_CFG_VP_VAL(1);
- mixer_reg_write(res, MXR_LAYER_CFG, val);
-
- /* setting background color */
- mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
- mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
- mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
-
- /* setting graphical layers */
-
- val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
- val |= MXR_GRP_CFG_WIN_BLEND_EN;
- val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
-
- /* the same configuration for both layers */
- mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
-
- val |= MXR_GRP_CFG_BLEND_PRE_MUL;
- val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
- mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
-
- /* configuration of Video Processor Registers */
- vp_win_reset(ctx);
- vp_default_filter(res);
-
- /* disable all layers */
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
-
- mixer_vsync_set_update(ctx, true);
- spin_unlock_irqrestore(&res->reg_slock, flags);
-}
-
-static void mixer_resource_poweron(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- clk_enable(res->mixer);
- clk_enable(res->vp);
- clk_enable(res->sclk_mixer);
-
- mixer_win_reset(ctx);
-}
-
-static void mixer_resource_poweroff(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- clk_disable(res->mixer);
- clk_disable(res->vp);
- clk_disable(res->sclk_mixer);
-}
-
-static int mixer_runtime_resume(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
-
- DRM_DEBUG_KMS("resume - start\n");
-
- mixer_resource_poweron(ctx->ctx);
-
- return 0;
-}
-
-static int mixer_runtime_suspend(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
-
- DRM_DEBUG_KMS("suspend - start\n");
-
- mixer_resource_poweroff(ctx->ctx);
-
- return 0;
-}
-
-static const struct dev_pm_ops mixer_pm_ops = {
- .runtime_suspend = mixer_runtime_suspend,
- .runtime_resume = mixer_runtime_resume,
-};
-
static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
struct platform_device *pdev)
{
@@ -931,7 +913,6 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
struct resource *res;
int ret;
- mixer_res->dev = dev;
spin_lock_init(&mixer_res->reg_slock);
mixer_res->mixer = clk_get(dev, "mixer");
@@ -1027,7 +1008,6 @@ fail:
clk_put(mixer_res->vp);
if (!IS_ERR_OR_NULL(mixer_res->mixer))
clk_put(mixer_res->mixer);
- mixer_res->dev = NULL;
return ret;
}
@@ -1035,7 +1015,6 @@ static void mixer_resources_cleanup(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
- disable_irq(res->irq);
free_irq(res->irq, ctx);
iounmap(res->vp_regs);
@@ -1064,6 +1043,9 @@ static int __devinit mixer_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ mutex_init(&ctx->mixer_mutex);
+
+ ctx->dev = &pdev->dev;
drm_hdmi_ctx->ctx = (void *)ctx;
platform_set_drvdata(pdev, drm_hdmi_ctx);
@@ -1076,7 +1058,7 @@ static int __devinit mixer_probe(struct platform_device *pdev)
/* register specific callback point to common hdmi. */
exynos_mixer_ops_register(&mixer_ops);
- mixer_resource_poweron(ctx);
+ pm_runtime_enable(dev);
return 0;
@@ -1095,12 +1077,27 @@ static int mixer_remove(struct platform_device *pdev)
dev_info(dev, "remove successful\n");
- mixer_resource_poweroff(ctx);
+ pm_runtime_disable(&pdev->dev);
+
mixer_resources_cleanup(ctx);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int mixer_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ mixer_poweroff(ctx);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
+
struct platform_driver mixer_driver = {
.driver = {
.name = "s5p-mixer",
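mixer_poweron()/mixer_poweroff() follow a common runtime-PM gating shape: a mutex-protected powered flag makes the transitions idempotent, pm_runtime_get_sync()/pm_runtime_put_sync() bracket the powered region, and register state that the power domain loses (here MXR_INT_EN) is parked in the context across the gap. A condensed sketch of that shape, with illustrative names:

#include <linux/mutex.h>
#include <linux/pm_runtime.h>

struct block {
	struct device *dev;
	struct mutex lock;
	bool powered;
	u32 saved_int_en;	/* register contents lost while off */
};

static void block_on(struct block *b)
{
	mutex_lock(&b->lock);
	if (b->powered) {	/* already on: nothing to do */
		mutex_unlock(&b->lock);
		return;
	}
	b->powered = true;
	mutex_unlock(&b->lock);

	pm_runtime_get_sync(b->dev);	/* power domain and clocks up */
	/* ...restore b->saved_int_en into the hardware here... */
}

static void block_off(struct block *b)
{
	mutex_lock(&b->lock);
	if (!b->powered) {
		mutex_unlock(&b->lock);
		return;
	}
	mutex_unlock(&b->lock);

	/* ...save the interrupt-enable register to b->saved_int_en... */
	pm_runtime_put_sync(b->dev);

	mutex_lock(&b->lock);
	b->powered = false;
	mutex_unlock(&b->lock);
}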
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 3c04bea842ce..9cc7c5e9718c 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -138,14 +138,16 @@
#define HDMI_ASP_MASK (1 << 2)
#define HDMI_EN (1 << 0)
+/* HDMI_CON_2 */
+#define HDMI_VID_PREAMBLE_DIS (1 << 5)
+#define HDMI_GUARD_BAND_DIS (1 << 1)
+
/* HDMI_PHY_STATUS */
#define HDMI_PHY_STATUS_READY (1 << 0)
/* HDMI_MODE_SEL */
#define HDMI_MODE_HDMI_EN (1 << 1)
#define HDMI_MODE_DVI_EN (1 << 0)
-#define HDMI_DVI_MODE_EN (1)
-#define HDMI_DVI_MODE_DIS (0)
#define HDMI_MODE_MASK (3 << 0)
/* HDMI_TG_CMD */
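Single-bit defines like HDMI_VID_PREAMBLE_DIS are consumed through read-modify-write helpers such as hdmi_reg_writemask() elsewhere in the patch. A sketch of what such a helper typically looks like; the driver's real helper also folds in the register base internally:

#include <linux/io.h>

static inline void reg_writemask(void __iomem *base, u32 reg_off,
				 u32 value, u32 mask)
{
	u32 old = readl(base + reg_off);

	/* keep every bit outside the mask, replace the bits inside it */
	writel((old & ~mask) | (value & mask), base + reg_off);
}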
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 1583982917ce..abfa2a93f0d0 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -1,7 +1,7 @@
#
# KMS driver for the GMA500
#
-ccflags-y += -Iinclude/drm
+ccflags-y += -I$(srctree)/include/drm
gma500_gfx-y += gem_glue.o \
accel_2d.o \
@@ -12,7 +12,6 @@ gma500_gfx-y += gem_glue.o \
intel_bios.o \
intel_i2c.o \
intel_gmbus.o \
- intel_opregion.o \
mmu.o \
power.o \
psb_drv.o \
@@ -25,6 +24,8 @@ gma500_gfx-y += gem_glue.o \
psb_device.o \
mid_bios.o
+gma500_gfx-$(CONFIG_ACPI) += opregion.o
+
gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
cdv_intel_crt.o \
cdv_intel_display.o \
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index a54cc738926a..9764045428ce 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -49,13 +49,15 @@ static void cdv_disable_vga(struct drm_device *dev)
static int cdv_output_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+
+ drm_mode_create_scaling_mode_property(dev);
+
cdv_disable_vga(dev);
cdv_intel_crt_init(dev, &dev_priv->mode_dev);
cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
- /* These bits indicate HDMI not SDVO on CDV, but we don't yet support
- the HDMI interface */
+ /* These bits indicate HDMI not SDVO on CDV */
if (REG_READ(SDVOB) & SDVO_DETECTED)
cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
if (REG_READ(SDVOC) & SDVO_DETECTED)
@@ -66,76 +68,71 @@ static int cdv_output_init(struct drm_device *dev)
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
/*
- * Poulsbo Backlight Interfaces
+ * Cedartrail Backlight Interfaces
*/
-#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
-#define BLC_PWM_FREQ_CALC_CONSTANT 32
-#define MHz 1000000
-
-#define PSB_BLC_PWM_PRECISION_FACTOR 10
-#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
-#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
-
-#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
-#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
-
-static int cdv_brightness;
static struct backlight_device *cdv_backlight_device;
-static int cdv_get_brightness(struct backlight_device *bd)
+static int cdv_backlight_combination_mode(struct drm_device *dev)
{
- /* return locally cached var instead of HW read (due to DPST etc.) */
- /* FIXME: ideally return actual value in case firmware fiddled with
- it */
- return cdv_brightness;
+ return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
}
-
-static int cdv_backlight_setup(struct drm_device *dev)
+static int cdv_get_brightness(struct backlight_device *bd)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
- unsigned long core_clock;
- /* u32 bl_max_freq; */
- /* unsigned long value; */
- u16 bl_max_freq;
- uint32_t value;
- uint32_t blc_pwm_precision_factor;
-
- /* get bl_max_freq and pol from dev_priv*/
- if (!dev_priv->lvds_bl) {
- dev_err(dev->dev, "Has no valid LVDS backlight info\n");
- return -ENOENT;
- }
- bl_max_freq = dev_priv->lvds_bl->freq;
- blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+ struct drm_device *dev = bl_get_data(bd);
+ u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- core_clock = dev_priv->core_freq;
+ if (cdv_backlight_combination_mode(dev)) {
+ u8 lbpc;
- value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
- value *= blc_pwm_precision_factor;
- value /= bl_max_freq;
- value /= blc_pwm_precision_factor;
+ val &= ~1;
+ pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+ val *= lbpc;
+ }
+ return val;
+}
- if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
- value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
- return -ERANGE;
- else {
- /* FIXME */
+static u32 cdv_get_max_backlight(struct drm_device *dev)
+{
+ u32 max = REG_READ(BLC_PWM_CTL);
+
+ if (max == 0) {
+ DRM_DEBUG_KMS("LVDS Panel PWM value is 0!\n");
+		/* i915 does this, which I believe means that we should not
+		 * smash the PWM control, as firmware will take control of it. */
+ return 1;
}
- return 0;
+
+ max >>= 16;
+ if (cdv_backlight_combination_mode(dev))
+ max *= 0xff;
+ return max;
}
static int cdv_set_brightness(struct backlight_device *bd)
{
+ struct drm_device *dev = bl_get_data(bd);
int level = bd->props.brightness;
+ u32 blc_pwm_ctl;
/* Percentage 1-100% being valid */
if (level < 1)
level = 1;
- /*cdv_intel_lvds_set_brightness(dev, level); FIXME */
- cdv_brightness = level;
+ if (cdv_backlight_combination_mode(dev)) {
+ u32 max = cdv_get_max_backlight(dev);
+ u8 lbpc;
+
+ lbpc = level * 0xfe / max + 1;
+ level /= lbpc;
+
+ pci_write_config_byte(dev->pdev, 0xF4, lbpc);
+ }
+
+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
return 0;
}
@@ -147,7 +144,6 @@ static const struct backlight_ops cdv_ops = {
static int cdv_backlight_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- int ret;
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -159,14 +155,9 @@ static int cdv_backlight_init(struct drm_device *dev)
if (IS_ERR(cdv_backlight_device))
return PTR_ERR(cdv_backlight_device);
- ret = cdv_backlight_setup(dev);
- if (ret < 0) {
- backlight_device_unregister(cdv_backlight_device);
- cdv_backlight_device = NULL;
- return ret;
- }
- cdv_backlight_device->props.brightness = 100;
- cdv_backlight_device->props.max_brightness = 100;
+ cdv_backlight_device->props.brightness =
+ cdv_get_brightness(cdv_backlight_device);
+ cdv_backlight_device->props.max_brightness = cdv_get_max_backlight(dev);
backlight_update_status(cdv_backlight_device);
dev_priv->backlight_device = cdv_backlight_device;
return 0;
@@ -238,6 +229,19 @@ static void cdv_init_pm(struct drm_device *dev)
dev_err(dev->dev, "GPU: power management timed out.\n");
}
+static void cdv_errata(struct drm_device *dev)
+{
+	/* Disable bonus launch.
+	 * The CPU and GPU compete for memory, so the display misses updates
+	 * and flickers. It is worst with dual core and dual displays.
+	 *
+	 * The Windows 7 gfx driver works around the issue by disabling the
+	 * Bonus Launch feature, at the cost of some performance.
+	 */
+ CDV_MSG_WRITE32(3, 0x30, 0x08027108);
+}
+
/**
* cdv_save_display_registers - save registers lost on suspend
* @dev: our DRM device
@@ -251,7 +255,7 @@ static int cdv_save_display_registers(struct drm_device *dev)
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
- dev_info(dev->dev, "Saving GPU registers.\n");
+ dev_dbg(dev->dev, "Saving GPU registers.\n");
pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
@@ -355,7 +359,7 @@ static int cdv_restore_display_registers(struct drm_device *dev)
REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR);
/* Fix arbitration bug */
- CDV_MSG_WRITE32(3, 0x30, 0x08027108);
+ cdv_errata(dev);
drm_mode_config_reset(dev);
@@ -447,13 +451,106 @@ static void cdv_get_core_freq(struct drm_device *dev)
}
}
+static void cdv_hotplug_work_func(struct work_struct *work)
+{
+ struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
+ hotplug_work);
+ struct drm_device *dev = dev_priv->dev;
+
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(dev);
+}
+
+/* The core driver has received a hotplug IRQ. We are in IRQ context,
+ so extract the needed information and kick off queued processing */
+
+static int cdv_hotplug_event(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ schedule_work(&dev_priv->hotplug_work);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ return 1;
+}
+
+static void cdv_hotplug_enable(struct drm_device *dev, bool on)
+{
+ if (on) {
+ u32 hotplug = REG_READ(PORT_HOTPLUG_EN);
+ hotplug |= HDMIB_HOTPLUG_INT_EN | HDMIC_HOTPLUG_INT_EN |
+ HDMID_HOTPLUG_INT_EN | CRT_HOTPLUG_INT_EN;
+ REG_WRITE(PORT_HOTPLUG_EN, hotplug);
+ } else {
+ REG_WRITE(PORT_HOTPLUG_EN, 0);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ }
+}
+
+/* Cedarview */
+static const struct psb_offset cdv_regmap[2] = {
+ {
+ .fp0 = FPA0,
+ .fp1 = FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = DPLL_A,
+ .dpll_md = DPLL_A_MD,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .base = DSPABASE,
+ .surf = DSPASURF,
+ .addr = DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .dpll_md = DPLL_B_MD,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .base = DSPBBASE,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ }
+};
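The regmap table above lets the rest of the driver index display registers by pipe instead of open-coding per-pipe ternaries; the pattern, as used by the hunks further down, is simply:

    const struct psb_offset *map = &dev_priv->regmap[pipe];

    dspcntr = REG_READ(map->cntr);  /* DSPACNTR or DSPBCNTR, by pipe */
    REG_WRITE(map->conf, pipeconf); /* PIPEACONF or PIPEBCONF */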
+
static int cdv_chip_setup(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
+
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->regmap = cdv_regmap;
cdv_get_core_freq(dev);
- gma_intel_opregion_init(dev);
+ psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
- REG_WRITE(PORT_HOTPLUG_EN, 0);
- REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ cdv_hotplug_enable(dev, false);
return 0;
}
@@ -464,13 +561,19 @@ const struct psb_ops cdv_chip_ops = {
.accel_2d = 0,
.pipes = 2,
.crtcs = 2,
+ .hdmi_mask = (1 << 0) | (1 << 1),
+ .lvds_mask = (1 << 1),
+ .cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
.chip_setup = cdv_chip_setup,
+ .errata = cdv_errata,
.crtc_helper = &cdv_intel_helper_funcs,
.crtc_funcs = &cdv_intel_crtc_funcs,
.output_init = cdv_output_init,
+ .hotplug = cdv_hotplug_event,
+ .hotplug_enable = cdv_hotplug_enable,
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
.backlight_init = cdv_backlight_init,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index a71a6cd95bdd..187422018601 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -67,8 +67,6 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
- int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -77,18 +75,9 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_LOW;
/* The max clock for CDV is 355 instead of 400 */
- max_clock = 355000;
- if (mode->clock > max_clock)
+ if (mode->clock > 355000)
return MODE_CLOCK_HIGH;
- if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
- return MODE_PANEL;
-
- /* We assume worst case scenario of 32 bpp here, since we don't know */
- if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
- dev_priv->vram_stolen_size)
- return MODE_MEM;
-
return MODE_OK;
}
@@ -156,13 +145,7 @@ static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
u32 hotplug_en;
int i, tries = 0, ret = false;
- u32 adpa_orig;
-
- /* disable the DAC when doing the hotplug detection */
-
- adpa_orig = REG_READ(ADPA);
-
- REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
+ u32 orig;
/*
* On a CDV, the CRT detect sequence needs to be done twice
@@ -170,7 +153,7 @@ static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
*/
tries = 2;
- hotplug_en = REG_READ(PORT_HOTPLUG_EN);
+ orig = hotplug_en = REG_READ(PORT_HOTPLUG_EN);
hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
@@ -195,8 +178,11 @@ static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
CRT_HOTPLUG_MONITOR_NONE)
ret = true;
- /* Restore the saved ADPA */
- REG_WRITE(ADPA, adpa_orig);
+ /* clear the interrupt we just generated, if any */
+ REG_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+ /* and put the bits back */
+ REG_WRITE(PORT_HOTPLUG_EN, orig);
return ret;
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index be8455919b33..c3e9a0f701df 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -216,22 +216,22 @@ static void cdv_sb_reset(struct drm_device *dev)
*/
static int
cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
- struct cdv_intel_clock_t *clock)
+ struct cdv_intel_clock_t *clock, bool is_lvds)
{
- struct psb_intel_crtc *psb_crtc =
- to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_crtc->pipe;
u32 m, n_vco, p;
int ret = 0;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int ref_sfr = (pipe == 0) ? SB_REF_DPLLA : SB_REF_DPLLB;
u32 ref_value;
+ u32 lane_reg, lane_value;
cdv_sb_reset(dev);
- if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
- DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
- return -EBUSY;
- }
+ REG_WRITE(dpll_reg, DPLL_SYNCLOCK_ENABLE | DPLL_VGA_MODE_DIS);
+
+ udelay(100);
/* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
ref_value = 0x68A701;
@@ -241,6 +241,35 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
/* We don't know what the other fields of these regs are, so
* leave them in place.
*/
+ /*
+ * Bits 14:13 of 0x8010/0x8030 select the reference clock for
+ * pipe A/B. Display spec 1.06 has the wrong definition; the
+ * correct definition is:
+ *
+ * refclka means "use the clock from the same PLL"
+ *
+ * if DPLLA is set to 01 and DPLLB to 01, each uses the clock
+ * from its own PLL
+ *
+ * if DPLLA is set to 01 and DPLLB to 02, both use the clock
+ * from DPLLA
+ *
+ */
+ ret = cdv_sb_read(dev, ref_sfr, &ref_value);
+ if (ret)
+ return ret;
+ ref_value &= ~(REF_CLK_MASK);
+
+ /* use DPLL_A for pipeB on CRT/HDMI */
+ if (pipe == 1 && !is_lvds) {
+ DRM_DEBUG_KMS("use DPLLA for pipe B\n");
+ ref_value |= REF_CLK_DPLLA;
+ } else {
+ DRM_DEBUG_KMS("use their DPLL for pipe A/B\n");
+ ref_value |= REF_CLK_DPLL;
+ }
+ ret = cdv_sb_write(dev, ref_sfr, ref_value);
+ if (ret)
+ return ret;
+
ret = cdv_sb_read(dev, SB_M(pipe), &m);
if (ret)
return ret;
@@ -307,36 +336,29 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
if (ret)
return ret;
- /* always Program the Lane Register for the Pipe A*/
- if (pipe == 0) {
- /* Program the Lane0/1 for HDMI B */
- u32 lane_reg, lane_value;
-
- lane_reg = PSB_LANE0;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE;
- cdv_sb_write(dev, lane_reg, lane_value);
-
- lane_reg = PSB_LANE1;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE;
- cdv_sb_write(dev, lane_reg, lane_value);
-
- /* Program the Lane2/3 for HDMI C */
- lane_reg = PSB_LANE2;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE;
- cdv_sb_write(dev, lane_reg, lane_value);
-
- lane_reg = PSB_LANE3;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE;
- cdv_sb_write(dev, lane_reg, lane_value);
- }
+ lane_reg = PSB_LANE0;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE1;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE2;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE3;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
return 0;
}
@@ -480,14 +502,12 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
@@ -509,9 +529,9 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -533,15 +553,15 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
ret = -EINVAL;
goto psb_intel_pipe_set_base_exit;
}
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
dev_dbg(dev->dev,
"Writing base %08lX %08lX %d %d\n", start, offset, x, y);
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
+ REG_WRITE(map->base, offset);
+ REG_READ(map->base);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
psb_intel_pipe_cleaner:
/* If there was a previous display we can now unpin it */
@@ -553,6 +573,199 @@ psb_intel_pipe_set_base_exit:
return ret;
}
+#define FIFO_PIPEA (1 << 0)
+#define FIFO_PIPEB (1 << 1)
+
+static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
+{
+ struct drm_crtc *crtc;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = NULL;
+
+ crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+ if (crtc->fb == NULL || !psb_intel_crtc->active)
+ return false;
+ return true;
+}
+
+static bool cdv_intel_single_pipe_active(struct drm_device *dev)
+{
+ uint32_t pipe_enabled = 0;
+
+ if (cdv_intel_pipe_enabled(dev, 0))
+ pipe_enabled |= FIFO_PIPEA;
+
+ if (cdv_intel_pipe_enabled(dev, 1))
+ pipe_enabled |= FIFO_PIPEB;
+
+ DRM_DEBUG_KMS("pipe enabled %x\n", pipe_enabled);
+
+ if (pipe_enabled == FIFO_PIPEA || pipe_enabled == FIFO_PIPEB)
+ return true;
+ else
+ return false;
+}
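Put differently, the function returns true only when exactly one of the two FIFO bits is set; 0 (no pipe) and FIFO_PIPEA | FIFO_PIPEB (both pipes) both yield false. An equivalent formulation, as a sketch using the kernel's population-count helper:

    return hweight32(pipe_enabled) == 1;  /* exactly one pipe active */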
+
+static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ if (psb_intel_crtc->pipe != 1)
+ return false;
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ if (!connector->encoder
+ || connector->encoder->crtc != crtc)
+ continue;
+
+ if (psb_intel_encoder->type == INTEL_OUTPUT_LVDS)
+ return true;
+ }
+
+ return false;
+}
+
+static void cdv_intel_disable_self_refresh(struct drm_device *dev)
+{
+ if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
+
+ /* Disable self-refresh before adjusting the watermarks */
+ REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN));
+ REG_READ(FW_BLC_SELF);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ /* Cedarview workaround: writing the overlay plane forces the
+ * hardware to leave the MAX_FIFO state.
+ */
+ REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
+ REG_READ(OV_OVADD);
+
+ cdv_intel_wait_for_vblank(dev);
+ }
+}
+
+static void cdv_intel_update_watermark(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ if (cdv_intel_single_pipe_active(dev)) {
+ u32 fw;
+
+ fw = REG_READ(DSPFW1);
+ fw &= ~DSP_FIFO_SR_WM_MASK;
+ fw |= (0x7e << DSP_FIFO_SR_WM_SHIFT);
+ fw &= ~CURSOR_B_FIFO_WM_MASK;
+ fw |= (0x4 << CURSOR_B_FIFO_WM_SHIFT);
+ REG_WRITE(DSPFW1, fw);
+
+ fw = REG_READ(DSPFW2);
+ fw &= ~CURSOR_A_FIFO_WM_MASK;
+ fw |= (0x6 << CURSOR_A_FIFO_WM_SHIFT);
+ fw &= ~DSP_PLANE_C_FIFO_WM_MASK;
+ fw |= (0x8 << DSP_PLANE_C_FIFO_WM_SHIFT);
+ REG_WRITE(DSPFW2, fw);
+
+ REG_WRITE(DSPFW3, 0x36000000);
+
+ /* ignore FW4 */
+
+ if (is_pipeb_lvds(dev, crtc)) {
+ REG_WRITE(DSPFW5, 0x00040330);
+ } else {
+ fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) |
+ (4 << DSP_PLANE_A_FIFO_WM1_SHIFT) |
+ (3 << CURSOR_B_FIFO_WM1_SHIFT) |
+ (4 << CURSOR_FIFO_SR_WM1_SHIFT);
+ REG_WRITE(DSPFW5, fw);
+ }
+
+ REG_WRITE(DSPFW6, 0x10);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ /* enable self-refresh for single pipe active */
+ REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ REG_READ(FW_BLC_SELF);
+ cdv_intel_wait_for_vblank(dev);
+
+ } else {
+
+ /* HW team suggested values... */
+ REG_WRITE(DSPFW1, 0x3f880808);
+ REG_WRITE(DSPFW2, 0x0b020202);
+ REG_WRITE(DSPFW3, 0x24000000);
+ REG_WRITE(DSPFW4, 0x08030202);
+ REG_WRITE(DSPFW5, 0x01010101);
+ REG_WRITE(DSPFW6, 0x1d0);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ cdv_intel_disable_self_refresh(dev);
+
+ }
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int palreg = PALETTE_A;
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled)
+ return;
+
+ switch (psb_intel_crtc->pipe) {
+ case 0:
+ break;
+ case 1:
+ palreg = PALETTE_B;
+ break;
+ case 2:
+ palreg = PALETTE_C;
+ break;
+ default:
+ dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return;
+ }
+
+ if (gma_power_begin(dev, false)) {
+ for (i = 0; i < 256; i++) {
+ REG_WRITE(palreg + 4 * i,
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]));
+ }
+ gma_power_end(dev);
+ } else {
+ for (i = 0; i < 256; i++) {
+ dev_priv->regs.pipe[0].palette[i] =
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]);
+ }
+
+ }
+}
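For reference, each palette slot written above is a packed 8:8:8 word (red in bits 23:16, green in 15:8, blue in 7:0); with a zero lut_adj, for example:

    u32 word = (0x40 << 16) | (0x80 << 8) | 0xc0;  /* R/G/B -> 0x004080c0 */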
+
/**
* Sets the power management mode of the pipe and plane.
*
@@ -562,62 +775,80 @@ psb_intel_pipe_set_base_exit:
static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
*/
+ cdv_intel_disable_self_refresh(dev);
+
switch (mode) {
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
+ if (psb_intel_crtc->active)
+ return;
+
+ psb_intel_crtc->active = true;
+
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Jim Bish - switch plan and pipe per scott */
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
udelay(150);
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
+
+ temp = REG_READ(map->status);
+ temp &= ~(0xFFFF);
+ temp |= PIPE_FIFO_UNDERRUN;
+ REG_WRITE(map->status, temp);
+ REG_READ(map->status);
- psb_intel_crtc_load_lut(crtc);
+ cdv_intel_update_watermark(dev, crtc);
+ cdv_intel_crtc_load_lut(crtc);
/* Give the overlay scaler a chance to enable
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ psb_intel_crtc->crtc_enable = true;
break;
case DRM_MODE_DPMS_OFF:
+ if (!psb_intel_crtc->active)
+ return;
+
+ psb_intel_crtc->active = false;
+
/* Give the overlay scaler a chance to disable
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
@@ -627,14 +858,15 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Jim Bish - changed pipe/plane here as well. */
+ drm_vblank_off(dev, pipe);
/* Wait for vblank for the disable to take effect */
cdv_intel_wait_for_vblank(dev);
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
}
/* Wait for vblank for the disable to take effect. */
@@ -643,23 +875,25 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(150);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
}
/* Wait for the clocks to turn off. */
udelay(150);
+ cdv_intel_update_watermark(dev, crtc);
+ psb_intel_crtc->crtc_enable = false;
break;
}
/*Set FIFO Watermarks*/
@@ -709,21 +943,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
struct cdv_intel_clock_t clock;
u32 dpll = 0, dspcntr, pipeconf;
@@ -757,13 +980,18 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
- refclk = 96000;
-
- /* Hack selection about ref clk for CRT */
- /* Select 27MHz as the reference clk for HDMI */
- if (is_crt || is_hdmi)
+ if (dev_priv->dplla_96mhz)
+ /* low-end sku, 96/100 mhz */
+ refclk = 96000;
+ else
+ /* high-end sku, 27/100 mhz */
refclk = 27000;
+ if (is_lvds && dev_priv->lvds_use_ssc) {
+ refclk = dev_priv->lvds_ssc_freq * 1000;
+ DRM_DEBUG_KMS("Use SSC reference clock %d Mhz\n", dev_priv->lvds_ssc_freq);
+ }
+
drm_mode_debug_printmodeline(adjusted_mode);
ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
@@ -779,18 +1007,17 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
}
- dpll |= PLL_REF_INPUT_DREFCLK;
+/* dpll |= PLL_REF_INPUT_DREFCLK; */
dpll |= DPLL_SYNCLOCK_ENABLE;
- dpll |= DPLL_VGA_MODE_DIS;
- if (is_lvds)
+/* if (is_lvds)
dpll |= DPLLB_MODE_LVDS;
else
- dpll |= DPLLB_MODE_DAC_SERIAL;
+ dpll |= DPLLB_MODE_DAC_SERIAL; */
/* dpll |= (2 << 11); */
/* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
+ pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -803,10 +1030,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
dspcntr |= DISPLAY_PLANE_ENABLE;
pipeconf |= PIPEACONF_ENABLE;
- REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+ REG_READ(map->dpll);
- cdv_dpll_set_clock_cdv(dev, crtc, &clock);
+ cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds);
udelay(150);
@@ -848,48 +1075,48 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- REG_WRITE(dpll_reg,
- (REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll,
+ (REG_READ(map->dpll) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */
- if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
+ if (!(REG_READ(map->dpll) & DPLL_LOCK)) {
dev_err(dev->dev, "Failed to get DPLL lock\n");
return -EBUSY;
}
{
int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+ REG_WRITE(map->dpll_md, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
}
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
- REG_WRITE(dspsize_reg,
+ REG_WRITE(map->size,
((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(dsppos_reg, 0);
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->pos, 0);
+ REG_WRITE(map->src,
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
cdv_intel_wait_for_vblank(dev);
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
/* Flush the plane changes */
{
@@ -903,58 +1130,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
return 0;
}
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int palreg = PALETTE_A;
- int i;
-
- /* The clocks have to be on to load the palette. */
- if (!crtc->enabled)
- return;
-
- switch (psb_intel_crtc->pipe) {
- case 0:
- break;
- case 1:
- palreg = PALETTE_B;
- break;
- case 2:
- palreg = PALETTE_C;
- break;
- default:
- dev_err(dev->dev, "Illegal Pipe Number.\n");
- return;
- }
-
- if (gma_power_begin(dev, false)) {
- for (i = 0; i < 256; i++) {
- REG_WRITE(palreg + 4 * i,
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]));
- }
- gma_power_end(dev);
- } else {
- for (i = 0; i < 256; i++) {
- dev_priv->regs.psb.save_palette_a[i] =
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]);
- }
-
- }
-}
/**
* Save HW states of giving crtc
@@ -962,11 +1137,10 @@ static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
static void cdv_intel_crtc_save(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -975,25 +1149,25 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
return;
}
- crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
- crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
- crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
- crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
- crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
- crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
- crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
- crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
- crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
- crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
- crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
- crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
- crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+ crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+ crtc_state->savePIPECONF = REG_READ(map->conf);
+ crtc_state->savePIPESRC = REG_READ(map->src);
+ crtc_state->saveFP0 = REG_READ(map->fp0);
+ crtc_state->saveFP1 = REG_READ(map->fp1);
+ crtc_state->saveDPLL = REG_READ(map->dpll);
+ crtc_state->saveHTOTAL = REG_READ(map->htotal);
+ crtc_state->saveHBLANK = REG_READ(map->hblank);
+ crtc_state->saveHSYNC = REG_READ(map->hsync);
+ crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+ crtc_state->saveVBLANK = REG_READ(map->vblank);
+ crtc_state->saveVSYNC = REG_READ(map->vsync);
+ crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
/*NOTE: DSPSIZE DSPPOS only for psb*/
- crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
- crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+ crtc_state->saveDSPSIZE = REG_READ(map->size);
+ crtc_state->saveDSPPOS = REG_READ(map->pos);
- crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+ crtc_state->saveDSPBASE = REG_READ(map->base);
DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
crtc_state->saveDSPCNTR,
@@ -1014,7 +1188,7 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
crtc_state->saveDSPBASE
);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
}
@@ -1025,12 +1199,10 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private * dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -1041,23 +1213,23 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
DRM_DEBUG(
"current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
- REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
- REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
- REG_READ(pipeA ? FPA0 : FPB0),
- REG_READ(pipeA ? FPA1 : FPB1),
- REG_READ(pipeA ? DPLL_A : DPLL_B),
- REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
- REG_READ(pipeA ? HBLANK_A : HBLANK_B),
- REG_READ(pipeA ? HSYNC_A : HSYNC_B),
- REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
- REG_READ(pipeA ? VBLANK_A : VBLANK_B),
- REG_READ(pipeA ? VSYNC_A : VSYNC_B),
- REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
- REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
- REG_READ(pipeA ? DSPAPOS : DSPBPOS),
- REG_READ(pipeA ? DSPABASE : DSPBBASE)
- );
+ REG_READ(map->cntr),
+ REG_READ(map->conf),
+ REG_READ(map->src),
+ REG_READ(map->fp0),
+ REG_READ(map->fp1),
+ REG_READ(map->dpll),
+ REG_READ(map->htotal),
+ REG_READ(map->hblank),
+ REG_READ(map->hsync),
+ REG_READ(map->vtotal),
+ REG_READ(map->vblank),
+ REG_READ(map->vsync),
+ REG_READ(map->stride),
+ REG_READ(map->size),
+ REG_READ(map->pos),
+ REG_READ(map->base)
+ );
DRM_DEBUG(
"saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
@@ -1077,51 +1249,51 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
crtc_state->saveDSPSIZE,
crtc_state->saveDSPPOS,
crtc_state->saveDSPBASE
- );
+ );
if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
- REG_WRITE(pipeA ? DPLL_A : DPLL_B,
- crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_WRITE(map->dpll,
+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
DRM_DEBUG("write dpll: %x\n",
- REG_READ(pipeA ? DPLL_A : DPLL_B));
+ REG_READ(map->dpll));
udelay(150);
}
- REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
- REG_READ(pipeA ? FPA0 : FPB0);
+ REG_WRITE(map->fp0, crtc_state->saveFP0);
+ REG_READ(map->fp0);
- REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
- REG_READ(pipeA ? FPA1 : FPB1);
+ REG_WRITE(map->fp1, crtc_state->saveFP1);
+ REG_READ(map->fp1);
- REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_WRITE(map->dpll, crtc_state->saveDPLL);
+ REG_READ(map->dpll);
udelay(150);
- REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
- REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
- REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
- REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
- REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
- REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
- REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+ REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+ REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+ REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+ REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+ REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+ REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+ REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
- REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
- REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+ REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+ REG_WRITE(map->pos, crtc_state->saveDSPPOS);
- REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
- REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+ REG_WRITE(map->src, crtc_state->savePIPESRC);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+ REG_WRITE(map->conf, crtc_state->savePIPECONF);
cdv_intel_wait_for_vblank(dev);
- REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
cdv_intel_wait_for_vblank(dev);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
}
@@ -1296,35 +1468,30 @@ static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
static int cdv_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
struct cdv_intel_clock_t clock;
bool is_lvds;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
if (gma_power_begin(dev, false)) {
- dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ dpll = REG_READ(map->dpll);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ fp = REG_READ(map->fp0);
else
- fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ fp = REG_READ(map->fp1);
is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
gma_power_end(dev);
} else {
- dpll = (pipe == 0) ?
- dev_priv->regs.psb.saveDPLL_A :
- dev_priv->regs.psb.saveDPLL_B;
-
+ dpll = p->dpll;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA0 :
- dev_priv->regs.psb.saveFPB0;
+ fp = p->fp0;
else
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA1 :
- dev_priv->regs.psb.saveFPB1;
+ fp = p->fp1;
is_lvds = (pipe == 1) &&
(dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
@@ -1382,32 +1549,26 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
{
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
struct drm_display_mode *mode;
int htot;
int hsync;
int vtot;
int vsync;
- struct drm_psb_private *dev_priv = dev->dev_private;
if (gma_power_begin(dev, false)) {
- htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
- hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
- vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
- vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ htot = REG_READ(map->htotal);
+ hsync = REG_READ(map->hsync);
+ vtot = REG_READ(map->vtotal);
+ vsync = REG_READ(map->vsync);
gma_power_end(dev);
} else {
- htot = (pipe == 0) ?
- dev_priv->regs.psb.saveHTOTAL_A :
- dev_priv->regs.psb.saveHTOTAL_B;
- hsync = (pipe == 0) ?
- dev_priv->regs.psb.saveHSYNC_A :
- dev_priv->regs.psb.saveHSYNC_B;
- vtot = (pipe == 0) ?
- dev_priv->regs.psb.saveVTOTAL_A :
- dev_priv->regs.psb.saveVTOTAL_B;
- vsync = (pipe == 0) ?
- dev_priv->regs.psb.saveVSYNC_A :
- dev_priv->regs.psb.saveVSYNC_B;
+ htot = p->htotal;
+ hsync = p->hsync;
+ vtot = p->vtotal;
+ vsync = p->vsync;
}
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 8d5269555005..88b59d4a7b7f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -242,8 +242,6 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
static int cdv_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
-
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
@@ -257,11 +255,6 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
- /* We assume worst case scenario of 32 bpp here, since we don't know */
- if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
- dev_priv->vram_stolen_size)
- return MODE_MEM;
-
return MODE_OK;
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 8359c1a3f45f..ff5b58eb878c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -356,6 +356,8 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(
+ encoder->crtc);
u32 pfit_control;
/*
@@ -377,6 +379,8 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
else
pfit_control = 0;
+ pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
+
if (dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
@@ -552,10 +556,60 @@ static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
drm_encoder_cleanup(encoder);
}
-const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
+static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
.destroy = cdv_intel_lvds_enc_destroy,
};
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * If it is present, return true.
+ * If it is not present, return false.
+ * If no child dev is parsed from the VBT, assume that the LVDS is present.
+ */
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+ u8 *i2c_pin)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return true;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ struct child_device_config *child = dev_priv->child_dev + i;
+
+ /* If the device type is not LFP, continue.
+ * We have to check both the new identifiers as well as the
+ * old for compatibility with some BIOSes.
+ */
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ if (child->i2c_pin)
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
+ */
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (dev_priv->opregion.vbt)
+ return true;
+ }
+
+ return false;
+}
+
/**
* cdv_intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
@@ -576,6 +630,13 @@ void cdv_intel_lvds_init(struct drm_device *dev,
struct drm_psb_private *dev_priv = dev->dev_private;
u32 lvds;
int pipe;
+ u8 pin;
+
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ return;
+ }
psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
GFP_KERNEL);
@@ -710,6 +771,19 @@ void cdv_intel_lvds_init(struct drm_device *dev,
goto failed_find;
}
+ /* setup PWM */
+ {
+ u32 pwm;
+
+ pwm = REG_READ(BLC_PWM_CTL2);
+ if (pipe == 1)
+ pwm |= PWM_PIPE_B;
+ else
+ pwm &= ~PWM_PIPE_B;
+ pwm |= PWM_ENABLE;
+ REG_WRITE(BLC_PWM_CTL2, pwm);
+ }
+
out:
drm_sysfs_connector_add(connector);
return;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8ea202f1ba50..5732b5702e1c 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -153,7 +153,7 @@ static void psbfb_vm_close(struct vm_area_struct *vma)
{
}
-static struct vm_operations_struct psbfb_vm_ops = {
+static const struct vm_operations_struct psbfb_vm_ops = {
.fault = psbfb_vm_fault,
.open = psbfb_vm_open,
.close = psbfb_vm_close
@@ -408,6 +408,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
return -ENOMEM;
}
+ memset(dev_priv->vram_addr + backing->offset, 0, size);
+
mutex_lock(&dev->struct_mutex);
info = framebuffer_alloc(0, device);
@@ -453,8 +455,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->fix.ypanstep = 0;
/* Accessed stolen memory directly */
- info->screen_base = (char *)dev_priv->vram_addr +
- backing->offset;
+ info->screen_base = dev_priv->vram_addr + backing->offset;
info->screen_size = size;
if (dev_priv->gtt.stolen_size) {
@@ -475,7 +476,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- dev_info(dev->dev, "allocated %dx%d fb\n",
+ dev_dbg(dev->dev, "allocated %dx%d fb\n",
psbfb->base.width, psbfb->base.height);
mutex_unlock(&dev->struct_mutex);
@@ -543,9 +544,25 @@ static int psbfb_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
+ struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
int new_fb = 0;
+ int bytespp;
int ret;
+ bytespp = sizes->surface_bpp / 8;
+ if (bytespp == 3) /* no 24bit packed */
+ bytespp = 4;
+
+ /* If the mode will not fit at 32bpp then switch to 16bpp to get
+ a console at full resolution. The X mode-setting server will
+ allocate its own 32bpp GEM framebuffer */
+ if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
+ dev_priv->vram_stolen_size) {
+ sizes->surface_bpp = 16;
+ sizes->surface_depth = 16;
+ }
+
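Rough numbers for this fallback, assuming a hypothetical 4 MiB stolen area: a 1280x1024 console at 32bpp needs ALIGN(1280 * 4, 64) * 1024 = 5120 * 1024 bytes = 5 MiB and does not fit, so the probe drops to 16bpp, which needs half of that (2.5 MiB) and fits.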
if (!helper->fb) {
ret = psbfb_create(psb_fbdev, sizes);
if (ret)
@@ -555,7 +572,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
return new_fb;
}
-struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
.gamma_set = psbfb_gamma_set,
.gamma_get = psbfb_gamma_get,
.fb_probe = psbfb_probe,
@@ -732,10 +749,7 @@ static void psb_setup_outputs(struct drm_device *dev)
clone_mask = (1 << INTEL_OUTPUT_SDVO);
break;
case INTEL_OUTPUT_LVDS:
- if (IS_MRST(dev))
- crtc_mask = (1 << 0);
- else
- crtc_mask = (1 << 1);
+ crtc_mask = dev_priv->ops->lvds_mask;
clone_mask = (1 << INTEL_OUTPUT_LVDS);
break;
case INTEL_OUTPUT_MIPI:
@@ -747,10 +761,7 @@ static void psb_setup_outputs(struct drm_device *dev)
clone_mask = (1 << INTEL_OUTPUT_MIPI2);
break;
case INTEL_OUTPUT_HDMI:
- if (IS_MFLD(dev))
- crtc_mask = (1 << 1);
- else
- crtc_mask = (1 << 0);
+ crtc_mask = dev_priv->ops->hdmi_mask;
clone_mask = (1 << INTEL_OUTPUT_HDMI);
break;
}
@@ -771,7 +782,7 @@ void psb_modeset_init(struct drm_device *dev)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- dev->mode_config.funcs = (void *) &psb_mode_funcs;
+ dev->mode_config.funcs = &psb_mode_funcs;
/* set memory base */
/* Oaktrail and Poulsbo should use BAR 2*/
@@ -786,15 +797,23 @@ void psb_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 2048;
psb_setup_outputs(dev);
+
+ if (dev_priv->ops->errata)
+ dev_priv->ops->errata(dev);
+
+ dev_priv->modeset = true;
}
void psb_modeset_cleanup(struct drm_device *dev)
{
- mutex_lock(&dev->struct_mutex);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->modeset) {
+ mutex_lock(&dev->struct_mutex);
- drm_kms_helper_poll_fini(dev);
- psb_fbdev_fini(dev);
- drm_mode_config_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
+ psb_fbdev_fini(dev);
+ drm_mode_config_cleanup(dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 9fbb86868e2e..fc7d144bc2d3 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -124,6 +124,8 @@ static int psb_gem_create(struct drm_file *file,
dev_err(dev->dev, "GEM init failed for %lld\n", size);
return -ENOMEM;
}
+ /* Limit the object to 32bit mappings */
+ mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
/* Give the object a handle so we can carry it more easily */
ret = drm_gem_handle_create(file, &r->gem, &handle);
if (ret) {
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index c6465b40090f..04a371aceb34 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -39,6 +39,10 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
uint32_t mask = PSB_PTE_VALID;
+ /* Ensure we explode rather than put a truncated (and thus
+ invalid) low mapping of a high page into the GTT */
+ BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));
+
if (type & PSB_MMU_CACHED_MEMORY)
mask |= PSB_PTE_CACHED;
if (type & PSB_MMU_RO_MEMORY)
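Worked through, with 4 KiB pages (PAGE_SHIFT = 12): 0xFFFFFFFF >> PAGE_SHIFT is 0x000FFFFF, so the BUG_ON fires for any pfn of 1 << 20 or above, i.e. any page whose physical address is at or beyond 4 GiB. This pairs with the __GFP_DMA32 restriction added in gem.c above, which keeps GEM backing pages below that boundary.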
@@ -57,7 +61,7 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
* Given a gtt_range object return the GTT offset of the page table
* entries for this gtt_range
*/
-static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long offset;
@@ -78,7 +82,8 @@ static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
*/
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
- u32 *gtt_slot, pte;
+ u32 __iomem *gtt_slot;
+ u32 pte;
struct page **pages;
int i;
@@ -93,7 +98,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
pages = r->pages;
/* Make sure changes are visible to the GPU */
- set_pages_array_uc(pages, r->npage);
+ set_pages_array_wc(pages, r->npage);
/* Write our page entries into the GTT itself */
for (i = r->roll; i < r->npage; i++) {
@@ -122,7 +127,8 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- u32 *gtt_slot, pte;
+ u32 __iomem *gtt_slot;
+ u32 pte;
int i;
WARN_ON(r->stolen);
@@ -148,7 +154,8 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
*/
void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
{
- u32 *gtt_slot, pte;
+ u32 __iomem *gtt_slot;
+ u32 pte;
int i;
if (roll >= r->npage) {
@@ -409,8 +416,6 @@ int psb_gtt_init(struct drm_device *dev, int resume)
unsigned long stolen_size, vram_stolen_size;
unsigned i, num_pages;
unsigned pfn_base;
- uint32_t vram_pages;
- uint32_t dvmt_mode = 0;
struct psb_gtt *pg;
int ret = 0;
@@ -483,13 +488,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
stolen_size = vram_stolen_size;
- printk(KERN_INFO "Stolen memory information\n");
- printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
- printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
- vram_stolen_size/1024);
- dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
- printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
- (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
+ dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
+ dev_priv->stolen_base, vram_stolen_size / 1024);
if (resume && (gtt_pages != pg->gtt_pages) &&
(stolen_size != pg->stolen_size)) {
@@ -525,8 +525,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
*/
pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
- vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
- printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+ num_pages = vram_stolen_size >> PAGE_SHIFT;
+ dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
num_pages, pfn_base << PAGE_SHIFT, 0);
for (i = 0; i < num_pages; ++i) {
pte = psb_gtt_mask_pte(pfn_base + i, 0);
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index d4d0c5b8bf91..973d7f6d66b7 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -26,6 +26,8 @@
#include "psb_intel_reg.h"
#include "intel_bios.h"
+#define SLAVE_ADDR1 0x70
+#define SLAVE_ADDR2 0x72
static void *find_section(struct bdb_header *bdb, int section_id)
{
@@ -52,6 +54,16 @@ static void *find_section(struct bdb_header *bdb, int section_id)
return NULL;
}
+static u16
+get_blocksize(void *p)
+{
+ u16 *block_ptr, block_size;
+
+ block_ptr = (u16 *)((char *)p - 2);
+ block_size = *block_ptr;
+ return block_size;
+}
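A sketch of the layout this relies on (as walked by find_section() above): each BDB block starts with a one-byte id followed by a little-endian u16 size, and find_section() returns a pointer just past that three-byte header, so the size sits two bytes before the returned pointer:

    /* byte 0     : block id
     * bytes 1..2 : block size (le16)  -- recovered via *(u16 *)(p - 2)
     * bytes 3..  : payload            -- the pointer find_section() returns
     */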
+
static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
struct lvds_dvo_timing *dvo_timing)
{
@@ -75,6 +87,16 @@ static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
panel_fixed_mode->clock = dvo_timing->clock * 10;
panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+ if (dvo_timing->hsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dvo_timing->vsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
/* Some VBTs have bogus h/vtotal values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -217,6 +239,180 @@ static void parse_general_features(struct drm_psb_private *dev_priv,
}
}
+static void
+parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct sdvo_device_mapping *p_mapping;
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
+ return;
+ }
+ /* Check whether the size of the child device struct meets the
+ * requirement: if the size obtained from the general definition
+ * block differs from sizeof(struct child_device_config), skip
+ * parsing the SDVO device info.
+ */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+ /* Different child dev size. Ignore it. */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ if (p_child->slave_addr != SLAVE_ADDR1 &&
+ p_child->slave_addr != SLAVE_ADDR2) {
+ /*
+ * If the slave address is neither 0x70 nor 0x72,
+ * it is not a SDVO device. Skip it.
+ */
+ continue;
+ }
+ if (p_child->dvo_port != DEVICE_PORT_DVOB &&
+ p_child->dvo_port != DEVICE_PORT_DVOC) {
+ /* skip the incorrect SDVO port */
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+ continue;
+ }
+ DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+ " %s port\n",
+ p_child->slave_addr,
+ (p_child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
+ p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+ if (!p_mapping->initialized) {
+ p_mapping->dvo_port = p_child->dvo_port;
+ p_mapping->slave_addr = p_child->slave_addr;
+ p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->ddc_pin = p_child->ddc_pin;
+ p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->initialized = 1;
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ p_mapping->dvo_port,
+ p_mapping->slave_addr,
+ p_mapping->dvo_wiring,
+ p_mapping->ddc_pin,
+ p_mapping->i2c_pin);
+ } else {
+ DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+ "two SDVO device.\n");
+ }
+ if (p_child->slave2_addr) {
+ /* Maybe this is an SDVO device with multiple inputs,
+ * and the mapping info has not been added */
+ DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
+ " is a SDVO device with multiple inputs.\n");
+ }
+ count++;
+ }
+
+ if (!count) {
+ /* No SDVO device info is found */
+ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+ }
+ return;
+}
+
+
+static void
+parse_driver_features(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_driver_features *driver;
+
+ driver = find_section(bdb, BDB_DRIVER_FEATURES);
+ if (!driver)
+ return;
+
+ /* This bit means to use 96Mhz for DPLL_A or not */
+ if (driver->primary_lfp_id)
+ dev_priv->dplla_96mhz = true;
+ else
+ dev_priv->dplla_96mhz = false;
+}
+
+static void
+parse_device_mapping(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child, *child_dev_ptr;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+ return;
+ }
+ /* Check whether the size of the child device struct meets the
+ * requirement: if the size obtained from the general definition
+ * block differs from sizeof(struct child_device_config), skip
+ * parsing the device info.
+ */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+ /* Different child dev size. Ignore it. */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ /* get the number of child devices that are present */
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ count++;
+ }
+ if (!count) {
+ DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+ return;
+ }
+ dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+ if (!dev_priv->child_dev) {
+ DRM_DEBUG_KMS("No memory space for child devices\n");
+ return;
+ }
+
+ dev_priv->child_dev_num = count;
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ child_dev_ptr = dev_priv->child_dev + count;
+ count++;
+ memcpy((void *)child_dev_ptr, (void *)p_child,
+ sizeof(*p_child));
+ }
+ return;
+}
+
+
/**
* psb_intel_init_bios - initialize VBIOS settings & find VBT
* @dev: DRM device
@@ -236,38 +432,54 @@ bool psb_intel_init_bios(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
struct vbt_header *vbt = NULL;
- struct bdb_header *bdb;
- u8 __iomem *bios;
+ struct bdb_header *bdb = NULL;
+ u8 __iomem *bios = NULL;
size_t size;
int i;
- bios = pci_map_rom(pdev, &size);
- if (!bios)
- return -1;
+ /* XXX Should this validation be moved to intel_opregion.c? */
+ if (dev_priv->opregion.vbt) {
+ struct vbt_header *vbt = dev_priv->opregion.vbt;
+ if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+ DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
+ vbt->signature);
+ bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+ } else
+ dev_priv->opregion.vbt = NULL;
+ }
- /* Scour memory looking for the VBT signature */
- for (i = 0; i + 4 < size; i++) {
- if (!memcmp(bios + i, "$VBT", 4)) {
- vbt = (struct vbt_header *)(bios + i);
- break;
+ if (bdb == NULL) {
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
}
- }
- if (!vbt) {
- dev_err(dev->dev, "VBT signature missing\n");
- pci_unmap_rom(pdev, bios);
- return -1;
+ if (!vbt) {
+ dev_err(dev->dev, "VBT signature missing\n");
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
}
- bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
-
- /* Grab useful general definitions */
+ /* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
+ parse_driver_features(dev_priv, bdb);
parse_lfp_panel_data(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
+ parse_sdvo_device_mapping(dev_priv, bdb);
+ parse_device_mapping(dev_priv, bdb);
parse_backlight_data(dev_priv, bdb);
- pci_unmap_rom(pdev, bios);
+ if (bios)
+ pci_unmap_rom(pdev, bios);
return 0;
}
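The reworked psb_intel_init_bios() now prefers a VBT handed over through the ACPI OpRegion and only falls back to scanning the PCI option ROM for the "$VBT" signature when the OpRegion copy is absent or invalid. The ROM scan itself is just a bounded byte search; a self-contained sketch of it (userspace-portable, with the ROM assumed already mapped into a buffer):

#include <stddef.h>
#include <string.h>

/* Return the offset of the "$VBT" signature inside a mapped ROM image,
 * or -1 when the signature is absent. Mirrors the loop above. */
static long find_vbt_offset(const unsigned char *rom, size_t size)
{
	size_t i;

	for (i = 0; i + 4 < size; i++)
		if (!memcmp(rom + i, "$VBT", 4))
			return (long)i;
	return -1;
}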
@@ -278,26 +490,8 @@ bool psb_intel_init_bios(struct drm_device *dev)
void psb_intel_destroy_bios(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct drm_display_mode *sdvo_lvds_vbt_mode =
- dev_priv->sdvo_lvds_vbt_mode;
- struct drm_display_mode *lfp_lvds_vbt_mode =
- dev_priv->lfp_lvds_vbt_mode;
- struct bdb_lvds_backlight *lvds_bl =
- dev_priv->lvds_bl;
-
- /*free sdvo panel mode*/
- if (sdvo_lvds_vbt_mode) {
- dev_priv->sdvo_lvds_vbt_mode = NULL;
- kfree(sdvo_lvds_vbt_mode);
- }
- if (lfp_lvds_vbt_mode) {
- dev_priv->lfp_lvds_vbt_mode = NULL;
- kfree(lfp_lvds_vbt_mode);
- }
-
- if (lvds_bl) {
- dev_priv->lvds_bl = NULL;
- kfree(lvds_bl);
- }
+ kfree(dev_priv->sdvo_lvds_vbt_mode);
+ kfree(dev_priv->lfp_lvds_vbt_mode);
+ kfree(dev_priv->lvds_bl);
}
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 70f1bf018183..0a738663eb5a 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -127,9 +127,93 @@ struct bdb_general_features {
/* bits 5 */
u8 int_crt_support:1;
u8 int_tv_support:1;
- u8 rsvd11:6; /* finish byte */
+ u8 int_efp_support:1;
+ u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
+ u8 rsvd11:3; /* finish byte */
} __attribute__((packed));
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE 0x00
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_TV 0x09
+#define DEVICE_TYPE_EFP 0x12
+#define DEVICE_TYPE_LFP 0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS 0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
+#define DEVICE_TYPE_TV_COMPOSITE 0x0209
+#define DEVICE_TYPE_TV_MACROVISION 0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
+#define DEVICE_TYPE_TV_SCART 0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
+#define DEVICE_TYPE_EFP_DVI_I 0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
+#define DEVICE_TYPE_LFP_PANELLINK 0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+
+#define DEVICE_CFG_NONE 0x00
+#define DEVICE_CFG_12BIT_DVOB 0x01
+#define DEVICE_CFG_12BIT_DVOC 0x02
+#define DEVICE_CFG_24BIT_DVOBC 0x09
+#define DEVICE_CFG_24BIT_DVOCB 0x0a
+#define DEVICE_CFG_DUAL_DVOB 0x11
+#define DEVICE_CFG_DUAL_DVOC 0x12
+#define DEVICE_CFG_DUAL_DVOBC 0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
+
+#define DEVICE_WIRE_NONE 0x00
+#define DEVICE_WIRE_DVOB 0x01
+#define DEVICE_WIRE_DVOC 0x02
+#define DEVICE_WIRE_DVOBC 0x03
+#define DEVICE_WIRE_DVOBB 0x05
+#define DEVICE_WIRE_DVOCC 0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB 0x01
+#define DEVICE_PORT_DVOC 0x02
+
+struct child_device_config {
+ u16 handle;
+ u16 device_type;
+ u8 device_id[10]; /* ascii string */
+ u16 addin_offset;
+ u8 dvo_port; /* See DEVICE_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ u8 capabilities;
+ u8 dvo_wiring;/* See DEVICE_WIRE_* above */
+ u8 dvo2_wiring;
+ u16 extended_type;
+ u8 dvo_function;
+} __attribute__((packed));
+
+
struct bdb_general_definitions {
/* DDC GPIO */
u8 crt_ddc_gmbus_pin;
@@ -144,13 +228,18 @@ struct bdb_general_definitions {
u8 boot_display[2];
u8 child_dev_size;
- /* device info */
- u8 tv_or_lvds_info[33];
- u8 dev1[33];
- u8 dev2[33];
- u8 dev3[33];
- u8 dev4[33];
- /* may be another device block here on some platforms */
+ /*
+ * Device info:
+ * If TV is present, it'll be at devices[0].
+ * LVDS will be next, either devices[0] or [1], if present.
+ * On some platforms the number of devices is 6, but it could be
+ * as few as 4 if both TV and LVDS are missing.
+ * The device count depends on the size of the general definition
+ * block and is obtained with the following formula:
+ * number = (block_size - sizeof(bdb_general_definitions))/
+ * sizeof(child_device_config);
+ */
+ struct child_device_config devices[0];
};
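Since devices[] is now a flexible array member, the number of entries is derived at parse time from the section size, exactly as the formula in the comment describes. As a worked example with made-up sizes: if the fixed part of the block were 5 bytes and each child_device_config 33 bytes, a 170-byte block would yield (170 - 5) / 33 = 5 child device entries.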
struct bdb_lvds_options {
@@ -302,6 +391,45 @@ struct bdb_sdvo_lvds_options {
u8 panel_misc_bits_4;
} __attribute__((packed));
+struct bdb_driver_features {
+ u8 boot_dev_algorithm:1;
+ u8 block_display_switch:1;
+ u8 allow_display_switch:1;
+ u8 hotplug_dvo:1;
+ u8 dual_view_zoom:1;
+ u8 int15h_hook:1;
+ u8 sprite_in_clone:1;
+ u8 primary_lfp_id:1;
+
+ u16 boot_mode_x;
+ u16 boot_mode_y;
+ u8 boot_mode_bpp;
+ u8 boot_mode_refresh;
+
+ u16 enable_lfp_primary:1;
+ u16 selective_mode_pruning:1;
+ u16 dual_frequency:1;
+ u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+ u16 nt_clone_support:1;
+ u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+ u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+ u16 cui_aspect_scaling:1;
+ u16 preserve_aspect_ratio:1;
+ u16 sdvo_device_power_down:1;
+ u16 crt_hotplug:1;
+ u16 lvds_config:2;
+ u16 tv_hotplug:1;
+ u16 hdmi_config:2;
+
+ u8 static_display:1;
+ u8 reserved2:7;
+ u16 legacy_crt_max_x;
+ u16 legacy_crt_max_y;
+ u8 legacy_crt_max_refresh;
+
+ u8 hdmi_termination;
+ u8 custom_vbt_version;
+} __attribute__((packed));
extern bool psb_intel_init_bios(struct drm_device *dev);
extern void psb_intel_destroy_bios(struct drm_device *dev);
@@ -427,4 +555,21 @@ extern void psb_intel_destroy_bios(struct drm_device *dev);
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_eDP 0x78C6
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* define the PORT for DP output type */
+#define PORT_IDPB 7
+#define PORT_IDPC 8
+#define PORT_IDPD 9
+
#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
index af656787db0f..265ad0de44a6 100644
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -163,142 +163,30 @@ struct backlight_device *mdfld_get_backlight_device(void)
*
* Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
*/
-static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
+static int mdfld_save_display_registers(struct drm_device *dev, int pipenum)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct medfield_state *regs = &dev_priv->regs.mdfld;
+ struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+ const struct psb_offset *map = &dev_priv->regmap[pipenum];
int i;
+ u32 *mipi_val;
/* register */
- u32 dpll_reg = MRST_DPLL_A;
- u32 fp_reg = MRST_FPA0;
- u32 pipeconf_reg = PIPEACONF;
- u32 htot_reg = HTOTAL_A;
- u32 hblank_reg = HBLANK_A;
- u32 hsync_reg = HSYNC_A;
- u32 vtot_reg = VTOTAL_A;
- u32 vblank_reg = VBLANK_A;
- u32 vsync_reg = VSYNC_A;
- u32 pipesrc_reg = PIPEASRC;
- u32 dspstride_reg = DSPASTRIDE;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dsptileoff_reg = DSPATILEOFF;
- u32 dspsize_reg = DSPASIZE;
- u32 dsppos_reg = DSPAPOS;
- u32 dspsurf_reg = DSPASURF;
u32 mipi_reg = MIPI;
- u32 dspcntr_reg = DSPACNTR;
- u32 dspstatus_reg = PIPEASTAT;
- u32 palette_reg = PALETTE_A;
-
- /* pointer to values */
- u32 *dpll_val = &regs->saveDPLL_A;
- u32 *fp_val = &regs->saveFPA0;
- u32 *pipeconf_val = &regs->savePIPEACONF;
- u32 *htot_val = &regs->saveHTOTAL_A;
- u32 *hblank_val = &regs->saveHBLANK_A;
- u32 *hsync_val = &regs->saveHSYNC_A;
- u32 *vtot_val = &regs->saveVTOTAL_A;
- u32 *vblank_val = &regs->saveVBLANK_A;
- u32 *vsync_val = &regs->saveVSYNC_A;
- u32 *pipesrc_val = &regs->savePIPEASRC;
- u32 *dspstride_val = &regs->saveDSPASTRIDE;
- u32 *dsplinoff_val = &regs->saveDSPALINOFF;
- u32 *dsptileoff_val = &regs->saveDSPATILEOFF;
- u32 *dspsize_val = &regs->saveDSPASIZE;
- u32 *dsppos_val = &regs->saveDSPAPOS;
- u32 *dspsurf_val = &regs->saveDSPASURF;
- u32 *mipi_val = &regs->saveMIPI;
- u32 *dspcntr_val = &regs->saveDSPACNTR;
- u32 *dspstatus_val = &regs->saveDSPASTATUS;
- u32 *palette_val = regs->save_palette_a;
-
- switch (pipe) {
+
+ switch (pipenum) {
case 0:
+ mipi_val = &regs->saveMIPI;
break;
case 1:
- /* regester */
- dpll_reg = MDFLD_DPLL_B;
- fp_reg = MDFLD_DPLL_DIV0;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipesrc_reg = PIPEBSRC;
- dspstride_reg = DSPBSTRIDE;
- dsplinoff_reg = DSPBLINOFF;
- dsptileoff_reg = DSPBTILEOFF;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- dspsurf_reg = DSPBSURF;
- dspcntr_reg = DSPBCNTR;
- dspstatus_reg = PIPEBSTAT;
- palette_reg = PALETTE_B;
-
- /* values */
- dpll_val = &regs->saveDPLL_B;
- fp_val = &regs->saveFPB0;
- pipeconf_val = &regs->savePIPEBCONF;
- htot_val = &regs->saveHTOTAL_B;
- hblank_val = &regs->saveHBLANK_B;
- hsync_val = &regs->saveHSYNC_B;
- vtot_val = &regs->saveVTOTAL_B;
- vblank_val = &regs->saveVBLANK_B;
- vsync_val = &regs->saveVSYNC_B;
- pipesrc_val = &regs->savePIPEBSRC;
- dspstride_val = &regs->saveDSPBSTRIDE;
- dsplinoff_val = &regs->saveDSPBLINOFF;
- dsptileoff_val = &regs->saveDSPBTILEOFF;
- dspsize_val = &regs->saveDSPBSIZE;
- dsppos_val = &regs->saveDSPBPOS;
- dspsurf_val = &regs->saveDSPBSURF;
- dspcntr_val = &regs->saveDSPBCNTR;
- dspstatus_val = &regs->saveDSPBSTATUS;
- palette_val = regs->save_palette_b;
+ mipi_val = &regs->saveMIPI;
break;
case 2:
/* register */
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- pipesrc_reg = PIPECSRC;
- dspstride_reg = DSPCSTRIDE;
- dsplinoff_reg = DSPCLINOFF;
- dsptileoff_reg = DSPCTILEOFF;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- dspsurf_reg = DSPCSURF;
mipi_reg = MIPI_C;
- dspcntr_reg = DSPCCNTR;
- dspstatus_reg = PIPECSTAT;
- palette_reg = PALETTE_C;
-
/* pointer to values */
- pipeconf_val = &regs->savePIPECCONF;
- htot_val = &regs->saveHTOTAL_C;
- hblank_val = &regs->saveHBLANK_C;
- hsync_val = &regs->saveHSYNC_C;
- vtot_val = &regs->saveVTOTAL_C;
- vblank_val = &regs->saveVBLANK_C;
- vsync_val = &regs->saveVSYNC_C;
- pipesrc_val = &regs->savePIPECSRC;
- dspstride_val = &regs->saveDSPCSTRIDE;
- dsplinoff_val = &regs->saveDSPCLINOFF;
- dsptileoff_val = &regs->saveDSPCTILEOFF;
- dspsize_val = &regs->saveDSPCSIZE;
- dsppos_val = &regs->saveDSPCPOS;
- dspsurf_val = &regs->saveDSPCSURF;
mipi_val = &regs->saveMIPI_C;
- dspcntr_val = &regs->saveDSPCCNTR;
- dspstatus_val = &regs->saveDSPCSTATUS;
- palette_val = regs->save_palette_c;
break;
default:
DRM_ERROR("%s, invalid pipe number.\n", __func__);
@@ -306,30 +194,30 @@ static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
}
/* Pipe & plane A info */
- *dpll_val = PSB_RVDC32(dpll_reg);
- *fp_val = PSB_RVDC32(fp_reg);
- *pipeconf_val = PSB_RVDC32(pipeconf_reg);
- *htot_val = PSB_RVDC32(htot_reg);
- *hblank_val = PSB_RVDC32(hblank_reg);
- *hsync_val = PSB_RVDC32(hsync_reg);
- *vtot_val = PSB_RVDC32(vtot_reg);
- *vblank_val = PSB_RVDC32(vblank_reg);
- *vsync_val = PSB_RVDC32(vsync_reg);
- *pipesrc_val = PSB_RVDC32(pipesrc_reg);
- *dspstride_val = PSB_RVDC32(dspstride_reg);
- *dsplinoff_val = PSB_RVDC32(dsplinoff_reg);
- *dsptileoff_val = PSB_RVDC32(dsptileoff_reg);
- *dspsize_val = PSB_RVDC32(dspsize_reg);
- *dsppos_val = PSB_RVDC32(dsppos_reg);
- *dspsurf_val = PSB_RVDC32(dspsurf_reg);
- *dspcntr_val = PSB_RVDC32(dspcntr_reg);
- *dspstatus_val = PSB_RVDC32(dspstatus_reg);
+ pipe->dpll = PSB_RVDC32(map->dpll);
+ pipe->fp0 = PSB_RVDC32(map->fp0);
+ pipe->conf = PSB_RVDC32(map->conf);
+ pipe->htotal = PSB_RVDC32(map->htotal);
+ pipe->hblank = PSB_RVDC32(map->hblank);
+ pipe->hsync = PSB_RVDC32(map->hsync);
+ pipe->vtotal = PSB_RVDC32(map->vtotal);
+ pipe->vblank = PSB_RVDC32(map->vblank);
+ pipe->vsync = PSB_RVDC32(map->vsync);
+ pipe->src = PSB_RVDC32(map->src);
+ pipe->stride = PSB_RVDC32(map->stride);
+ pipe->linoff = PSB_RVDC32(map->linoff);
+ pipe->tileoff = PSB_RVDC32(map->tileoff);
+ pipe->size = PSB_RVDC32(map->size);
+ pipe->pos = PSB_RVDC32(map->pos);
+ pipe->surf = PSB_RVDC32(map->surf);
+ pipe->cntr = PSB_RVDC32(map->cntr);
+ pipe->status = PSB_RVDC32(map->status);
/*save palette (gamma) */
for (i = 0; i < 256; i++)
- palette_val[i] = PSB_RVDC32(palette_reg + (i << 2));
+ pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2));
- if (pipe == 1) {
+ if (pipenum == 1) {
regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
@@ -349,7 +237,7 @@ static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
*
* Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
*/
-static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
+static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
{
/* To get panel out of ULPS mode. */
u32 temp = 0;
@@ -357,142 +245,30 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
struct drm_psb_private *dev_priv = dev->dev_private;
struct mdfld_dsi_config *dsi_config = NULL;
struct medfield_state *regs = &dev_priv->regs.mdfld;
- u32 i = 0;
- u32 dpll = 0;
+ struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+ const struct psb_offset *map = &dev_priv->regmap[pipenum];
+ u32 i;
+ u32 dpll;
u32 timeout = 0;
- /* regester */
- u32 dpll_reg = MRST_DPLL_A;
- u32 fp_reg = MRST_FPA0;
- u32 pipeconf_reg = PIPEACONF;
- u32 htot_reg = HTOTAL_A;
- u32 hblank_reg = HBLANK_A;
- u32 hsync_reg = HSYNC_A;
- u32 vtot_reg = VTOTAL_A;
- u32 vblank_reg = VBLANK_A;
- u32 vsync_reg = VSYNC_A;
- u32 pipesrc_reg = PIPEASRC;
- u32 dspstride_reg = DSPASTRIDE;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dsptileoff_reg = DSPATILEOFF;
- u32 dspsize_reg = DSPASIZE;
- u32 dsppos_reg = DSPAPOS;
- u32 dspsurf_reg = DSPASURF;
- u32 dspstatus_reg = PIPEASTAT;
+ /* register */
u32 mipi_reg = MIPI;
- u32 dspcntr_reg = DSPACNTR;
- u32 palette_reg = PALETTE_A;
/* values */
- u32 dpll_val = regs->saveDPLL_A & ~DPLL_VCO_ENABLE;
- u32 fp_val = regs->saveFPA0;
- u32 pipeconf_val = regs->savePIPEACONF;
- u32 htot_val = regs->saveHTOTAL_A;
- u32 hblank_val = regs->saveHBLANK_A;
- u32 hsync_val = regs->saveHSYNC_A;
- u32 vtot_val = regs->saveVTOTAL_A;
- u32 vblank_val = regs->saveVBLANK_A;
- u32 vsync_val = regs->saveVSYNC_A;
- u32 pipesrc_val = regs->savePIPEASRC;
- u32 dspstride_val = regs->saveDSPASTRIDE;
- u32 dsplinoff_val = regs->saveDSPALINOFF;
- u32 dsptileoff_val = regs->saveDSPATILEOFF;
- u32 dspsize_val = regs->saveDSPASIZE;
- u32 dsppos_val = regs->saveDSPAPOS;
- u32 dspsurf_val = regs->saveDSPASURF;
- u32 dspstatus_val = regs->saveDSPASTATUS;
+ u32 dpll_val = pipe->dpll;
u32 mipi_val = regs->saveMIPI;
- u32 dspcntr_val = regs->saveDSPACNTR;
- u32 *palette_val = regs->save_palette_a;
- switch (pipe) {
+ switch (pipenum) {
case 0:
+ dpll_val &= ~DPLL_VCO_ENABLE;
dsi_config = dev_priv->dsi_configs[0];
break;
case 1:
- /* regester */
- dpll_reg = MDFLD_DPLL_B;
- fp_reg = MDFLD_DPLL_DIV0;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipesrc_reg = PIPEBSRC;
- dspstride_reg = DSPBSTRIDE;
- dsplinoff_reg = DSPBLINOFF;
- dsptileoff_reg = DSPBTILEOFF;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- dspsurf_reg = DSPBSURF;
- dspcntr_reg = DSPBCNTR;
- dspstatus_reg = PIPEBSTAT;
- palette_reg = PALETTE_B;
-
- /* values */
- dpll_val = regs->saveDPLL_B & ~DPLL_VCO_ENABLE;
- fp_val = regs->saveFPB0;
- pipeconf_val = regs->savePIPEBCONF;
- htot_val = regs->saveHTOTAL_B;
- hblank_val = regs->saveHBLANK_B;
- hsync_val = regs->saveHSYNC_B;
- vtot_val = regs->saveVTOTAL_B;
- vblank_val = regs->saveVBLANK_B;
- vsync_val = regs->saveVSYNC_B;
- pipesrc_val = regs->savePIPEBSRC;
- dspstride_val = regs->saveDSPBSTRIDE;
- dsplinoff_val = regs->saveDSPBLINOFF;
- dsptileoff_val = regs->saveDSPBTILEOFF;
- dspsize_val = regs->saveDSPBSIZE;
- dsppos_val = regs->saveDSPBPOS;
- dspsurf_val = regs->saveDSPBSURF;
- dspcntr_val = regs->saveDSPBCNTR;
- dspstatus_val = regs->saveDSPBSTATUS;
- palette_val = regs->save_palette_b;
+ dpll_val &= ~DPLL_VCO_ENABLE;
break;
case 2:
- /* regester */
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- pipesrc_reg = PIPECSRC;
- dspstride_reg = DSPCSTRIDE;
- dsplinoff_reg = DSPCLINOFF;
- dsptileoff_reg = DSPCTILEOFF;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- dspsurf_reg = DSPCSURF;
mipi_reg = MIPI_C;
- dspcntr_reg = DSPCCNTR;
- dspstatus_reg = PIPECSTAT;
- palette_reg = PALETTE_C;
-
- /* values */
- pipeconf_val = regs->savePIPECCONF;
- htot_val = regs->saveHTOTAL_C;
- hblank_val = regs->saveHBLANK_C;
- hsync_val = regs->saveHSYNC_C;
- vtot_val = regs->saveVTOTAL_C;
- vblank_val = regs->saveVBLANK_C;
- vsync_val = regs->saveVSYNC_C;
- pipesrc_val = regs->savePIPECSRC;
- dspstride_val = regs->saveDSPCSTRIDE;
- dsplinoff_val = regs->saveDSPCLINOFF;
- dsptileoff_val = regs->saveDSPCTILEOFF;
- dspsize_val = regs->saveDSPCSIZE;
- dsppos_val = regs->saveDSPCPOS;
- dspsurf_val = regs->saveDSPCSURF;
mipi_val = regs->saveMIPI_C;
- dspcntr_val = regs->saveDSPCCNTR;
- dspstatus_val = regs->saveDSPCSTATUS;
- palette_val = regs->save_palette_c;
-
dsi_config = dev_priv->dsi_configs[1];
break;
default:
@@ -503,14 +279,14 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
/*make sure VGA plane is off. it initializes to on after reset!*/
PSB_WVDC32(0x80000000, VGACNTRL);
- if (pipe == 1) {
- PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg);
- PSB_RVDC32(dpll_reg);
+ if (pipenum == 1) {
+ PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll);
+ PSB_RVDC32(map->dpll);
- PSB_WVDC32(fp_val, fp_reg);
+ PSB_WVDC32(pipe->fp0, map->fp0);
} else {
- dpll = PSB_RVDC32(dpll_reg);
+ dpll = PSB_RVDC32(map->dpll);
if (!(dpll & DPLL_VCO_ENABLE)) {
@@ -518,23 +294,23 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
before enabling the VCO */
if (dpll & MDFLD_PWR_GATE_EN) {
dpll &= ~MDFLD_PWR_GATE_EN;
- PSB_WVDC32(dpll, dpll_reg);
+ PSB_WVDC32(dpll, map->dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
- PSB_WVDC32(fp_val, fp_reg);
- PSB_WVDC32(dpll_val, dpll_reg);
+ PSB_WVDC32(pipe->fp0, map->fp0);
+ PSB_WVDC32(dpll_val, map->dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
dpll_val |= DPLL_VCO_ENABLE;
- PSB_WVDC32(dpll_val, dpll_reg);
- PSB_RVDC32(dpll_reg);
+ PSB_WVDC32(dpll_val, map->dpll);
+ PSB_RVDC32(map->dpll);
/* wait for DSI PLL to lock */
while (timeout < 20000 &&
- !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) {
udelay(150);
timeout++;
}
@@ -547,28 +323,28 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
}
}
/* Restore mode */
- PSB_WVDC32(htot_val, htot_reg);
- PSB_WVDC32(hblank_val, hblank_reg);
- PSB_WVDC32(hsync_val, hsync_reg);
- PSB_WVDC32(vtot_val, vtot_reg);
- PSB_WVDC32(vblank_val, vblank_reg);
- PSB_WVDC32(vsync_val, vsync_reg);
- PSB_WVDC32(pipesrc_val, pipesrc_reg);
- PSB_WVDC32(dspstatus_val, dspstatus_reg);
+ PSB_WVDC32(pipe->htotal, map->htotal);
+ PSB_WVDC32(pipe->hblank, map->hblank);
+ PSB_WVDC32(pipe->hsync, map->hsync);
+ PSB_WVDC32(pipe->vtotal, map->vtotal);
+ PSB_WVDC32(pipe->vblank, map->vblank);
+ PSB_WVDC32(pipe->vsync, map->vsync);
+ PSB_WVDC32(pipe->src, map->src);
+ PSB_WVDC32(pipe->status, map->status);
/*set up the plane*/
- PSB_WVDC32(dspstride_val, dspstride_reg);
- PSB_WVDC32(dsplinoff_val, dsplinoff_reg);
- PSB_WVDC32(dsptileoff_val, dsptileoff_reg);
- PSB_WVDC32(dspsize_val, dspsize_reg);
- PSB_WVDC32(dsppos_val, dsppos_reg);
- PSB_WVDC32(dspsurf_val, dspsurf_reg);
-
- if (pipe == 1) {
+ PSB_WVDC32(pipe->stride, map->stride);
+ PSB_WVDC32(pipe->linoff, map->linoff);
+ PSB_WVDC32(pipe->tileoff, map->tileoff);
+ PSB_WVDC32(pipe->size, map->size);
+ PSB_WVDC32(pipe->pos, map->pos);
+ PSB_WVDC32(pipe->surf, map->surf);
+
+ if (pipenum == 1) {
/* restore palette (gamma) */
/*DRM_UDELAY(50000); */
for (i = 0; i < 256; i++)
- PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
+ PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
@@ -578,7 +354,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
/*TODO: resume pipe*/
/*enable the plane*/
- PSB_WVDC32(dspcntr_val & ~DISPLAY_PLANE_ENABLE, dspcntr_reg);
+ PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr);
return 0;
}
@@ -588,7 +364,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
/*setup MIPI adapter + MIPI IP registers*/
if (dsi_config)
- mdfld_dsi_controller_init(dsi_config, pipe);
+ mdfld_dsi_controller_init(dsi_config, pipenum);
if (in_atomic() || in_interrupt())
mdelay(20);
@@ -596,7 +372,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
msleep(20);
/*enable the plane*/
- PSB_WVDC32(dspcntr_val, dspcntr_reg);
+ PSB_WVDC32(pipe->cntr, map->cntr);
if (in_atomic() || in_interrupt())
mdelay(20);
@@ -625,12 +401,12 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
mdelay(1);
/*enable the pipe*/
- PSB_WVDC32(pipeconf_val, pipeconf_reg);
+ PSB_WVDC32(pipe->conf, map->conf);
/* restore palette (gamma) */
/*DRM_UDELAY(50000); */
for (i = 0; i < 256; i++)
- PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
+ PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
return 0;
}
@@ -667,14 +443,98 @@ static int mdfld_power_up(struct drm_device *dev)
return 0;
}
+/* Medfield */
+static const struct psb_offset mdfld_regmap[3] = {
+ {
+ .fp0 = MRST_FPA0,
+ .fp1 = MRST_FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .surf = DSPASURF,
+ .addr = MRST_DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = MDFLD_DPLL_DIV0,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = MDFLD_DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .surf = DSPBSURF,
+ .addr = MRST_DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ },
+ {
+ .fp0 = MRST_FPA0, /* This is what the old code did ?? */
+ .cntr = DSPCCNTR,
+ .conf = PIPECCONF,
+ .src = PIPECSRC,
+ /* No DPLL_C */
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_C,
+ .hblank = HBLANK_C,
+ .hsync = HSYNC_C,
+ .vtotal = VTOTAL_C,
+ .vblank = VBLANK_C,
+ .vsync = VSYNC_C,
+ .stride = DSPCSTRIDE,
+ .size = DSPCSIZE,
+ .pos = DSPCPOS,
+ .surf = DSPCSURF,
+ .addr = MDFLD_DSPCBASE,
+ .status = PIPECSTAT,
+ .linoff = DSPCLINOFF,
+ .tileoff = DSPCTILEOFF,
+ .palette = PALETTE_C,
+ },
+};
+
+static int mdfld_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->regmap = mdfld_regmap;
+ return mid_chip_setup(dev);
+}
+
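Most of this patch is the same mechanical transformation: per-pipe switch blocks are replaced by one lookup into the psb_offset table that mdfld_chip_setup() installs above. The pattern in miniature (a sketch with stand-in fields and illustrative offsets, not the full table):

struct psb_offset_min {
	unsigned int conf;	/* pipe configuration register */
	unsigned int cntr;	/* display plane control register */
};

static const struct psb_offset_min regmap_min[3] = {
	{ .conf = 0x70008, .cntr = 0x70180 },	/* pipe A (example offsets) */
	{ .conf = 0x71008, .cntr = 0x71180 },	/* pipe B */
	{ .conf = 0x72008, .cntr = 0x72180 },	/* pipe C */
};

/* Where the old code did "switch (pipe) { case 0: reg = PIPEACONF; ... }"
 * at every call site, the new code indexes the map once. */
static unsigned int pipe_conf_reg(int pipe)
{
	return regmap_min[pipe].conf;
}

Centralizing the offsets in one table also makes copy-paste slips, such as a pipe C entry pointing at a pipe B register, visible in a single place instead of scattered across a dozen switch statements.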
const struct psb_ops mdfld_chip_ops = {
.name = "mdfld",
.accel_2d = 0,
.pipes = 3,
.crtcs = 3,
+ .lvds_mask = (1 << 1),
+ .hdmi_mask = (1 << 1),
+ .cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
- .chip_setup = mid_chip_setup,
+ .chip_setup = mdfld_chip_setup,
.crtc_helper = &mdfld_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d52358b744a0..b34ff097b979 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -869,7 +869,6 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
mdfld_set_pipe_timing(dsi_config, pipe);
REG_WRITE(DSPABASE, 0x00);
- REG_WRITE(DSPASTRIDE, (mode->hdisplay * 4));
REG_WRITE(DSPASIZE,
((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index baa0e14165e0..489ffd2c66e5 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -605,6 +605,8 @@ int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
struct mdfld_dsi_config *dsi_config =
mdfld_dsi_get_config(dsi_connector);
struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 mipi_val = 0;
if (!dsi_connector) {
@@ -632,21 +634,13 @@ int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
/*init regs*/
- if (pipe == 0) {
- pkg_sender->dpll_reg = MRST_DPLL_A;
- pkg_sender->dspcntr_reg = DSPACNTR;
- pkg_sender->pipeconf_reg = PIPEACONF;
- pkg_sender->dsplinoff_reg = DSPALINOFF;
- pkg_sender->dspsurf_reg = DSPASURF;
- pkg_sender->pipestat_reg = PIPEASTAT;
- } else if (pipe == 2) {
- pkg_sender->dpll_reg = MRST_DPLL_A;
- pkg_sender->dspcntr_reg = DSPCCNTR;
- pkg_sender->pipeconf_reg = PIPECCONF;
- pkg_sender->dsplinoff_reg = DSPCLINOFF;
- pkg_sender->dspsurf_reg = DSPCSURF;
- pkg_sender->pipestat_reg = PIPECSTAT;
- }
+ /* FIXME: should just copy the regmap ptr ? */
+ pkg_sender->dpll_reg = map->dpll;
+ pkg_sender->dspcntr_reg = map->cntr;
+ pkg_sender->pipeconf_reg = map->conf;
+ pkg_sender->dsplinoff_reg = map->linoff;
+ pkg_sender->dspsurf_reg = map->surf;
+ pkg_sender->pipestat_reg = map->status;
pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);
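The FIXME above points at a simpler design: rather than copying six offsets out of the per-pipe map into the sender, the sender could keep a single pointer to the map. A hedged sketch of that alternative (the field names are assumptions, not the driver's current layout):

struct mdfld_dsi_pkg_sender_alt {
	const struct psb_offset *map;	/* per-pipe register offsets */
	/* ... remaining sender state ... */
};

Register accesses would then read through the map directly, e.g. REG_WRITE(sender->map->surf, addr) in place of the cached sender->dspsurf_reg.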
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index a35a2921bdf7..3f3cd619c79f 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -50,17 +50,14 @@ struct mrst_clock_t {
void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int count, temp;
- u32 pipeconf_reg = PIPEACONF;
switch (pipe) {
case 0:
- break;
case 1:
- pipeconf_reg = PIPEBCONF;
- break;
case 2:
- pipeconf_reg = PIPECCONF;
break;
default:
DRM_ERROR("Illegal Pipe Number.\n");
@@ -73,7 +70,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
/* Wait for the pipe disable to take effect. */
for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_PIPE_STATE) == 0)
break;
}
@@ -81,17 +78,14 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int count, temp;
- u32 pipeconf_reg = PIPEACONF;
switch (pipe) {
case 0:
- break;
case 1:
- pipeconf_reg = PIPEBCONF;
- break;
case 2:
- pipeconf_reg = PIPECCONF;
break;
default:
DRM_ERROR("Illegal Pipe Number.\n");
@@ -104,7 +98,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
/* Wait for the pipe enable to take effect. */
for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_PIPE_STATE) == 1)
break;
}
@@ -189,15 +183,12 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dsplinoff = DSPALINOFF;
- int dspsurf = DSPASURF;
- int dspstride = DSPASTRIDE;
- int dspcntr_reg = DSPACNTR;
u32 dspcntr;
int ret;
@@ -215,23 +206,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (ret)
return ret;
- switch (pipe) {
- case 0:
- dsplinoff = DSPALINOFF;
- break;
- case 1:
- dsplinoff = DSPBLINOFF;
- dspsurf = DSPBSURF;
- dspstride = DSPBSTRIDE;
- dspcntr_reg = DSPBCNTR;
- break;
- case 2:
- dsplinoff = DSPCLINOFF;
- dspsurf = DSPCSURF;
- dspstride = DSPCSTRIDE;
- dspcntr_reg = DSPCCNTR;
- break;
- default:
+ if (pipe > 2) {
DRM_ERROR("Illegal Pipe Number.\n");
return -EINVAL;
}
@@ -242,8 +217,8 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -261,14 +236,14 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
}
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
start, offset, x, y);
- REG_WRITE(dsplinoff, offset);
- REG_READ(dsplinoff);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
+ REG_WRITE(map->linoff, offset);
+ REG_READ(map->linoff);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
gma_power_end(dev);
@@ -281,78 +256,56 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
*/
void mdfld_disable_crtc(struct drm_device *dev, int pipe)
{
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int dspbase_reg = MRST_DSPABASE;
- int pipeconf_reg = PIPEACONF;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
dev_dbg(dev->dev, "pipe = %d\n", pipe);
- switch (pipe) {
- case 0:
- break;
- case 1:
- dpll_reg = MDFLD_DPLL_B;
- dspcntr_reg = DSPBCNTR;
- dspbase_reg = DSPBSURF;
- pipeconf_reg = PIPEBCONF;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- dspbase_reg = MDFLD_DSPCBASE;
- pipeconf_reg = PIPECCONF;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return;
- }
-
if (pipe != 1)
mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* FIXME_JLIU7 MDFLD_PO revisit */
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
temp &= ~PIPEACONF_ENABLE;
temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(pipeconf_reg, temp);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp);
+ REG_READ(map->conf);
/* Wait for the pipe disable to take effect. */
mdfldWaitForPipeDisable(dev, pipe);
}
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if (temp & DPLL_VCO_ENABLE) {
if ((pipe != 1 &&
!((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
& PIPEACONF_ENABLE)) || pipe == 1) {
temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to turn off. */
/* FIXME_MDFLD PO may need more delay */
udelay(500);
if (!(temp & MDFLD_PWR_GATE_EN)) {
/* gating power of DPLL */
- REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
+ REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(5000);
}
@@ -373,41 +326,15 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int dspbase_reg = MRST_DSPABASE;
- int pipeconf_reg = PIPEACONF;
- u32 pipestat_reg = PIPEASTAT;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 pipeconf = dev_priv->pipeconf[pipe];
u32 temp;
int timeout = 0;
dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
-/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
-/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
-
- switch (pipe) {
- case 0:
- break;
- case 1:
- dpll_reg = DPLL_B;
- dspcntr_reg = DSPBCNTR;
- dspbase_reg = MRST_DSPBBASE;
- pipeconf_reg = PIPEBCONF;
- dpll_reg = MDFLD_DPLL_B;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- dspbase_reg = MDFLD_DSPCBASE;
- pipeconf_reg = PIPECCONF;
- pipestat_reg = PIPECSTAT;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return;
- }
+ /* Note: the old code used the pipe A status register for pipe B,
+ but that appears to be a bug */
if (!gma_power_begin(dev, true))
return;
@@ -420,25 +347,25 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
/* When ungating power of the DPLL, we need to wait 0.5us
before enabling the VCO */
if (temp & MDFLD_PWR_GATE_EN) {
temp &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(dpll_reg, temp);
+ REG_WRITE(map->dpll, temp);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/**
* wait for DSI PLL to lock
@@ -446,25 +373,25 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
* since both MIPI pipes share the same PLL.
*/
while ((pipe != 2) && (timeout < 20000) &&
- !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
udelay(150);
timeout++;
}
}
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0) {
- REG_WRITE(pipeconf_reg, pipeconf);
+ REG_WRITE(map->conf, pipeconf);
/* Wait for the pipe enable to take effect. */
mdfldWaitForPipeEnable(dev, pipe);
@@ -473,39 +400,39 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
/*workaround for sighting 3741701 Random X blank display*/
/*perform w/a in video mode only on pipe A or C*/
if (pipe == 0 || pipe == 2) {
- REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
+ REG_WRITE(map->status, REG_READ(map->status));
msleep(100);
- if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg))
+ if (PIPE_VBLANK_STATUS & REG_READ(map->status))
dev_dbg(dev->dev, "OK");
else {
dev_dbg(dev->dev, "STUCK!!!!");
/*shutdown controller*/
- temp = REG_READ(dspcntr_reg);
- REG_WRITE(dspcntr_reg,
+ temp = REG_READ(map->cntr);
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
/*mdfld_dsi_dpi_shut_down(dev, pipe);*/
REG_WRITE(0xb048, 1);
msleep(100);
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
temp &= ~PIPEACONF_ENABLE;
- REG_WRITE(pipeconf_reg, temp);
+ REG_WRITE(map->conf, temp);
msleep(100); /*wait for pipe disable*/
REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
msleep(100);
REG_WRITE(0xb004, REG_READ(0xb004));
/* try to bring the controller back up again*/
REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
- temp = REG_READ(dspcntr_reg);
- REG_WRITE(dspcntr_reg,
+ temp = REG_READ(map->cntr);
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
/*mdfld_dsi_dpi_turn_on(dev, pipe);*/
REG_WRITE(0xb048, 2);
msleep(100);
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
temp |= PIPEACONF_ENABLE;
- REG_WRITE(pipeconf_reg, temp);
+ REG_WRITE(map->conf, temp);
}
}
@@ -529,35 +456,35 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
temp &= ~PIPEACONF_ENABLE;
temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(pipeconf_reg, temp);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp);
+ REG_READ(map->conf);
/* Wait for the pipe disable to take effect. */
mdfldWaitForPipeDisable(dev, pipe);
}
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if (temp & DPLL_VCO_ENABLE) {
if ((pipe != 1 && !((REG_READ(PIPEACONF)
| REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
|| pipe == 1) {
temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to turn off. */
/* FIXME_MDFLD PO may need more delay */
udelay(500);
@@ -764,21 +691,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
- int fp_reg = MRST_FPA0;
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int pipeconf_reg = PIPEACONF;
- int htot_reg = HTOTAL_A;
- int hblank_reg = HBLANK_A;
- int hsync_reg = HSYNC_A;
- int vtot_reg = VTOTAL_A;
- int vblank_reg = VBLANK_A;
- int vsync_reg = VSYNC_A;
- int dspsize_reg = DSPASIZE;
- int dsppos_reg = DSPAPOS;
- int pipesrc_reg = PIPEASRC;
- u32 *pipeconf = &dev_priv->pipeconf[pipe];
- u32 *dspcntr = &dev_priv->dspcntr[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
clk_tmp = 0;
@@ -806,45 +719,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
}
#endif
- switch (pipe) {
- case 0:
- break;
- case 1:
- fp_reg = FPB0;
- dpll_reg = DPLL_B;
- dspcntr_reg = DSPBCNTR;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- pipesrc_reg = PIPEBSRC;
- fp_reg = MDFLD_DPLL_DIV0;
- dpll_reg = MDFLD_DPLL_B;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- pipesrc_reg = PIPECSRC;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return 0;
- }
-
ret = check_fb(crtc->fb);
if (ret)
return ret;
@@ -929,21 +803,21 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
* contained within the displayable area of the screen image
* (frame buffer).
*/
- REG_WRITE(dspsize_reg, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
+ REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
| (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
/* Set the CRTC with encoder mode. */
- REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
+ REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16)
| (mode->crtc_vdisplay - 1));
} else {
- REG_WRITE(dspsize_reg,
+ REG_WRITE(map->size,
((mode->crtc_vdisplay - 1) << 16) |
(mode->crtc_hdisplay - 1));
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->src,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
}
- REG_WRITE(dsppos_reg, 0);
+ REG_WRITE(map->pos, 0);
if (psb_intel_encoder)
drm_connector_property_get_value(connector,
@@ -961,34 +835,34 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
offsetY = (adjusted_mode->crtc_vdisplay -
mode->crtc_vdisplay) / 2;
- REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start -
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start -
offsetX - 1) |
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start -
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start -
offsetX - 1) |
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start -
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start -
offsetY - 1) |
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start -
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start -
offsetY - 1) |
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
} else {
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
}
@@ -1000,12 +874,12 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
}
/* setup pipeconf */
- *pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
+ dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
/* Set up the display plane register */
- *dspcntr = REG_READ(dspcntr_reg);
- *dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
- *dspcntr |= DISPLAY_PLANE_ENABLE;
+ dev_priv->dspcntr[pipe] = REG_READ(map->cntr);
+ dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS;
+ dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE;
if (is_mipi2)
goto mrst_crtc_mode_set_exit;
@@ -1070,21 +944,21 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
clock.p1, m_conv);
}
- dpll = REG_READ(dpll_reg);
+ dpll = REG_READ(map->dpll);
if (dpll & DPLL_VCO_ENABLE) {
dpll &= ~DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
/* reset M1, N1 & P1 */
- REG_WRITE(fp_reg, 0);
+ REG_WRITE(map->fp0, 0);
dpll &= ~MDFLD_P1_MASK;
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->dpll, dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
@@ -1093,7 +967,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
* enable the VCO */
if (dpll & MDFLD_PWR_GATE_EN) {
dpll &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->dpll, dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
@@ -1134,18 +1008,18 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
fp = 0x000000c1;
}
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
dpll |= DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* wait for DSI PLL to lock */
while (timeout < 20000 &&
- !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
udelay(150);
timeout++;
}
@@ -1155,11 +1029,11 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
- REG_WRITE(pipeconf_reg, *pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, dev_priv->pipeconf[pipe]);
+ REG_READ(map->conf);
/* Wait for the pipe enable to take effect. */
- REG_WRITE(dspcntr_reg, *dspcntr);
+ REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
psb_intel_wait_for_vblank(dev);
mrst_crtc_mode_set_exit:
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 5eee9ad80da4..b2a790bd9899 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -118,139 +118,214 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
dev_priv->platform_rev_id);
}
+struct vbt_header {
+ u32 signature;
+ u8 revision;
+} __packed;
+
+/* The same for r0 and r1 */
+struct vbt_r0 {
+ struct vbt_header vbt_header;
+ u8 size;
+ u8 checksum;
+} __packed;
+
+struct vbt_r10 {
+ struct vbt_header vbt_header;
+ u8 checksum;
+ u16 size;
+ u8 panel_count;
+ u8 primary_panel_idx;
+ u8 secondary_panel_idx;
+ u8 __reserved[5];
+} __packed;
+
+static int read_vbt_r0(u32 addr, struct vbt_r0 *vbt)
+{
+ void __iomem *vbt_virtual;
+
+ vbt_virtual = ioremap(addr, sizeof(*vbt));
+ if (vbt_virtual == NULL)
+ return -1;
+
+ memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
+ iounmap(vbt_virtual);
+
+ return 0;
+}
+
+static int read_vbt_r10(u32 addr, struct vbt_r10 *vbt)
+{
+ void __iomem *vbt_virtual;
+
+ vbt_virtual = ioremap(addr, sizeof(*vbt));
+ if (!vbt_virtual)
+ return -1;
+
+ memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
+ iounmap(vbt_virtual);
+
+ return 0;
+}
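read_vbt_r0() and read_vbt_r10() are the same idiom specialised twice: ioremap() the physical address, memcpy_fromio() the table into a local struct, iounmap() immediately. A generic form of the helper (a sketch, not part of the driver) would be:

#include <linux/io.h>
#include <linux/types.h>

/* Copy len bytes of a firmware table at physical address addr into buf.
 * Returns 0 on success, -1 when the mapping fails. */
static int read_fw_table(u32 addr, void *buf, size_t len)
{
	void __iomem *virt = ioremap(addr, len);

	if (!virt)
		return -1;
	memcpy_fromio(buf, virt, len);
	iounmap(virt);
	return 0;
}

Both existing helpers then reduce to read_fw_table(addr, vbt, sizeof(*vbt)).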
+
+static int mid_get_vbt_data_r0(struct drm_psb_private *dev_priv, u32 addr)
+{
+ struct vbt_r0 vbt;
+ void __iomem *gct_virtual;
+ struct gct_r0 gct;
+ u8 bpi;
+
+ if (read_vbt_r0(addr, &vbt))
+ return -1;
+
+ gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
+ if (!gct_virtual)
+ return -1;
+ memcpy_fromio(&gct, gct_virtual, sizeof(gct));
+ iounmap(gct_virtual);
+
+ bpi = gct.PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt = gct.PD.PanelType;
+ dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
+ dev_priv->gct_data.Panel_Port_Control =
+ gct.panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ gct.panel[bpi].Panel_MIPI_Display_Descriptor;
+
+ return 0;
+}
+
+static int mid_get_vbt_data_r1(struct drm_psb_private *dev_priv, u32 addr)
+{
+ struct vbt_r0 vbt;
+ void __iomem *gct_virtual;
+ struct gct_r1 gct;
+ u8 bpi;
+
+ if (read_vbt_r0(addr, &vbt))
+ return -1;
+
+ gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
+ if (!gct_virtual)
+ return -1;
+ memcpy_fromio(&gct, gct_virtual, sizeof(gct));
+ iounmap(gct_virtual);
+
+ bpi = gct.PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt = gct.PD.PanelType;
+ dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
+ dev_priv->gct_data.Panel_Port_Control =
+ gct.panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ gct.panel[bpi].Panel_MIPI_Display_Descriptor;
+
+ return 0;
+}
+
+static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
+{
+ struct vbt_r10 vbt;
+ void __iomem *gct_virtual;
+ struct gct_r10 *gct;
+ struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
+ struct gct_r10_timing_info *ti;
+ int ret = -1;
+
+ if (read_vbt_r10(addr, &vbt))
+ return -1;
+
+ gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
+ if (!gct)
+ return -1;
+
+ gct_virtual = ioremap(addr + sizeof(vbt),
+ sizeof(*gct) * vbt.panel_count);
+ if (!gct_virtual)
+ goto out;
+ memcpy_fromio(gct, gct_virtual, sizeof(*gct) * vbt.panel_count);
+ iounmap(gct_virtual);
+
+ dev_priv->gct_data.bpi = vbt.primary_panel_idx;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ gct[vbt.primary_panel_idx].Panel_MIPI_Display_Descriptor;
+
+ ti = &gct[vbt.primary_panel_idx].DTD;
+ dp_ti->pixel_clock = ti->pixel_clock;
+ dp_ti->hactive_hi = ti->hactive_hi;
+ dp_ti->hactive_lo = ti->hactive_lo;
+ dp_ti->hblank_hi = ti->hblank_hi;
+ dp_ti->hblank_lo = ti->hblank_lo;
+ dp_ti->hsync_offset_hi = ti->hsync_offset_hi;
+ dp_ti->hsync_offset_lo = ti->hsync_offset_lo;
+ dp_ti->hsync_pulse_width_hi = ti->hsync_pulse_width_hi;
+ dp_ti->hsync_pulse_width_lo = ti->hsync_pulse_width_lo;
+ dp_ti->vactive_hi = ti->vactive_hi;
+ dp_ti->vactive_lo = ti->vactive_lo;
+ dp_ti->vblank_hi = ti->vblank_hi;
+ dp_ti->vblank_lo = ti->vblank_lo;
+ dp_ti->vsync_offset_hi = ti->vsync_offset_hi;
+ dp_ti->vsync_offset_lo = ti->vsync_offset_lo;
+ dp_ti->vsync_pulse_width_hi = ti->vsync_pulse_width_hi;
+ dp_ti->vsync_pulse_width_lo = ti->vsync_pulse_width_lo;
+
+ ret = 0;
+out:
+ kfree(gct);
+ return ret;
+}
+
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
u32 addr;
- u16 new_size;
- u8 *vbt_virtual;
- u8 bpi;
- u8 number_desc = 0;
- struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
- struct gct_r10_timing_info ti;
- void *pGCT;
+ u8 __iomem *vbt_virtual;
+ struct vbt_header vbt_header;
struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+ int ret = -1;
- /* Get the address of the platform config vbt, B0:D2:F0;0xFC */
+ /* Get the address of the platform config vbt */
pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
pci_dev_put(pci_gfx_root);
dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
- /* check for platform config address == 0. */
- /* this means fw doesn't support vbt */
-
- if (addr == 0) {
- vbt->size = 0;
- return;
- }
+ if (!addr)
+ goto out;
/* get the virtual address of the vbt */
- vbt_virtual = ioremap(addr, sizeof(*vbt));
- if (vbt_virtual == NULL) {
- vbt->size = 0;
- return;
- }
+ vbt_virtual = ioremap(addr, sizeof(vbt_header));
+ if (!vbt_virtual)
+ goto out;
- memcpy(vbt, vbt_virtual, sizeof(*vbt));
- iounmap(vbt_virtual); /* Free virtual address space */
+ memcpy_fromio(&vbt_header, vbt_virtual, sizeof(vbt_header));
+ iounmap(vbt_virtual);
- /* No matching signature don't process the data */
- if (memcmp(vbt->signature, "$GCT", 4)) {
- vbt->size = 0;
- return;
- }
+ if (memcmp(&vbt_header.signature, "$GCT", 4))
+ goto out;
+
+ dev_dbg(dev->dev, "GCT revision is %02x\n", vbt_header.revision);
- dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
-
- switch (vbt->revision) {
- case 0:
- vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
- vbt->size - sizeof(*vbt) + 4);
- pGCT = vbt->oaktrail_gct;
- bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
- dev_priv->gct_data.bpi = bpi;
- dev_priv->gct_data.pt =
- ((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
- memcpy(&dev_priv->gct_data.DTD,
- &((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
- sizeof(struct oaktrail_timing_info));
- dev_priv->gct_data.Panel_Port_Control =
- ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ switch (vbt_header.revision) {
+ case 0x00:
+ ret = mid_get_vbt_data_r0(dev_priv, addr);
break;
- case 1:
- vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
- vbt->size - sizeof(*vbt) + 4);
- pGCT = vbt->oaktrail_gct;
- bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
- dev_priv->gct_data.bpi = bpi;
- dev_priv->gct_data.pt =
- ((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
- memcpy(&dev_priv->gct_data.DTD,
- &((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
- sizeof(struct oaktrail_timing_info));
- dev_priv->gct_data.Panel_Port_Control =
- ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ case 0x01:
+ ret = mid_get_vbt_data_r1(dev_priv, addr);
break;
case 0x10:
- /*header definition changed from rev 01 (v2) to rev 10h. */
- /*so, some values have changed location*/
- new_size = vbt->checksum; /*checksum contains lo size byte*/
- /*LSB of oaktrail_gct contains hi size byte*/
- new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
-
- vbt->checksum = vbt->size; /*size contains the checksum*/
- if (new_size > 0xff)
- vbt->size = 0xff; /*restrict size to 255*/
- else
- vbt->size = new_size;
-
- /* number of descriptors defined in the GCT */
- number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
- bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
- vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
- GCT_R10_DISPLAY_DESC_SIZE * number_desc);
- pGCT = vbt->oaktrail_gct;
- pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
- dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
-
- /*copy the GCT display timings into a temp structure*/
- memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
-
- /*now copy the temp struct into the dev_priv->gct_data*/
- dp_ti->pixel_clock = ti.pixel_clock;
- dp_ti->hactive_hi = ti.hactive_hi;
- dp_ti->hactive_lo = ti.hactive_lo;
- dp_ti->hblank_hi = ti.hblank_hi;
- dp_ti->hblank_lo = ti.hblank_lo;
- dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
- dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
- dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
- dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
- dp_ti->vactive_hi = ti.vactive_hi;
- dp_ti->vactive_lo = ti.vactive_lo;
- dp_ti->vblank_hi = ti.vblank_hi;
- dp_ti->vblank_lo = ti.vblank_lo;
- dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
- dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
- dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
- dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
-
- /* Move the MIPI_Display_Descriptor data from GCT to dev priv */
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- *((u8 *)pGCT + 0x0d);
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
- (*((u8 *)pGCT + 0x0e)) << 8;
+ ret = mid_get_vbt_data_r10(dev_priv, addr);
break;
default:
dev_err(dev->dev, "Unknown revision of GCT!\n");
- vbt->size = 0;
}
+
+out:
+ if (ret)
+		dev_err(dev->dev, "Unable to read GCT!\n");
+ else
+ dev_priv->has_gct = true;
}
int mid_chip_setup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
index 2da1f368f14e..f2f9f38a5362 100644
--- a/drivers/gpu/drm/gma500/oaktrail.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -19,14 +19,6 @@
/* MID device specific descriptors */
-struct oaktrail_vbt {
- s8 signature[4]; /*4 bytes,"$GCT" */
- u8 revision;
- u8 size;
- u8 checksum;
- void *oaktrail_gct;
-} __packed;
-
struct oaktrail_timing_info {
u16 pixel_clock;
u8 hactive_lo;
@@ -161,7 +153,7 @@ union oaktrail_panel_rx {
u16 panel_receiver;
} __packed;
-struct oaktrail_gct_v1 {
+struct gct_r0 {
union { /*8 bits,Defined as follows: */
struct {
u8 PanelType:4; /*4 bits, Bit field for panels*/
@@ -178,7 +170,7 @@ struct oaktrail_gct_v1 {
union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
} __packed;
-struct oaktrail_gct_v2 {
+struct gct_r1 {
union { /*8 bits,Defined as follows: */
struct {
u8 PanelType:4; /*4 bits, Bit field for panels*/
@@ -195,6 +187,16 @@ struct oaktrail_gct_v2 {
union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
} __packed;
+struct gct_r10 {
+ struct gct_r10_timing_info DTD;
+ u16 Panel_MIPI_Display_Descriptor;
+ u16 Panel_MIPI_Receiver_Descriptor;
+ u16 Panel_Backlight_Inverter_Descriptor;
+ u8 Panel_Initial_Brightness;
+ u32 MIPI_Ctlr_Init_ptr;
+ u32 MIPI_Panel_Init_ptr;
+} __packed;
+
struct oaktrail_gct_data {
	u8 bpi; /* boot panel index: which panel was used during boot */
u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
@@ -213,9 +215,6 @@ struct oaktrail_gct_data {
#define MODE_SETTING_IN_DSR 0x4
#define MODE_SETTING_ENCODER_DONE 0x8
-#define GCT_R10_HEADER_SIZE 16
-#define GCT_R10_DISPLAY_DESC_SIZE 28
-
/*
* Moorestown HDMI interfaces
*/
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index a39b0d0d680f..f821c835ca90 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -162,12 +162,10 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
if (!gma_power_begin(dev, true))
@@ -181,32 +179,32 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
psb_intel_crtc_load_lut(crtc);
@@ -223,28 +221,28 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Disable the VGA plane that we never use */
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
}
/* Wait for the pipe disable to take effect. */
psb_intel_wait_for_vblank(dev);
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
}
/* Wait for the clocks to turn off. */
@@ -292,17 +290,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
- int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
- int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
struct oaktrail_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
@@ -350,7 +338,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
if (oaktrail_panel_fitter_pipe(dev) == pipe)
REG_WRITE(PFIT_CONTROL, 0);
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->src,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
@@ -369,34 +357,34 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
offsetY = (adjusted_mode->crtc_vdisplay -
mode->crtc_vdisplay) / 2;
- REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg,
+ REG_WRITE(map->hblank,
(adjusted_mode->crtc_hblank_start - offsetX - 1) |
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
- REG_WRITE(hsync_reg,
+ REG_WRITE(map->hsync,
(adjusted_mode->crtc_hsync_start - offsetX - 1) |
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
- REG_WRITE(vblank_reg,
+ REG_WRITE(map->vblank,
(adjusted_mode->crtc_vblank_start - offsetY - 1) |
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
- REG_WRITE(vsync_reg,
+ REG_WRITE(map->vsync,
(adjusted_mode->crtc_vsync_start - offsetY - 1) |
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
} else {
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
}
@@ -408,10 +396,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
}
/* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
+ pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr |= DISPPLANE_GAMMA_ENABLE;
if (pipe == 0)
@@ -467,30 +455,30 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
mrstPrintPll("chosen", &clock);
if (dpll & DPLL_VCO_ENABLE) {
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Check the DPLLA lock bit PIPEACONF[29] */
udelay(150);
}
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
/* write it again -- the BIOS does, after all */
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
psb_intel_wait_for_vblank(dev);
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
psb_intel_wait_for_vblank(dev);
oaktrail_crtc_mode_set_exit:
@@ -509,15 +497,13 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
@@ -533,9 +519,9 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -557,12 +543,12 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
ret = -EINVAL;
goto pipe_set_base_exit;
}
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
+ REG_WRITE(map->base, offset);
+ REG_READ(map->base);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
pipe_set_base_exit:
gma_power_end(dev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 41d1924ea31e..0f9b7db80f6b 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -187,6 +187,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_save_area *regs = &dev_priv->regs;
+ struct psb_pipe *p = &regs->pipe[0];
int i;
u32 pp_stat;
@@ -201,24 +202,24 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
/* Pipe & plane A info */
- regs->psb.savePIPEACONF = PSB_RVDC32(PIPEACONF);
- regs->psb.savePIPEASRC = PSB_RVDC32(PIPEASRC);
- regs->psb.saveFPA0 = PSB_RVDC32(MRST_FPA0);
- regs->psb.saveFPA1 = PSB_RVDC32(MRST_FPA1);
- regs->psb.saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
- regs->psb.saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
- regs->psb.saveHBLANK_A = PSB_RVDC32(HBLANK_A);
- regs->psb.saveHSYNC_A = PSB_RVDC32(HSYNC_A);
- regs->psb.saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
- regs->psb.saveVBLANK_A = PSB_RVDC32(VBLANK_A);
- regs->psb.saveVSYNC_A = PSB_RVDC32(VSYNC_A);
+ p->conf = PSB_RVDC32(PIPEACONF);
+ p->src = PSB_RVDC32(PIPEASRC);
+ p->fp0 = PSB_RVDC32(MRST_FPA0);
+ p->fp1 = PSB_RVDC32(MRST_FPA1);
+ p->dpll = PSB_RVDC32(MRST_DPLL_A);
+ p->htotal = PSB_RVDC32(HTOTAL_A);
+ p->hblank = PSB_RVDC32(HBLANK_A);
+ p->hsync = PSB_RVDC32(HSYNC_A);
+ p->vtotal = PSB_RVDC32(VTOTAL_A);
+ p->vblank = PSB_RVDC32(VBLANK_A);
+ p->vsync = PSB_RVDC32(VSYNC_A);
regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
- regs->psb.saveDSPACNTR = PSB_RVDC32(DSPACNTR);
- regs->psb.saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
- regs->psb.saveDSPAADDR = PSB_RVDC32(DSPABASE);
- regs->psb.saveDSPASURF = PSB_RVDC32(DSPASURF);
- regs->psb.saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
- regs->psb.saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
+ p->cntr = PSB_RVDC32(DSPACNTR);
+ p->stride = PSB_RVDC32(DSPASTRIDE);
+ p->addr = PSB_RVDC32(DSPABASE);
+ p->surf = PSB_RVDC32(DSPASURF);
+ p->linoff = PSB_RVDC32(DSPALINOFF);
+ p->tileoff = PSB_RVDC32(DSPATILEOFF);
/* Save cursor regs */
regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
@@ -227,7 +228,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
/* Save palette (gamma) */
for (i = 0; i < 256; i++)
- regs->psb.save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+ p->palette[i] = PSB_RVDC32(PALETTE_A + (i << 2));
if (dev_priv->hdmi_priv)
oaktrail_hdmi_save(dev);
@@ -300,6 +301,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_save_area *regs = &dev_priv->regs;
+ struct psb_pipe *p = &regs->pipe[0];
u32 pp_stat;
int i;
@@ -317,21 +319,21 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
PSB_WVDC32(0x80000000, VGACNTRL);
/* set the plls */
- PSB_WVDC32(regs->psb.saveFPA0, MRST_FPA0);
- PSB_WVDC32(regs->psb.saveFPA1, MRST_FPA1);
+ PSB_WVDC32(p->fp0, MRST_FPA0);
+ PSB_WVDC32(p->fp1, MRST_FPA1);
/* Actually enable it */
- PSB_WVDC32(regs->psb.saveDPLL_A, MRST_DPLL_A);
+ PSB_WVDC32(p->dpll, MRST_DPLL_A);
DRM_UDELAY(150);
/* Restore mode */
- PSB_WVDC32(regs->psb.saveHTOTAL_A, HTOTAL_A);
- PSB_WVDC32(regs->psb.saveHBLANK_A, HBLANK_A);
- PSB_WVDC32(regs->psb.saveHSYNC_A, HSYNC_A);
- PSB_WVDC32(regs->psb.saveVTOTAL_A, VTOTAL_A);
- PSB_WVDC32(regs->psb.saveVBLANK_A, VBLANK_A);
- PSB_WVDC32(regs->psb.saveVSYNC_A, VSYNC_A);
- PSB_WVDC32(regs->psb.savePIPEASRC, PIPEASRC);
+ PSB_WVDC32(p->htotal, HTOTAL_A);
+ PSB_WVDC32(p->hblank, HBLANK_A);
+ PSB_WVDC32(p->hsync, HSYNC_A);
+ PSB_WVDC32(p->vtotal, VTOTAL_A);
+ PSB_WVDC32(p->vblank, VBLANK_A);
+ PSB_WVDC32(p->vsync, VSYNC_A);
+ PSB_WVDC32(p->src, PIPEASRC);
PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A);
/* Restore performance mode*/
@@ -339,16 +341,16 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
/* Enable the pipe*/
if (dev_priv->iLVDS_enable)
- PSB_WVDC32(regs->psb.savePIPEACONF, PIPEACONF);
+ PSB_WVDC32(p->conf, PIPEACONF);
/* Set up the plane*/
- PSB_WVDC32(regs->psb.saveDSPALINOFF, DSPALINOFF);
- PSB_WVDC32(regs->psb.saveDSPASTRIDE, DSPASTRIDE);
- PSB_WVDC32(regs->psb.saveDSPATILEOFF, DSPATILEOFF);
+ PSB_WVDC32(p->linoff, DSPALINOFF);
+ PSB_WVDC32(p->stride, DSPASTRIDE);
+ PSB_WVDC32(p->tileoff, DSPATILEOFF);
/* Enable the plane */
- PSB_WVDC32(regs->psb.saveDSPACNTR, DSPACNTR);
- PSB_WVDC32(regs->psb.saveDSPASURF, DSPASURF);
+ PSB_WVDC32(p->cntr, DSPACNTR);
+ PSB_WVDC32(p->surf, DSPASURF);
/* Enable Cursor A */
PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR);
@@ -357,7 +359,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
/* Restore palette (gamma) */
for (i = 0; i < 256; i++)
- PSB_WVDC32(regs->psb.save_palette_a[i], PALETTE_A + (i << 2));
+ PSB_WVDC32(p->palette[i], PALETTE_A + (i << 2));
if (dev_priv->hdmi_priv)
oaktrail_hdmi_restore(dev);
@@ -454,31 +456,84 @@ static int oaktrail_power_up(struct drm_device *dev)
return 0;
}
+/* Oaktrail per-pipe register offset maps: index 0 is pipe A, index 1 is pipe B */
+static const struct psb_offset oaktrail_regmap[2] = {
+ {
+ .fp0 = MRST_FPA0,
+ .fp1 = MRST_FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .surf = DSPASURF,
+		.addr = MRST_DSPABASE,
+		.base = MRST_DSPABASE,	/* assumed: the old dspbase_reg, needed since map->base is used above */
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .surf = DSPBSURF,
+		.addr = DSPBBASE,
+		.base = DSPBBASE,	/* assumed: the old dspbase_reg for pipe B */
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ },
+};
static int oaktrail_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
int ret;
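+	/* MSI failure is not fatal; the device falls back to legacy interrupts */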
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+
+ dev_priv->regmap = oaktrail_regmap;
+
ret = mid_chip_setup(dev);
if (ret < 0)
return ret;
- if (vbt->size == 0) {
+ if (!dev_priv->has_gct) {
/* Now pull the BIOS data */
- gma_intel_opregion_init(dev);
+ psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
}
+ oaktrail_hdmi_setup(dev);
return 0;
}
static void oaktrail_teardown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
oaktrail_hdmi_teardown(dev);
- if (vbt->size == 0)
+ if (!dev_priv->has_gct)
psb_intel_destroy_bios(dev);
}
@@ -487,6 +542,9 @@ const struct psb_ops oaktrail_chip_ops = {
.accel_2d = 1,
.pipes = 2,
.crtcs = 2,
+ .hdmi_mask = (1 << 0),
+ .lvds_mask = (1 << 0),
+ .cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
.chip_setup = oaktrail_chip_setup,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f8b367b45f66..c10899c953b9 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -179,7 +179,6 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
@@ -188,11 +187,6 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- /* We assume worst case scenario of 32 bpp here, since we don't know */
- if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
- dev_priv->vram_stolen_size)
- return MODE_MEM;
-
return MODE_OK;
}
@@ -440,6 +434,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
+ struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
int i;
/* dpll */
@@ -450,14 +445,14 @@ void oaktrail_hdmi_save(struct drm_device *dev)
hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
/* pipe B */
- regs->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
- regs->savePIPEBSRC = PSB_RVDC32(PIPEBSRC);
- regs->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B);
- regs->saveHBLANK_B = PSB_RVDC32(HBLANK_B);
- regs->saveHSYNC_B = PSB_RVDC32(HSYNC_B);
- regs->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B);
- regs->saveVBLANK_B = PSB_RVDC32(VBLANK_B);
- regs->saveVSYNC_B = PSB_RVDC32(VSYNC_B);
+ pipeb->conf = PSB_RVDC32(PIPEBCONF);
+ pipeb->src = PSB_RVDC32(PIPEBSRC);
+ pipeb->htotal = PSB_RVDC32(HTOTAL_B);
+ pipeb->hblank = PSB_RVDC32(HBLANK_B);
+ pipeb->hsync = PSB_RVDC32(HSYNC_B);
+ pipeb->vtotal = PSB_RVDC32(VTOTAL_B);
+ pipeb->vblank = PSB_RVDC32(VBLANK_B);
+ pipeb->vsync = PSB_RVDC32(VSYNC_B);
hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
@@ -469,12 +464,12 @@ void oaktrail_hdmi_save(struct drm_device *dev)
hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);
/* plane */
- regs->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
- regs->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
- regs->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
- regs->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
- regs->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
- regs->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
+ pipeb->cntr = PSB_RVDC32(DSPBCNTR);
+ pipeb->stride = PSB_RVDC32(DSPBSTRIDE);
+ pipeb->addr = PSB_RVDC32(DSPBBASE);
+ pipeb->surf = PSB_RVDC32(DSPBSURF);
+ pipeb->linoff = PSB_RVDC32(DSPBLINOFF);
+ pipeb->tileoff = PSB_RVDC32(DSPBTILEOFF);
/* cursor B */
regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
@@ -483,7 +478,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
/* save palette */
for (i = 0; i < 256; i++)
- regs->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+ pipeb->palette[i] = PSB_RVDC32(PALETTE_B + (i << 2));
}
/* restore HDMI register state */
@@ -492,6 +487,7 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
+ struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
int i;
/* dpll */
@@ -503,13 +499,13 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
DRM_UDELAY(150);
/* pipe */
- PSB_WVDC32(regs->savePIPEBSRC, PIPEBSRC);
- PSB_WVDC32(regs->saveHTOTAL_B, HTOTAL_B);
- PSB_WVDC32(regs->saveHBLANK_B, HBLANK_B);
- PSB_WVDC32(regs->saveHSYNC_B, HSYNC_B);
- PSB_WVDC32(regs->saveVTOTAL_B, VTOTAL_B);
- PSB_WVDC32(regs->saveVBLANK_B, VBLANK_B);
- PSB_WVDC32(regs->saveVSYNC_B, VSYNC_B);
+ PSB_WVDC32(pipeb->src, PIPEBSRC);
+ PSB_WVDC32(pipeb->htotal, HTOTAL_B);
+ PSB_WVDC32(pipeb->hblank, HBLANK_B);
+ PSB_WVDC32(pipeb->hsync, HSYNC_B);
+ PSB_WVDC32(pipeb->vtotal, VTOTAL_B);
+ PSB_WVDC32(pipeb->vblank, VBLANK_B);
+ PSB_WVDC32(pipeb->vsync, VSYNC_B);
PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
@@ -519,15 +515,15 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);
- PSB_WVDC32(regs->savePIPEBCONF, PIPEBCONF);
+ PSB_WVDC32(pipeb->conf, PIPEBCONF);
PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
/* plane */
- PSB_WVDC32(regs->saveDSPBLINOFF, DSPBLINOFF);
- PSB_WVDC32(regs->saveDSPBSTRIDE, DSPBSTRIDE);
- PSB_WVDC32(regs->saveDSPBTILEOFF, DSPBTILEOFF);
- PSB_WVDC32(regs->saveDSPBCNTR, DSPBCNTR);
- PSB_WVDC32(regs->saveDSPBSURF, DSPBSURF);
+ PSB_WVDC32(pipeb->linoff, DSPBLINOFF);
+ PSB_WVDC32(pipeb->stride, DSPBSTRIDE);
+ PSB_WVDC32(pipeb->tileoff, DSPBTILEOFF);
+ PSB_WVDC32(pipeb->cntr, DSPBCNTR);
+ PSB_WVDC32(pipeb->surf, DSPBSURF);
/* cursor B */
PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR);
@@ -536,5 +532,5 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
/* restore palette */
for (i = 0; i < 256; i++)
- PSB_WVDC32(regs->save_palette_b[i], PALETTE_B + (i << 2));
+ PSB_WVDC32(pipeb->palette[i], PALETTE_B + (i << 2));
}
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 5e84fbde749b..88627e3ba1e3 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -250,7 +250,7 @@ static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
*/
static void oaktrail_hdmi_i2c_gpio_fix(void)
{
- void *base;
+ void __iomem *base;
unsigned int gpio_base = 0xff12c000;
int gpio_len = 0x1000;
u32 temp;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 654f32b22b21..558c77fb55ec 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -257,7 +257,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
mode_dev->panel_fixed_mode = NULL;
/* Use the firmware provided data on Moorestown */
- if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/
+ if (dev_priv->has_gct) {
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
return;
@@ -371,7 +371,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
BRIGHTNESS_MAX_LEVEL);
mode_dev->panel_wants_dither = false;
- if (dev_priv->vbt_data.size != 0x00)
+ if (dev_priv->has_gct)
mode_dev->panel_wants_dither = (dev_priv->gct_data.
Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
if (dev_priv->lvds_dither)
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
new file mode 100644
index 000000000000..4f186eca3a30
--- /dev/null
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#define PCI_ASLE 0xe4
+#define PCI_ASLS 0xfc
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET 0x100
+#define ACPI_CLID 0x01ac /* current lid state indicator */
+#define ACPI_CDCK 0x01b0 /* current docking state indicator */
+#define OPREGION_SWSCI_OFFSET 0x200
+#define OPREGION_ASLE_OFFSET 0x300
+#define OPREGION_VBT_OFFSET 0x400
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI (1<<0)
+#define MBOX_SWSCI (1<<1)
+#define MBOX_ASLE (1<<2)
+
+struct opregion_header {
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u8 reserved[164];
+} __packed;
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+ u32 drdy; /* driver readiness */
+ u32 csts; /* notification status */
+ u32 cevt; /* current event */
+ u8 rsvd1[20];
+ u32 didl[8]; /* supported display devices ID list */
+ u32 cpdl[8]; /* currently presented display list */
+ u32 cadl[8]; /* currently active display list */
+ u32 nadl[8]; /* next active devices list */
+ u32 aslp; /* ASL sleep time-out */
+ u32 tidx; /* toggle table index */
+ u32 chpd; /* current hotplug enable indicator */
+ u32 clid; /* current lid state*/
+ u32 cdck; /* current docking state */
+ u32 sxsw; /* Sx state resume */
+ u32 evts; /* ASL supported events */
+ u32 cnot; /* current OS notification */
+ u32 nrdy; /* driver status */
+ u8 rsvd2[60];
+} __packed;
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+	/* FIXME: add it later */
+} __packed;
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+ u32 ardy; /* driver readiness */
+ u32 aslc; /* ASLE interrupt command */
+ u32 tche; /* technology enabled indicator */
+ u32 alsi; /* current ALS illuminance reading */
+ u32 bclp; /* backlight brightness to set */
+ u32 pfit; /* panel fitting state */
+ u32 cblv; /* current brightness level */
+ u16 bclm[20]; /* backlight level duty cycle mapping table */
+ u32 cpfm; /* current panel fitting mode */
+ u32 epfm; /* enabled panel fitting modes */
+ u8 plut[74]; /* panel LUT and identifier */
+ u32 pfmb; /* PWM freq and min brightness */
+ u8 rsvd[102];
+} __packed;
+
+/* ASLE irq request bits */
+#define ASLE_SET_ALS_ILLUM (1 << 0)
+#define ASLE_SET_BACKLIGHT (1 << 1)
+#define ASLE_SET_PFIT (1 << 2)
+#define ASLE_SET_PWM_FREQ (1 << 3)
+#define ASLE_REQ_MSK 0xf
+
+/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAILED (1<<10)
+#define ASLE_BACKLIGHT_FAILED (1<<12)
+#define ASLE_PFIT_FAILED (1<<14)
+#define ASLE_PWM_FREQ_FAILED (1<<16)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID (1<<31)
+#define ASLE_BCLP_MSK (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID (1<<31)
+
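+/*
+ * Service an ASLE backlight request: validate the 0-255 level supplied
+ * by the firmware, apply it through the backlight class device and
+ * report the resulting percentage back via the cblv field.
+ */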
+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct backlight_device *bd = dev_priv->backlight_device;
+
+ DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp);
+
+ if (!(bclp & ASLE_BCLP_VALID))
+ return ASLE_BACKLIGHT_FAILED;
+
+ if (bd == NULL)
+ return ASLE_BACKLIGHT_FAILED;
+
+ bclp &= ASLE_BCLP_MSK;
+ if (bclp > 255)
+ return ASLE_BACKLIGHT_FAILED;
+
+ if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
+ int max = bd->props.max_brightness;
+ bd->props.brightness = bclp * max / 255;
+ backlight_update_status(bd);
+ }
+
+ asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
+
+ return 0;
+}
+
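+/*
+ * ASLE interrupt: decode the request bits and handle them. Only the
+ * backlight request is implemented; the result is written back to aslc.
+ */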
+void psb_intel_opregion_asle_intr(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 asle_stat = 0;
+ u32 asle_req;
+
+ if (!asle)
+ return;
+
+ asle_req = asle->aslc & ASLE_REQ_MSK;
+ if (!asle_req) {
+ DRM_DEBUG_DRIVER("non asle set request??\n");
+ return;
+ }
+
+ if (asle_req & ASLE_SET_BACKLIGHT)
+ asle_stat |= asle_set_backlight(dev, asle->bclp);
+
+ asle->aslc = asle_stat;
+}
+
+#define ASLE_ALS_EN (1<<0)
+#define ASLE_BLC_EN (1<<1)
+#define ASLE_PFIT_EN (1<<2)
+#define ASLE_PFMB_EN (1<<3)
+
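+/*
+ * Tell the firmware which ASLE features we handle: enable the legacy
+ * backlight event on both pipes and mark the driver ready via ardy.
+ */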
+void psb_intel_opregion_enable_asle(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+
+ if (asle) {
+		/* Don't do this on Medfield or other non-PC-like devices; they
+		   use the bit for something different altogether */
+ psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
+ psb_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+
+ asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN
+ | ASLE_PFMB_EN;
+ asle->ardy = 1;
+ }
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID (1<<1)
+#define ACPI_EV_DOCK (1<<2)
+
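+/* The OpRegion instance shared with the ACPI video event notifier */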
+static struct psb_intel_opregion *system_opregion;
+
+static int psb_intel_opregion_video_event(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ /* The only video events relevant to opregion are 0x80. These indicate
+ either a docking event, lid switch or display switch request. In
+ Linux, these are handled by the dock, button and video drivers.
+ We might want to fix the video driver to be opregion-aware in
+ future, but right now we just indicate to the firmware that the
+ request has been handled */
+
+ struct opregion_acpi *acpi;
+
+ if (!system_opregion)
+ return NOTIFY_DONE;
+
+ acpi = system_opregion->acpi;
+ acpi->csts = 0;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block psb_intel_opregion_notifier = {
+ .notifier_call = psb_intel_opregion_video_event,
+};
+
+void psb_intel_opregion_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video
+ * module. We don't actually need to do anything with them. */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+
+ system_opregion = opregion;
+ register_acpi_notifier(&psb_intel_opregion_notifier);
+ }
+
+ if (opregion->asle)
+ psb_intel_opregion_enable_asle(dev);
+}
+
+void psb_intel_opregion_fini(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi->drdy = 0;
+
+ system_opregion = NULL;
+ unregister_acpi_notifier(&psb_intel_opregion_notifier);
+ }
+
+ /* just clear all opregion memory pointers now */
+ iounmap(opregion->header);
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+}
+
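+/*
+ * Map the ACPI OpRegion: its physical address is published by the BIOS
+ * in the ASLS dword of PCI config space; the mailbox bits in the header
+ * tell us which sub-regions (ACPI, ASLE) are valid to use.
+ */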
+int psb_intel_opregion_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_opregion *opregion = &dev_priv->opregion;
+ u32 opregion_phy, mboxes;
+ void __iomem *base;
+ int err = 0;
+
+ pci_read_config_dword(dev->pdev, PCI_ASLS, &opregion_phy);
+ if (opregion_phy == 0) {
+ DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
+ return -ENOTSUPP;
+ }
+ DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
+ base = acpi_os_ioremap(opregion_phy, 8*1024);
+ if (!base)
+ return -ENOMEM;
+
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ opregion->header = base;
+ opregion->vbt = base + OPREGION_VBT_OFFSET;
+
+ opregion->lid_state = base + ACPI_CLID;
+
+ mboxes = opregion->header->mboxes;
+ if (mboxes & MBOX_ACPI) {
+ DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+ opregion->acpi = base + OPREGION_ACPI_OFFSET;
+ }
+
+ if (mboxes & MBOX_ASLE) {
+ DRM_DEBUG_DRIVER("ASLE supported\n");
+ opregion->asle = base + OPREGION_ASLE_OFFSET;
+ }
+
+ return 0;
+
+err_out:
+ iounmap(base);
+ return err;
+}
diff --git a/drivers/gpu/drm/gma500/intel_opregion.c b/drivers/gpu/drm/gma500/opregion.h
index d946bc1b17bf..72dc6b921265 100644
--- a/drivers/gpu/drm/gma500/intel_opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Intel Corporation
+ * Copyright 2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,62 +20,30 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
- * FIXME: resolve with the i915 version
*/
-#include "psb_drv.h"
+#if defined(CONFIG_ACPI)
+extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
+extern void psb_intel_opregion_init(struct drm_device *dev);
+extern void psb_intel_opregion_fini(struct drm_device *dev);
+extern int psb_intel_opregion_setup(struct drm_device *dev);
-struct opregion_header {
- u8 signature[16];
- u32 size;
- u32 opregion_ver;
- u8 bios_ver[32];
- u8 vbios_ver[16];
- u8 driver_ver[16];
- u32 mboxes;
- u8 reserved[164];
-} __packed;
+#else
-struct opregion_apci {
- /*FIXME: add it later*/
-} __packed;
-
-struct opregion_swsci {
- /*FIXME: add it later*/
-} __packed;
-
-struct opregion_acpi {
- /*FIXME: add it later*/
-} __packed;
-
-int gma_intel_opregion_init(struct drm_device *dev)
+static inline void psb_intel_opregion_asle_intr(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
- u32 opregion_phy;
- void *base;
- u32 *lid_state;
-
- dev_priv->lid_state = NULL;
-
- pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
- if (opregion_phy == 0)
- return -ENOTSUPP;
-
- base = ioremap(opregion_phy, 8*1024);
- if (!base)
- return -ENOMEM;
+}
- lid_state = base + 0x01ac;
+static inline void psb_intel_opregion_init(struct drm_device *dev)
+{
+}
- dev_priv->lid_state = lid_state;
- dev_priv->lid_last_state = readl(lid_state);
- return 0;
+static inline void psb_intel_opregion_fini(struct drm_device *dev)
+{
}
-int gma_intel_opregion_exit(struct drm_device *dev)
+static inline int psb_intel_opregion_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
- if (dev_priv->lid_state)
- iounmap(dev_priv->lid_state);
return 0;
}
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 95d163e4f1f4..eff039bf92d4 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -197,7 +197,8 @@ static int psb_save_display_registers(struct drm_device *dev)
}
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- connector->funcs->save(connector);
+ if (connector->funcs->save)
+ connector->funcs->save(connector);
mutex_unlock(&dev->mode_config.mutex);
return 0;
@@ -235,7 +236,8 @@ static int psb_restore_display_registers(struct drm_device *dev)
crtc->funcs->restore(crtc);
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- connector->funcs->restore(connector);
+ if (connector->funcs->restore)
+ connector->funcs->restore(connector);
mutex_unlock(&dev->mode_config.mutex);
return 0;
@@ -289,17 +291,80 @@ static void psb_get_core_freq(struct drm_device *dev)
}
}
+/* Poulsbo per-pipe register offset maps: index 0 is pipe A, index 1 is pipe B */
+static const struct psb_offset psb_regmap[2] = {
+ {
+ .fp0 = FPA0,
+ .fp1 = FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .base = DSPABASE,
+ .surf = DSPASURF,
+ .addr = DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .base = DSPBBASE,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ }
+};
+
static int psb_chip_setup(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->regmap = psb_regmap;
psb_get_core_freq(dev);
gma_intel_setup_gmbus(dev);
- gma_intel_opregion_init(dev);
+ psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
return 0;
}
+/* Not exactly an erratum, more an irritation */
+static void psb_chip_errata(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ psb_lid_timer_init(dev_priv);
+}
+
static void psb_chip_teardown(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
@@ -308,9 +373,13 @@ const struct psb_ops psb_chip_ops = {
.accel_2d = 1,
.pipes = 2,
.crtcs = 2,
+ .hdmi_mask = (1 << 0),
+ .lvds_mask = (1 << 1),
+ .cursor_needs_phys = 1,
.sgx_offset = PSB_SGX_OFFSET,
.chip_setup = psb_chip_setup,
.chip_teardown = psb_chip_teardown,
+ .errata = psb_chip_errata,
.crtc_helper = &psb_intel_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index c34adf9d910a..caba6e08693c 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -79,6 +79,14 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
{ 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
{ 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
{ 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
#endif
{ 0, }
};
@@ -144,10 +152,6 @@ static void psb_lastclose(struct drm_device *dev)
return;
}
-static void psb_do_takedown(struct drm_device *dev)
-{
-}
-
static int psb_do_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -172,24 +176,6 @@ static int psb_do_init(struct drm_device *dev)
dev_priv->gatt_free_offset = pg->mmu_gatt_start +
(stolen_gtt << PAGE_SHIFT) * 1024;
- if (1 || drm_debug) {
- uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
- uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
- DRM_INFO("SGX core id = 0x%08x\n", core_id);
- DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
- (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
- _PSB_CC_REVISION_MAJOR_SHIFT,
- (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
- _PSB_CC_REVISION_MINOR_SHIFT);
- DRM_INFO
- ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
- (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
- _PSB_CC_REVISION_MAINTENANCE_SHIFT,
- (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
- _PSB_CC_REVISION_DESIGNER_SHIFT);
- }
-
-
spin_lock_init(&dev_priv->irqmask_lock);
spin_lock_init(&dev_priv->lock_2d);
@@ -204,7 +190,6 @@ static int psb_do_init(struct drm_device *dev)
PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
return 0;
out_err:
- psb_do_takedown(dev);
return ret;
}
@@ -214,18 +199,16 @@ static int psb_driver_unload(struct drm_device *dev)
/* Kill vblank etc here */
- gma_backlight_exit(dev);
-
- psb_modeset_cleanup(dev);
if (dev_priv) {
- psb_lid_timer_takedown(dev_priv);
- gma_intel_opregion_exit(dev);
+ if (dev_priv->backlight_device)
+ gma_backlight_exit(dev);
+ psb_modeset_cleanup(dev);
if (dev_priv->ops->chip_teardown)
dev_priv->ops->chip_teardown(dev);
- psb_do_takedown(dev);
+ psb_intel_opregion_fini(dev);
if (dev_priv->pf_pd) {
psb_mmu_free_pagedir(dev_priv->pf_pd);
@@ -246,6 +229,7 @@ static int psb_driver_unload(struct drm_device *dev)
}
psb_gtt_takedown(dev);
if (dev_priv->scratch_page) {
+ set_pages_wb(dev_priv->scratch_page, 1);
__free_page(dev_priv->scratch_page);
dev_priv->scratch_page = NULL;
}
@@ -258,15 +242,13 @@ static int psb_driver_unload(struct drm_device *dev)
dev_priv->sgx_reg = NULL;
}
+ /* Destroy VBT data */
+ psb_intel_destroy_bios(dev);
+
kfree(dev_priv);
dev->dev_private = NULL;
-
- /*destroy VBT data*/
- psb_intel_destroy_bios(dev);
}
-
gma_power_uninit(dev);
-
return 0;
}
@@ -290,11 +272,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
pci_set_master(dev->pdev);
- if (!IS_PSB(dev)) {
- if (pci_enable_msi(dev->pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
- }
-
dev_priv->num_pipe = dev_priv->ops->pipes;
resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
@@ -309,6 +286,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (!dev_priv->sgx_reg)
goto out_err;
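+	/* Map the ACPI OpRegion early so per-chip setup code can use it */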
+ psb_intel_opregion_setup(dev);
+
ret = dev_priv->ops->chip_setup(dev);
if (ret)
goto out_err;
@@ -348,10 +327,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
-/* igd_opregion_init(&dev_priv->opregion_dev); */
acpi_video_register();
- if (dev_priv->lid_state)
- psb_lid_timer_init(dev_priv);
ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret)
@@ -370,8 +346,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_install(dev);
+
+ drm_irq_install(dev);
dev->vblank_disable_allowed = 1;
@@ -619,7 +595,7 @@ static const struct dev_pm_ops psb_pm_ops = {
.runtime_idle = psb_runtime_idle,
};
-static struct vm_operations_struct psb_gem_vm_ops = {
+static const struct vm_operations_struct psb_gem_vm_ops = {
.fault = psb_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 40ce2c9bc2e4..1bd115ecefe1 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -30,6 +30,7 @@
#include "psb_intel_drv.h"
#include "gtt.h"
#include "power.h"
+#include "opregion.h"
#include "oaktrail.h"
/* Append new drm mode definition here, align with libdrm definition */
@@ -120,6 +121,7 @@ enum {
#define PSB_HWSTAM 0x2098
#define PSB_INSTPM 0x20C0
#define PSB_INT_IDENTITY_R 0x20A4
+#define _PSB_IRQ_ASLE (1<<0)
#define _MDFLD_PIPEC_EVENT_FLAG (1<<2)
#define _MDFLD_PIPEC_VBLANK_FLAG (1<<3)
#define _PSB_DPST_PIPEB_FLAG (1<<4)
@@ -130,6 +132,7 @@ enum {
#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
#define _MDFLD_MIPIA_FLAG (1<<16)
#define _MDFLD_MIPIC_FLAG (1<<17)
+#define _PSB_IRQ_DISP_HOTSYNC (1<<17)
#define _PSB_IRQ_SGX_FLAG (1<<18)
#define _PSB_IRQ_MSVDX_FLAG (1<<19)
#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
@@ -257,7 +260,8 @@ struct psb_intel_opregion {
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
struct opregion_asle *asle;
- int enabled;
+ void *vbt;
+ u32 __iomem *lid_state;
};
struct sdvo_device_mapping {
@@ -277,50 +281,72 @@ struct intel_gmbus {
};
/*
+ * Register offset maps. Each display pipe has its own copy of the
+ * control registers; a per-pipe psb_offset table lets shared code
+ * address the right registers without pipe == 0 conditionals.
+ */
+
+struct psb_offset {
+ u32 fp0;
+ u32 fp1;
+ u32 cntr;
+ u32 conf;
+ u32 src;
+ u32 dpll;
+ u32 dpll_md;
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 surf;
+ u32 addr;
+ u32 base;
+ u32 status;
+ u32 linoff;
+ u32 tileoff;
+ u32 palette;
+};
+
+/*
* Register save state. This is used to hold the context when the
* device is powered off. In the case of Oaktrail this can (but does not
 * yet) include screen blank. Operations occurring during the save
* update the register cache instead.
*/
+
+/*
+ * Common register save state for a display pipe.
+ */
+struct psb_pipe {
+ u32 fp0;
+ u32 fp1;
+ u32 cntr;
+ u32 conf;
+ u32 src;
+ u32 dpll;
+ u32 dpll_md;
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 base;
+ u32 surf;
+ u32 addr;
+ u32 status;
+ u32 linoff;
+ u32 tileoff;
+ u32 palette[256];
+};
+
struct psb_state {
- uint32_t saveDSPACNTR;
- uint32_t saveDSPBCNTR;
- uint32_t savePIPEACONF;
- uint32_t savePIPEBCONF;
- uint32_t savePIPEASRC;
- uint32_t savePIPEBSRC;
- uint32_t saveFPA0;
- uint32_t saveFPA1;
- uint32_t saveDPLL_A;
- uint32_t saveDPLL_A_MD;
- uint32_t saveHTOTAL_A;
- uint32_t saveHBLANK_A;
- uint32_t saveHSYNC_A;
- uint32_t saveVTOTAL_A;
- uint32_t saveVBLANK_A;
- uint32_t saveVSYNC_A;
- uint32_t saveDSPASTRIDE;
- uint32_t saveDSPASIZE;
- uint32_t saveDSPAPOS;
- uint32_t saveDSPABASE;
- uint32_t saveDSPASURF;
- uint32_t saveDSPASTATUS;
- uint32_t saveFPB0;
- uint32_t saveFPB1;
- uint32_t saveDPLL_B;
- uint32_t saveDPLL_B_MD;
- uint32_t saveHTOTAL_B;
- uint32_t saveHBLANK_B;
- uint32_t saveHSYNC_B;
- uint32_t saveVTOTAL_B;
- uint32_t saveVBLANK_B;
- uint32_t saveVSYNC_B;
- uint32_t saveDSPBSTRIDE;
- uint32_t saveDSPBSIZE;
- uint32_t saveDSPBPOS;
- uint32_t saveDSPBBASE;
- uint32_t saveDSPBSURF;
- uint32_t saveDSPBSTATUS;
uint32_t saveVCLK_DIVISOR_VGA0;
uint32_t saveVCLK_DIVISOR_VGA1;
uint32_t saveVCLK_POST_DIV;
@@ -335,14 +361,8 @@ struct psb_state {
uint32_t savePP_CONTROL;
uint32_t savePP_CYCLE;
uint32_t savePFIT_CONTROL;
- uint32_t savePaletteA[256];
- uint32_t savePaletteB[256];
uint32_t saveCLOCKGATING;
uint32_t saveDSPARB;
- uint32_t saveDSPATILEOFF;
- uint32_t saveDSPBTILEOFF;
- uint32_t saveDSPAADDR;
- uint32_t saveDSPBADDR;
uint32_t savePFIT_AUTO_RATIOS;
uint32_t savePFIT_PGM_RATIOS;
uint32_t savePP_ON_DELAYS;
@@ -350,8 +370,6 @@ struct psb_state {
uint32_t savePP_DIVISOR;
uint32_t saveBCLRPAT_A;
uint32_t saveBCLRPAT_B;
- uint32_t saveDSPALINOFF;
- uint32_t saveDSPBLINOFF;
uint32_t savePERF_MODE;
uint32_t saveDSPFW1;
uint32_t saveDSPFW2;
@@ -366,8 +384,6 @@ struct psb_state {
uint32_t saveDSPBCURSOR_BASE;
uint32_t saveDSPACURSOR_POS;
uint32_t saveDSPBCURSOR_POS;
- uint32_t save_palette_a[256];
- uint32_t save_palette_b[256];
uint32_t saveOV_OVADD;
uint32_t saveOV_OGAMC0;
uint32_t saveOV_OGAMC1;
@@ -390,64 +406,7 @@ struct psb_state {
};
struct medfield_state {
- uint32_t saveDPLL_A;
- uint32_t saveFPA0;
- uint32_t savePIPEACONF;
- uint32_t saveHTOTAL_A;
- uint32_t saveHBLANK_A;
- uint32_t saveHSYNC_A;
- uint32_t saveVTOTAL_A;
- uint32_t saveVBLANK_A;
- uint32_t saveVSYNC_A;
- uint32_t savePIPEASRC;
- uint32_t saveDSPASTRIDE;
- uint32_t saveDSPALINOFF;
- uint32_t saveDSPATILEOFF;
- uint32_t saveDSPASIZE;
- uint32_t saveDSPAPOS;
- uint32_t saveDSPASURF;
- uint32_t saveDSPACNTR;
- uint32_t saveDSPASTATUS;
- uint32_t save_palette_a[256];
uint32_t saveMIPI;
-
- uint32_t saveDPLL_B;
- uint32_t saveFPB0;
- uint32_t savePIPEBCONF;
- uint32_t saveHTOTAL_B;
- uint32_t saveHBLANK_B;
- uint32_t saveHSYNC_B;
- uint32_t saveVTOTAL_B;
- uint32_t saveVBLANK_B;
- uint32_t saveVSYNC_B;
- uint32_t savePIPEBSRC;
- uint32_t saveDSPBSTRIDE;
- uint32_t saveDSPBLINOFF;
- uint32_t saveDSPBTILEOFF;
- uint32_t saveDSPBSIZE;
- uint32_t saveDSPBPOS;
- uint32_t saveDSPBSURF;
- uint32_t saveDSPBCNTR;
- uint32_t saveDSPBSTATUS;
- uint32_t save_palette_b[256];
-
- uint32_t savePIPECCONF;
- uint32_t saveHTOTAL_C;
- uint32_t saveHBLANK_C;
- uint32_t saveHSYNC_C;
- uint32_t saveVTOTAL_C;
- uint32_t saveVBLANK_C;
- uint32_t saveVSYNC_C;
- uint32_t savePIPECSRC;
- uint32_t saveDSPCSTRIDE;
- uint32_t saveDSPCLINOFF;
- uint32_t saveDSPCTILEOFF;
- uint32_t saveDSPCSIZE;
- uint32_t saveDSPCPOS;
- uint32_t saveDSPCSURF;
- uint32_t saveDSPCCNTR;
- uint32_t saveDSPCSTATUS;
- uint32_t save_palette_c[256];
uint32_t saveMIPI_C;
uint32_t savePFIT_CONTROL;
@@ -476,6 +435,7 @@ struct cdv_state {
};
struct psb_save_area {
+ struct psb_pipe pipe[3];
uint32_t saveBSM;
uint32_t saveVBT;
union {
@@ -494,15 +454,19 @@ struct psb_ops;
struct drm_psb_private {
struct drm_device *dev;
const struct psb_ops *ops;
+ const struct psb_offset *regmap;
+
+ struct child_device_config *child_dev;
+ int child_dev_num;
struct psb_gtt gtt;
/* GTT Memory manager */
struct psb_gtt_mm *gtt_mm;
struct page *scratch_page;
- u32 *gtt_map;
+ u32 __iomem *gtt_map;
uint32_t stolen_base;
- void *vram_addr;
+ u8 __iomem *vram_addr;
unsigned long vram_stolen_size;
int gtt_initialized;
u16 gmch_ctrl; /* Saved GTT setup */
@@ -518,8 +482,8 @@ struct drm_psb_private {
* Register base
*/
- uint8_t *sgx_reg;
- uint8_t *vdc_reg;
+ uint8_t __iomem *sgx_reg;
+ uint8_t __iomem *vdc_reg;
uint32_t gatt_free_offset;
/*
@@ -543,6 +507,7 @@ struct drm_psb_private {
* Modesetting
*/
struct psb_intel_mode_device mode_dev;
+ bool modeset; /* true if we have done the mode_device setup */
struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
@@ -605,7 +570,7 @@ struct drm_psb_private {
int rpm_enabled;
/* MID specific */
- struct oaktrail_vbt vbt_data;
+ bool has_gct;
struct oaktrail_gct_data gct_data;
/* Oaktrail HDMI state */
@@ -621,6 +586,11 @@ struct drm_psb_private {
uint32_t msi_addr;
uint32_t msi_data;
+ /*
+ * Hotplug handling
+ */
+
+ struct work_struct hotplug_work;
/*
* LID-Switch
@@ -628,7 +598,6 @@ struct drm_psb_private {
spinlock_t lid_lock;
struct timer_list lid_timer;
struct psb_intel_opregion opregion;
- u32 *lid_state;
u32 lid_last_state;
/*
@@ -669,6 +638,8 @@ struct drm_psb_private {
u32 dspcntr[3];
int mdfld_panel_id;
+
+ bool dplla_96mhz; /* DPLL data from the VBT */
};
@@ -682,6 +653,9 @@ struct psb_ops {
int pipes; /* Number of output pipes */
int crtcs; /* Number of CRTCs */
int sgx_offset; /* Base offset of SGX device */
+ int hdmi_mask; /* Mask of HDMI CRTCs */
+ int lvds_mask; /* Mask of LVDS CRTCs */
+	int cursor_needs_phys;	/* If the cursor base register needs a physical address */
/* Sub functions */
struct drm_crtc_helper_funcs const *crtc_helper;
@@ -690,9 +664,13 @@ struct psb_ops {
/* Setup hooks */
int (*chip_setup)(struct drm_device *dev);
void (*chip_teardown)(struct drm_device *dev);
+	/* Optional helper called after modeset */
+ void (*errata)(struct drm_device *dev);
/* Display management hooks */
int (*output_init)(struct drm_device *dev);
+ int (*hotplug)(struct drm_device *dev);
+ void (*hotplug_enable)(struct drm_device *dev, bool on);
/* Power management hooks */
void (*init_pm)(struct drm_device *dev);
int (*save_regs)(struct drm_device *dev);
@@ -789,12 +767,6 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
/*
- * intel_opregion.c
- */
-extern int gma_intel_opregion_init(struct drm_device *dev);
-extern int gma_intel_opregion_exit(struct drm_device *dev);
-
-/*
* framebuffer.c
*/
extern int psbfb_probed(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 2616558457c8..36c3c99612f6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -337,15 +337,12 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
@@ -367,9 +364,9 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -392,18 +389,10 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
psb_gtt_unpin(psbfb->gtt);
goto psb_intel_pipe_set_base_exit;
}
- REG_WRITE(dspcntr_reg, dspcntr);
-
+ REG_WRITE(map->cntr, dspcntr);
- if (0 /* FIXMEAC - check what PSB needs */) {
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
- } else {
- REG_WRITE(dspbase, start + offset);
- REG_READ(dspbase);
- }
+ REG_WRITE(map->base, start + offset);
+ REG_READ(map->base);
psb_intel_pipe_cleaner:
/* If there was a previous display we can now unpin it */
@@ -424,14 +413,10 @@ psb_intel_pipe_set_base_exit:
static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
- /* struct drm_i915_private *dev_priv = dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
/* XXX: When our outputs are all unaware of DPMS modes other than off
@@ -442,34 +427,34 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
psb_intel_crtc_load_lut(crtc);
@@ -487,29 +472,29 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
}
/* Wait for vblank for the disable to take effect. */
psb_intel_wait_for_vblank(dev);
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
}
/* Wait for the clocks to turn off. */
@@ -589,22 +574,11 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int pipe = psb_intel_crtc->pipe;
- int fp_reg = (pipe == 0) ? FPA0 : FPB0;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
struct psb_intel_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
@@ -690,7 +664,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
/* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
+ pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -712,9 +686,9 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
drm_mode_debug_printmodeline(mode);
if (dpll & DPLL_VCO_ENABLE) {
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
udelay(150);
}
@@ -747,45 +721,45 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
REG_READ(LVDS);
}
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
/* write it again -- the BIOS does, after all */
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->dpll, dpll);
- REG_READ(dpll_reg);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
- REG_WRITE(dspsize_reg,
+ REG_WRITE(map->size,
((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(dsppos_reg, 0);
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->pos, 0);
+ REG_WRITE(map->src,
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
psb_intel_wait_for_vblank(dev);
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
/* Flush the plane changes */
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
@@ -799,10 +773,10 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int palreg = PALETTE_A;
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
+ int palreg = map->palette;
int i;
/* The clocks have to be on to load the palette. */
@@ -811,12 +785,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
switch (psb_intel_crtc->pipe) {
case 0:
- break;
case 1:
- palreg = PALETTE_B;
- break;
- case 2:
- palreg = PALETTE_C;
break;
default:
dev_err(dev->dev, "Illegal Pipe Number.\n");
@@ -836,7 +805,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
gma_power_end(dev);
} else {
for (i = 0; i < 256; i++) {
- dev_priv->regs.psb.save_palette_a[i] =
+ dev_priv->regs.pipe[0].palette[i] =
((psb_intel_crtc->lut_r[i] +
psb_intel_crtc->lut_adj[i]) << 16) |
((psb_intel_crtc->lut_g[i] +
@@ -854,11 +823,10 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
static void psb_intel_crtc_save(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -867,27 +835,27 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
return;
}
- crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
- crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
- crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
- crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
- crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
- crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
- crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
- crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
- crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
- crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
- crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
- crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
- crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+ crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+ crtc_state->savePIPECONF = REG_READ(map->conf);
+ crtc_state->savePIPESRC = REG_READ(map->src);
+ crtc_state->saveFP0 = REG_READ(map->fp0);
+ crtc_state->saveFP1 = REG_READ(map->fp1);
+ crtc_state->saveDPLL = REG_READ(map->dpll);
+ crtc_state->saveHTOTAL = REG_READ(map->htotal);
+ crtc_state->saveHBLANK = REG_READ(map->hblank);
+ crtc_state->saveHSYNC = REG_READ(map->hsync);
+ crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+ crtc_state->saveVBLANK = REG_READ(map->vblank);
+ crtc_state->saveVSYNC = REG_READ(map->vsync);
+ crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
/*NOTE: DSPSIZE DSPPOS only for psb*/
- crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
- crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+ crtc_state->saveDSPSIZE = REG_READ(map->size);
+ crtc_state->saveDSPPOS = REG_READ(map->pos);
- crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+ crtc_state->saveDSPBASE = REG_READ(map->base);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
}
@@ -898,12 +866,10 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
static void psb_intel_crtc_restore(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private * dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -913,45 +879,45 @@ static void psb_intel_crtc_restore(struct drm_crtc *crtc)
}
if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
- REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+ REG_WRITE(map->dpll,
crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_READ(map->dpll);
udelay(150);
}
- REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
- REG_READ(pipeA ? FPA0 : FPB0);
+ REG_WRITE(map->fp0, crtc_state->saveFP0);
+ REG_READ(map->fp0);
- REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
- REG_READ(pipeA ? FPA1 : FPB1);
+ REG_WRITE(map->fp1, crtc_state->saveFP1);
+ REG_READ(map->fp1);
- REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_WRITE(map->dpll, crtc_state->saveDPLL);
+ REG_READ(map->dpll);
udelay(150);
- REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
- REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
- REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
- REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
- REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
- REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
- REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+ REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+ REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+ REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+ REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+ REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+ REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+ REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
- REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
- REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+ REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+ REG_WRITE(map->pos, crtc_state->saveDSPPOS);
- REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
- REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+ REG_WRITE(map->src, crtc_state->savePIPESRC);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+ REG_WRITE(map->conf, crtc_state->savePIPECONF);
psb_intel_wait_for_vblank(dev);
- REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
psb_intel_wait_for_vblank(dev);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
}
@@ -962,6 +928,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
@@ -969,8 +936,10 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t temp;
size_t addr = 0;
struct gtt_range *gt;
+ struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
struct drm_gem_object *obj;
- int ret;
+ void *tmp_dst, *tmp_src;
+ int ret, i, cursor_pages;
/* if we want to turn off the cursor, ignore width and height */
if (!handle) {
@@ -1019,10 +988,32 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
return ret;
}
+ if (dev_priv->ops->cursor_needs_phys) {
+ if (cursor_gt == NULL) {
+ dev_err(dev->dev, "No hardware cursor mem available");
+ return -ENOMEM;
+ }
- addr = gt->offset; /* Or resource.start ??? */
+ /* Prevent overflow */
+ if (gt->npage > 4)
+ cursor_pages = 4;
+ else
+ cursor_pages = gt->npage;
+
+ /* Copy the cursor to cursor mem */
+ tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
+ for (i = 0; i < cursor_pages; i++) {
+ tmp_src = kmap(gt->pages[i]);
+ memcpy(tmp_dst, tmp_src, PAGE_SIZE);
+ kunmap(gt->pages[i]);
+ tmp_dst += PAGE_SIZE;
+ }
- psb_intel_crtc->cursor_addr = addr;
+ addr = psb_intel_crtc->cursor_addr;
+ } else {
+ addr = gt->offset; /* Or resource.start ??? */
+ psb_intel_crtc->cursor_addr = addr;
+ }
temp = 0;
/* set the pipe for the cursor */
@@ -1115,34 +1106,30 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
struct psb_intel_clock_t clock;
bool is_lvds;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
if (gma_power_begin(dev, false)) {
- dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ dpll = REG_READ(map->dpll);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ fp = REG_READ(map->fp0);
else
- fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ fp = REG_READ(map->fp1);
is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
gma_power_end(dev);
} else {
- dpll = (pipe == 0) ?
- dev_priv->regs.psb.saveDPLL_A :
- dev_priv->regs.psb.saveDPLL_B;
+ dpll = p->dpll;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA0 :
- dev_priv->regs.psb.saveFPB0;
+ fp = p->fp0;
else
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA1 :
- dev_priv->regs.psb.saveFPB1;
+ fp = p->fp1;
is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS &
LVDS_PORT_EN);
@@ -1202,26 +1189,20 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
int vtot;
int vsync;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
if (gma_power_begin(dev, false)) {
- htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
- hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
- vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
- vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ htot = REG_READ(map->htotal);
+ hsync = REG_READ(map->hsync);
+ vtot = REG_READ(map->vtotal);
+ vsync = REG_READ(map->vsync);
gma_power_end(dev);
} else {
- htot = (pipe == 0) ?
- dev_priv->regs.psb.saveHTOTAL_A :
- dev_priv->regs.psb.saveHTOTAL_B;
- hsync = (pipe == 0) ?
- dev_priv->regs.psb.saveHSYNC_A :
- dev_priv->regs.psb.saveHSYNC_B;
- vtot = (pipe == 0) ?
- dev_priv->regs.psb.saveVTOTAL_A :
- dev_priv->regs.psb.saveVTOTAL_B;
- vsync = (pipe == 0) ?
- dev_priv->regs.psb.saveVSYNC_A :
- dev_priv->regs.psb.saveVSYNC_B;
+ htot = p->htotal;
+ hsync = p->hsync;
+ vtot = p->vtotal;
+ vsync = p->vsync;
}
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -1257,6 +1238,9 @@ void psb_intel_crtc_destroy(struct drm_crtc *crtc)
drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
psb_intel_crtc->cursor_obj = NULL;
}
+
+ if (psb_intel_crtc->cursor_gt != NULL)
+ psb_gtt_free_range(crtc->dev, psb_intel_crtc->cursor_gt);
kfree(psb_intel_crtc->crtc_state);
drm_crtc_cleanup(crtc);
kfree(psb_intel_crtc);
@@ -1285,13 +1269,33 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
* Set the default value of cursor control and base register
* to zero. This is a workaround for h/w defect on Oaktrail
*/
-static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
+static void psb_intel_cursor_init(struct drm_device *dev,
+ struct psb_intel_crtc *psb_intel_crtc)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
+ struct gtt_range *cursor_gt;
+
+ if (dev_priv->ops->cursor_needs_phys) {
+ /* Allocate 4 pages of stolen mem for a hardware cursor. That
+ * is enough for the 64 x 64 ARGB cursors we support.
+ */
+ cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1);
+ if (!cursor_gt) {
+ psb_intel_crtc->cursor_gt = NULL;
+ goto out;
+ }
+ psb_intel_crtc->cursor_gt = cursor_gt;
+ psb_intel_crtc->cursor_addr = dev_priv->stolen_base +
+ cursor_gt->offset;
+ } else {
+ psb_intel_crtc->cursor_gt = NULL;
+ }
- REG_WRITE(control[pipe], 0);
- REG_WRITE(base[pipe], 0);
+out:
+ REG_WRITE(control[psb_intel_crtc->pipe], 0);
+ REG_WRITE(base[psb_intel_crtc->pipe], 0);
}
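
The four-page stolen-memory allocation above is sized by straightforward arithmetic: the largest supported cursor is 64 x 64 ARGB, i.e. 64 * 64 * 4 = 16384 bytes, exactly four 4 KiB pages. A compile-time sketch of that check (PSB_PAGE_SIZE is a stand-in for the kernel's PAGE_SIZE, assuming 4 KiB pages as on x86):

	#include <assert.h>		/* static_assert, C11 */

	#define PSB_PAGE_SIZE 4096	/* stand-in for PAGE_SIZE; 4 KiB assumed */

	/* 64 x 64 pixels at 4 bytes per pixel (ARGB) is exactly four pages. */
	static_assert(64 * 64 * 4 == 4 * PSB_PAGE_SIZE,
		      "hardware cursor must fit the 4-page stolen allocation");
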
void psb_intel_crtc_init(struct drm_device *dev, int pipe,
@@ -1357,7 +1361,7 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
psb_intel_crtc->mode_set.connectors =
(struct drm_connector **) (psb_intel_crtc + 1);
psb_intel_crtc->mode_set.num_connectors = 0;
- psb_intel_cursor_init(dev, pipe);
+ psb_intel_cursor_init(dev, psb_intel_crtc);
}
int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index f40535e56689..2515f83248cb 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -106,11 +106,6 @@ struct psb_intel_mode_device {
size_t(*bo_offset) (struct drm_device *dev, void *bo);
/*
- * Cursor (Can go ?)
- */
- int cursor_needs_physical;
-
- /*
* LVDS info
*/
int backlight_duty_cycle; /* restore backlight to this value */
@@ -176,6 +171,7 @@ struct psb_intel_crtc {
int pipe;
int plane;
uint32_t cursor_addr;
+ struct gtt_range *cursor_gt;
u8 lut_r[256], lut_g[256], lut_b[256];
u8 lut_adj[256];
struct psb_intel_framebuffer *fbdev_fb;
@@ -193,6 +189,9 @@ struct psb_intel_crtc {
/*crtc mode setting flags*/
u32 mode_flags;
+ bool active;
+ bool crtc_enable;
+
/* Saved Crtc HW states */
struct psb_intel_crtc_state *crtc_state;
};
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index e89d3a2e8fdc..8e8c8efb0a89 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -91,6 +91,9 @@
#define BLC_PWM_CTL 0x61254
#define BLC_PWM_CTL2 0x61250
+#define PWM_ENABLE (1 << 31)
+#define PWM_LEGACY_MODE (1 << 30)
+#define PWM_PIPE_B (1 << 29)
#define BLC_PWM_CTL_C 0x62254
#define BLC_PWM_CTL2_C 0x62250
#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
@@ -216,7 +219,7 @@
#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_FPA0h1_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_LOCK (1 << 15) /* CDV */
/*
@@ -343,6 +346,9 @@
#define FP_M2_DIV_SHIFT 0
#define PORT_HOTPLUG_EN 0x61110
+#define HDMIB_HOTPLUG_INT_EN (1 << 29)
+#define HDMIC_HOTPLUG_INT_EN (1 << 28)
+#define HDMID_HOTPLUG_INT_EN (1 << 27)
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
@@ -501,10 +507,12 @@
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18)
#define PIPE_TE_ENABLE (1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
#define PIPE_VSYNC_ENABL (1UL << 25)
#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26)
#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL << 27)
+#define PIPE_FIFO_UNDERRUN (1UL << 31)
#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | \
PIPE_HDMI_AUDIO_BUFFER_DONE)
#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
@@ -569,12 +577,27 @@ struct dpst_guardband {
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
+#define FW_BLC_SELF 0x20e0
+#define FW_BLC_SELF_EN (1<<15)
+
#define DSPARB 0x70030
#define DSPFW1 0x70034
+#define DSP_FIFO_SR_WM_MASK 0xFF800000
+#define DSP_FIFO_SR_WM_SHIFT 23
+#define CURSOR_B_FIFO_WM_MASK 0x003F0000
+#define CURSOR_B_FIFO_WM_SHIFT 16
#define DSPFW2 0x70038
+#define CURSOR_A_FIFO_WM_MASK 0x3F00
+#define CURSOR_A_FIFO_WM_SHIFT 8
+#define DSP_PLANE_C_FIFO_WM_MASK 0x7F
+#define DSP_PLANE_C_FIFO_WM_SHIFT 0
#define DSPFW3 0x7003c
#define DSPFW4 0x70050
#define DSPFW5 0x70054
+#define DSP_PLANE_B_FIFO_WM1_SHIFT 24
+#define DSP_PLANE_A_FIFO_WM1_SHIFT 16
+#define CURSOR_B_FIFO_WM1_SHIFT 8
+#define CURSOR_FIFO_SR_WM1_SHIFT 0
#define DSPFW6 0x70058
#define DSPCHICKENBIT 0x70400
#define DSPACNTR 0x70180
@@ -1290,6 +1313,15 @@ No status bits are changed.
#define SB_N_CB_TUNE_MASK PSB_MASK(25, 24)
#define SB_N_CB_TUNE_SHIFT 24
+/* bits 14:13 select between the different reference clocks for Pipe A/B */
+#define SB_REF_DPLLA 0x8010
+#define SB_REF_DPLLB 0x8030
+#define REF_CLK_MASK (0x3 << 13)
+#define REF_CLK_CORE (0 << 13)
+#define REF_CLK_DPLL (1 << 13)
+#define REF_CLK_DPLLA (2 << 13)
+/* DPLL B uses the reference clock from DPLL A when (2 << 13) is selected */
+
#define _SB_REF_A 0x8018
#define _SB_REF_B 0x8038
#define SB_REF_SFR(pipe) _PIPE(pipe, _SB_REF_A, _SB_REF_B)
@@ -1313,6 +1345,7 @@ No status bits are changed.
#define LANE_PLL_MASK (0x7 << 20)
#define LANE_PLL_ENABLE (0x3 << 20)
+#define LANE_PLL_PIPE(p) (((p) == 0) ? (1 << 21) : (0 << 21))
#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 36330cabcea2..d39b15be7649 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1141,7 +1141,6 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1161,11 +1160,6 @@ static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- /* We assume worst case scenario of 32 bpp here, since we don't know */
- if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
- dev_priv->vram_stolen_size)
- return MODE_MEM;
-
return MODE_OK;
}
@@ -2044,8 +2038,7 @@ psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
struct drm_device *dev = connector->base.base.dev;
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
- intel_attach_broadcast_rgb_property(&connector->base.base);
+ intel_attach_broadcast_rgb_property(&connector->base.base);
*/
}
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 1869586457b1..8652cdf3f03f 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -190,6 +190,9 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
*/
static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
{
+ if (vdc_stat & _PSB_IRQ_ASLE)
+ psb_intel_opregion_asle_intr(dev);
+
if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
mid_pipe_event_handler(dev, 0);
@@ -199,11 +202,9 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
{
- struct drm_device *dev = (struct drm_device *) arg;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
-
- uint32_t vdc_stat, dsp_int = 0, sgx_int = 0;
+ struct drm_device *dev = arg;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
int handled = 0;
spin_lock(&dev_priv->irqmask_lock);
@@ -220,6 +221,8 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
if (vdc_stat & _PSB_IRQ_SGX_FLAG)
sgx_int = 1;
+ if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
+ hotplug_int = 1;
vdc_stat &= dev_priv->vdc_irq_mask;
spin_unlock(&dev_priv->irqmask_lock);
@@ -241,6 +244,13 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
handled = 1;
}
+ /* Note: this bit has other meanings on some devices, so we will
+ need to address that later if it ever matters */
+ if (hotplug_int && dev_priv->ops->hotplug) {
+ handled = dev_priv->ops->hotplug(dev);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ }
+
PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
DRM_READMEMORYBARRIER();
@@ -273,6 +283,11 @@ void psb_irq_preinstall(struct drm_device *dev)
dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
*/
+ /* Revisit this area - want per device masks ? */
+ if (dev_priv->ops->hotplug)
+ dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
+ dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE;
+
/* This register is safe even if display island is off */
PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
@@ -305,18 +320,23 @@ int psb_irq_postinstall(struct drm_device *dev)
else
psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ if (dev_priv->ops->hotplug_enable)
+ dev_priv->ops->hotplug_enable(dev, true);
+
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
return 0;
}
void psb_irq_uninstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ if (dev_priv->ops->hotplug_enable)
+ dev_priv->ops->hotplug_enable(dev, false);
+
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
if (dev->vblank_enabled[0])
@@ -406,7 +426,7 @@ void psb_irq_turn_off_dpst(struct drm_device *dev)
psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
- PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE),
+ PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
PWM_CONTROL_LOGIC);
pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
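
The last hunk above fixes a logical-vs-bitwise NOT bug: since PWM_PHASEIN_INT_ENABLE is non-zero, !(PWM_PHASEIN_INT_ENABLE) evaluates to 0, so the old mask cleared the whole register rather than the single interrupt-enable bit. A standalone illustration (the bit position here is hypothetical, chosen only for the demo):

	#include <assert.h>
	#include <stdint.h>

	#define PWM_PHASEIN_INT_ENABLE (1u << 20)	/* hypothetical bit position */

	int main(void)
	{
		uint32_t reg = 0xffffffffu;

		/* Logical NOT of any non-zero value is 0: the AND wipes the register. */
		assert((reg & !PWM_PHASEIN_INT_ENABLE) == 0x00000000u);

		/* Bitwise NOT keeps every bit except the one being cleared. */
		assert((reg & ~PWM_PHASEIN_INT_ENABLE) == 0xffefffffu);
		return 0;
	}
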
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
index b867aabe6bf3..1d2ebb5e530f 100644
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -29,7 +29,7 @@ static void psb_lid_timer_func(unsigned long data)
struct drm_device *dev = (struct drm_device *)dev_priv->dev;
struct timer_list *lid_timer = &dev_priv->lid_timer;
unsigned long irq_flags;
- u32 *lid_state = dev_priv->lid_state;
+ u32 __iomem *lid_state = dev_priv->opregion.lid_state;
u32 pp_status;
if (readl(lid_state) == dev_priv->lid_last_state)
@@ -40,10 +40,16 @@ static void psb_lid_timer_func(unsigned long data)
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
do {
pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0);
+ } while ((pp_status & PP_ON) == 0 &&
+ (pp_status & PP_SEQUENCE_MASK) != 0);
- /*FIXME: should be backlight level before*/
- psb_intel_lvds_set_brightness(dev, 100);
+ if (REG_READ(PP_STATUS) & PP_ON) {
+ /*FIXME: should be backlight level before*/
+ psb_intel_lvds_set_brightness(dev, 100);
+ } else {
+ DRM_DEBUG("LVDS panel never powered up");
+ return;
+ }
} else {
psb_intel_lvds_set_brightness(dev, 0);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ce7fc77678b4..2e9268da58d8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -11,17 +11,21 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
+ i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_sysfs.o \
i915_trace_points.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
intel_bios.o \
+ intel_ddi.o \
intel_dp.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
intel_panel.o \
+ intel_pm.o \
intel_i2c.o \
intel_fb.o \
intel_tv.o \
@@ -34,7 +38,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
dvo_ch7017.o \
dvo_ivch.o \
dvo_tfp410.o \
- dvo_sil164.o
+ dvo_sil164.o \
+ i915_gem_dmabuf.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e6162a1681f0..eb2b3c25b9e1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -47,7 +47,6 @@ enum {
FLUSHING_LIST,
INACTIVE_LIST,
PINNED_LIST,
- DEFERRED_FREE_LIST,
};
static const char *yesno(int v)
@@ -178,18 +177,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
- case PINNED_LIST:
- seq_printf(m, "Pinned:\n");
- head = &dev_priv->mm.pinned_list;
- break;
case FLUSHING_LIST:
seq_printf(m, "Flushing:\n");
head = &dev_priv->mm.flushing_list;
break;
- case DEFERRED_FREE_LIST:
- seq_printf(m, "Deferred free:\n");
- head = &dev_priv->mm.deferred_free_list;
- break;
default:
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
@@ -252,21 +243,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.pinned_list, mm_list);
- seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
- count, mappable_count, size, mappable_size);
-
- size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.inactive_list, mm_list);
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.deferred_free_list, mm_list);
- seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
- count, mappable_count, size, mappable_size);
-
- size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
if (obj->fault_mappable) {
size += obj->gtt_space->size;
@@ -294,6 +275,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
+ uintptr_t list = (uintptr_t) node->info_ent->data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
@@ -305,6 +287,9 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (list == PINNED_LIST && obj->pin_count == 0)
+ continue;
+
seq_printf(m, " ");
describe_obj(m, obj);
seq_printf(m, "\n");
@@ -321,7 +306,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
return 0;
}
-
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -430,10 +414,6 @@ static void i915_ring_seqno_info(struct seq_file *m,
if (ring->get_seqno) {
seq_printf(m, "Current sequence (%s): %d\n",
ring->name, ring->get_seqno(ring));
- seq_printf(m, "Waiter sequence (%s): %d\n",
- ring->name, ring->waiting_seqno);
- seq_printf(m, "IRQ sequence (%s): %d\n",
- ring->name, ring->irq_seqno);
}
}
@@ -468,7 +448,45 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
if (ret)
return ret;
- if (!HAS_PCH_SPLIT(dev)) {
+ if (IS_VALLEYVIEW(dev)) {
+ seq_printf(m, "Display IER:\t%08x\n",
+ I915_READ(VLV_IER));
+ seq_printf(m, "Display IIR:\t%08x\n",
+ I915_READ(VLV_IIR));
+ seq_printf(m, "Display IIR_RW:\t%08x\n",
+ I915_READ(VLV_IIR_RW));
+ seq_printf(m, "Display IMR:\t%08x\n",
+ I915_READ(VLV_IMR));
+ for_each_pipe(pipe)
+ seq_printf(m, "Pipe %c stat:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
+
+ seq_printf(m, "Master IER:\t%08x\n",
+ I915_READ(VLV_MASTER_IER));
+
+ seq_printf(m, "Render IER:\t%08x\n",
+ I915_READ(GTIER));
+ seq_printf(m, "Render IIR:\t%08x\n",
+ I915_READ(GTIIR));
+ seq_printf(m, "Render IMR:\t%08x\n",
+ I915_READ(GTIMR));
+
+ seq_printf(m, "PM IER:\t\t%08x\n",
+ I915_READ(GEN6_PMIER));
+ seq_printf(m, "PM IIR:\t\t%08x\n",
+ I915_READ(GEN6_PMIIR));
+ seq_printf(m, "PM IMR:\t\t%08x\n",
+ I915_READ(GEN6_PMIMR));
+
+ seq_printf(m, "Port hotplug:\t%08x\n",
+ I915_READ(PORT_HOTPLUG_EN));
+ seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+ I915_READ(VLV_DPFLIPSTAT));
+ seq_printf(m, "DPINVGTT:\t%08x\n",
+ I915_READ(DPINVGTT));
+
+ } else if (!HAS_PCH_SPLIT(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
@@ -564,69 +582,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_ringbuffer_data(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- if (!ring->obj) {
- seq_printf(m, "No ringbuffer setup\n");
- } else {
- const u8 __iomem *virt = ring->virtual_start;
- uint32_t off;
-
- for (off = 0; off < ring->size; off += 4) {
- uint32_t *ptr = (uint32_t *)(virt + off);
- seq_printf(m, "%08x : %08x\n", off, *ptr);
- }
- }
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static int i915_ringbuffer_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- int ret;
-
- ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- if (ring->size == 0)
- return 0;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- seq_printf(m, "Ring %s:\n", ring->name);
- seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
- seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
- seq_printf(m, " Size : %08x\n", ring->size);
- seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
- seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
- seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
- seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
- }
- seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
- seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static const char *ring_str(int ring)
{
switch (ring) {
@@ -704,6 +659,7 @@ static void i915_ring_error_state(struct seq_file *m,
struct drm_i915_error_state *error,
unsigned ring)
{
+ BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
seq_printf(m, "%s command stream:\n", ring_str(ring));
seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
@@ -718,8 +674,8 @@ static void i915_ring_error_state(struct seq_file *m,
if (INTEL_INFO(dev)->gen >= 4)
seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+ seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
if (INTEL_INFO(dev)->gen >= 6) {
- seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
seq_printf(m, " SYNC_0: 0x%08x\n",
error->semaphore_mboxes[ring][0]);
@@ -727,31 +683,35 @@ static void i915_ring_error_state(struct seq_file *m,
error->semaphore_mboxes[ring][1]);
}
seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+ seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}
+struct i915_error_state_file_priv {
+ struct drm_device *dev;
+ struct drm_i915_error_state *error;
+};
+
static int i915_error_state(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct i915_error_state_file_priv *error_priv = m->private;
+ struct drm_device *dev = error_priv->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error;
- unsigned long flags;
+ struct drm_i915_error_state *error = error_priv->error;
+ struct intel_ring_buffer *ring;
int i, j, page, offset, elt;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- if (!dev_priv->first_error) {
+ if (!error) {
seq_printf(m, "no error state collected\n");
- goto out;
+ return 0;
}
- error = dev_priv->first_error;
-
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
+ seq_printf(m, "IER: 0x%08x\n", error->ier);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -762,11 +722,8 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- i915_ring_error_state(m, dev, error, RCS);
- if (HAS_BLT(dev))
- i915_ring_error_state(m, dev, error, BCS);
- if (HAS_BSD(dev))
- i915_ring_error_state(m, dev, error, VCS);
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_error_state(m, dev, error, i);
if (error->active_bo)
print_error_buffers(m, "Active",
@@ -828,12 +785,71 @@ static int i915_error_state(struct seq_file *m, void *unused)
if (error->display)
intel_display_print_error_state(m, dev, error->display);
-out:
+ return 0;
+}
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct i915_error_state_file_priv *error_priv = m->private;
+ struct drm_device *dev = error_priv->dev;
+
+ DRM_DEBUG_DRIVER("Resetting error state\n");
+
+ mutex_lock(&dev->struct_mutex);
+ i915_destroy_error_state(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_error_state_file_priv *error_priv;
+ unsigned long flags;
+
+ error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
+ if (!error_priv)
+ return -ENOMEM;
+
+ error_priv->dev = dev;
+
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+ error_priv->error = dev_priv->first_error;
+ if (error_priv->error)
+ kref_get(&error_priv->error->ref);
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
- return 0;
+ return single_open(file, i915_error_state, error_priv);
+}
+
+static int i915_error_state_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct i915_error_state_file_priv *error_priv = m->private;
+
+ if (error_priv->error)
+ kref_put(&error_priv->error->ref, i915_error_state_free);
+ kfree(error_priv);
+
+ return single_release(inode, file);
}
+static const struct file_operations i915_error_state_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_error_state_open,
+ .read = seq_read,
+ .write = i915_error_state_write,
+ .llseek = default_llseek,
+ .release = i915_error_state_release,
+};
+
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1132,6 +1148,17 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "Core Power Down: %s\n",
yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+
+ /* Not exactly sure what this is */
+ seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6_LOCKED));
+ seq_printf(m, "RC6 residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6));
+ seq_printf(m, "RC6+ residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6p));
+ seq_printf(m, "RC6++ residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6pp));
+
return 0;
}
@@ -1306,17 +1333,25 @@ static int i915_opregion(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
+ void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
int ret;
+ if (data == NULL)
+ return -ENOMEM;
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto out;
- if (opregion->header)
- seq_write(m, opregion->header, OPREGION_SIZE);
+ if (opregion->header) {
+ memcpy_fromio(data, opregion->header, OPREGION_SIZE);
+ seq_write(m, data, OPREGION_SIZE);
+ }
mutex_unlock(&dev->struct_mutex);
+out:
+ kfree(data);
return 0;
}
@@ -1505,6 +1540,53 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
return 0;
}
+static int i915_dpio_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+
+ if (!IS_VALLEYVIEW(dev)) {
+ seq_printf(m, "unsupported\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
+
+ seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_DIV_A));
+ seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_DIV_B));
+
+ seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
+ seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
+
+ seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+ seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+
+ seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
+ seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
+
+ seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
+ intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
static ssize_t
i915_wedged_read(struct file *filp,
char __user *ubuf,
@@ -1562,6 +1644,65 @@ static const struct file_operations i915_wedged_fops = {
};
static ssize_t
+i915_ring_stop_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[20];
+ int len;
+
+ len = snprintf(buf, sizeof(buf),
+ "0x%08x\n", dev_priv->stop_rings);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_ring_stop_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ char buf[20];
+ int val = 0;
+
+ if (cnt > 0) {
+ if (cnt > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
+
+ mutex_lock(&dev->struct_mutex);
+ dev_priv->stop_rings = val;
+ mutex_unlock(&dev->struct_mutex);
+
+ return cnt;
+}
+
+static const struct file_operations i915_ring_stop_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i915_ring_stop_read,
+ .write = i915_ring_stop_write,
+ .llseek = default_llseek,
+};
+
+static ssize_t
i915_max_freq_read(struct file *filp,
char __user *ubuf,
size_t max,
@@ -1738,7 +1879,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
return 0;
}
-int i915_forcewake_release(struct inode *inode, struct file *file)
+static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1803,11 +1944,10 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
+ {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
- {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
- {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -1816,13 +1956,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
- {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
- {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
- {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
- {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
- {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
- {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
- {"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
{"i915_delayfreq_table", i915_delayfreq_table, 0},
@@ -1839,6 +1972,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
+ {"i915_dpio", i915_dpio_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -1867,6 +2001,17 @@ int i915_debugfs_init(struct drm_minor *minor)
&i915_cache_sharing_fops);
if (ret)
return ret;
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_ring_stop",
+ &i915_ring_stop_fops);
+ if (ret)
+ return ret;
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_error_state",
+ &i915_error_state_fops);
+ if (ret)
+ return ret;
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
@@ -1885,6 +2030,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
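
Taken together, the debugfs hunks above turn i915_error_state from a read-only drm_info_list entry into a proper file whose write hook throws away the captured error state, and add the i915_ring_stop control beside it. A userspace sketch of the reset path (the dri/0 minor and a mounted debugfs are assumptions here; the written bytes are ignored, any write clears the state):

	#include <fcntl.h>
	#include <unistd.h>

	/* Discard a captured GPU error state so the next hang is recorded fresh. */
	int i915_clear_error_state(void)
	{
		int fd = open("/sys/kernel/debug/dri/0/i915_error_state", O_WRONLY);

		if (fd < 0)
			return -1;
		(void)write(fd, "1", 1);	/* contents ignored; the write itself resets */
		close(fd);
		return 0;
	}
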
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ba60f3c8f911..f94792626b94 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
@@ -34,15 +36,62 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
-#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <acpi/video.h>
+#include <asm/pat.h>
+
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+ intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+ intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+ intel_ring_advance(LP_RING(dev_priv))
+
+/**
+ * Lock test for when it's just for synchronization of ring access.
+ *
+ * In that case, we don't need to do it when GEM is initialized as nobody else
+ * has access to the ring.
+ */
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
+ if (LP_RING(dev->dev_private)->obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file); \
+} while (0)
+
+static inline u32
+intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
+{
+ if (I915_NEED_GFX_HWS(dev_priv->dev))
+ return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
+ else
+ return intel_read_status_page(LP_RING(dev_priv), reg);
+}
+
+#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_BREADCRUMB_INDEX 0x21
+
+void i915_update_dri1_breadcrumb(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+}
static void i915_write_hws_pga(struct drm_device *dev)
{
@@ -97,7 +146,7 @@ static void i915_free_hws(struct drm_device *dev)
if (ring->status_page.gfx_addr) {
ring->status_page.gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
}
/* Need to rewrite hardware status page */
@@ -195,7 +244,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
/* Allow hardware batchbuffers unless told otherwise.
*/
- dev_priv->allow_batchbuffer = 1;
+ dev_priv->dri1.allow_batchbuffer = 1;
return 0;
}
@@ -207,7 +256,7 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG_DRIVER("%s\n", __func__);
- if (ring->map.handle == NULL) {
+ if (ring->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
@@ -236,6 +285,9 @@ static int i915_dma_init(struct drm_device *dev, void *data,
drm_i915_init_t *init = data;
int retcode = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
switch (init->func) {
case I915_INIT_DMA:
retcode = i915_initialize(dev, init);
@@ -578,6 +630,9 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data,
{
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
mutex_lock(&dev->struct_mutex);
@@ -598,7 +653,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
int ret;
struct drm_clip_rect *cliprects = NULL;
- if (!dev_priv->allow_batchbuffer) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv->dri1.allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
}
@@ -655,6 +713,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (cmdbuf->num_cliprects < 0)
@@ -706,11 +767,166 @@ fail_batch_free:
return ret;
}
+static int i915_emit_irq(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+
+ i915_kernel_lost_context(dev);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ dev_priv->counter++;
+ if (dev_priv->counter > 0x7FFFFFFFUL)
+ dev_priv->counter = 1;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
+
+ return dev_priv->counter;
+}
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ int ret = 0;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
+ READ_BREADCRUMB(dev_priv));
+
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ return 0;
+ }
+
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+ if (ring->irq_get(ring)) {
+ DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
+ ring->irq_put(ring);
+ } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+ ret = -EBUSY;
+
+ if (ret == -EBUSY) {
+ DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+ }
+
+ return ret;
+}
+
+/* Needs the lock as it touches the ring.
+ */
+static int i915_irq_emit(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_emit_t *emit = data;
+ int result;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ mutex_lock(&dev->struct_mutex);
+ result = i915_emit_irq(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+static int i915_irq_wait(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_wait_t *irqwait = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ return i915_wait_irq(dev, irqwait->irq_seq);
+}
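
The emit/wait pair above is the whole of the old DRI1 synchronisation contract: userspace asks the kernel to write a breadcrumb into the ring (I915_IRQ_EMIT) and later blocks until that breadcrumb has passed (I915_IRQ_WAIT). A minimal userspace sketch against the stock libdrm wrappers, assuming an authenticated fd and with error handling elided:

#include <xf86drm.h>
#include <i915_drm.h>

/* Hedged sketch: emit a breadcrumb, then block until it retires. */
static void dri1_sync(int fd)
{
	drm_i915_irq_emit_t emit;
	drm_i915_irq_wait_t wait;
	int seq = 0;

	emit.irq_seq = &seq;	/* kernel copies the new breadcrumb here */
	drmCommandWriteRead(fd, DRM_I915_IRQ_EMIT, &emit, sizeof(emit));

	wait.irq_seq = seq;
	drmCommandWrite(fd, DRM_I915_IRQ_WAIT, &wait, sizeof(wait));
}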
+
+static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_vblank_pipe_t *pipe = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+ return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+static int i915_vblank_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ /* The delayed swap mechanism was fundamentally racy, and has been
+ * removed. The model was that the client requested a delayed flip/swap
+ * from the kernel, then waited for vblank before continuing to perform
+ * rendering. The problem was that the kernel might wake the client
+ * up before it dispatched the vblank swap (since the lock has to be
+ * held while touching the ringbuffer), in which case the client would
+ * clear and start the next frame before the swap occurred, and
+ * flicker would occur in addition to likely missing the vblank.
+ *
+ * In the absence of this ioctl, userland falls back to a correct path
+ * of waiting for a vblank, then dispatching the swap on its own.
+ * Context switching to userland and back is plenty fast enough for
+ * meeting the requirements of vblank swapping.
+ */
+ return -EINVAL;
+}
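
The correct path the comment refers to is a plain vblank wait from userspace followed by dispatching the swap directly. A hedged libdrm sketch of that fallback (pipe A; other pipes need the high-crtc flags, and error handling is elided):

#include <string.h>
#include <xf86drm.h>

/* Hedged sketch of the userspace fallback: wait for the next vblank,
 * then emit the swap/blit from userspace as usual. */
static void swap_at_vblank(int fd)
{
	drmVBlank vbl;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 1;	/* the next vblank */
	drmWaitVBlank(fd, &vbl);

	/* ...now dispatch the buffer swap... */
}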
+
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
DRM_DEBUG_DRIVER("%s\n", __func__);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -739,7 +955,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev->pdev->irq ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
- value = dev_priv->allow_batchbuffer ? 1 : 0;
+ value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
break;
case I915_PARAM_LAST_DISPATCH:
value = READ_BREADCRUMB(dev_priv);
@@ -748,7 +964,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev->pci_device;
break;
case I915_PARAM_HAS_GEM:
- value = dev_priv->has_gem;
+ value = 1;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
@@ -761,13 +977,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_EXECBUF2:
/* depends on GEM */
- value = dev_priv->has_gem;
+ value = 1;
break;
case I915_PARAM_HAS_BSD:
- value = HAS_BSD(dev);
+ value = intel_ring_initialized(&dev_priv->ring[VCS]);
break;
case I915_PARAM_HAS_BLT:
- value = HAS_BLT(dev);
+ value = intel_ring_initialized(&dev_priv->ring[BCS]);
break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
@@ -787,6 +1003,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev);
break;
+ case I915_PARAM_HAS_ALIASING_PPGTT:
+ value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -816,10 +1035,9 @@ static int i915_setparam(struct drm_device *dev, void *data,
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
- dev_priv->tex_lru_log_granularity = param->value;
break;
case I915_SETPARAM_ALLOW_BATCHBUFFER:
- dev_priv->allow_batchbuffer = param->value;
+ dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
break;
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
@@ -844,6 +1062,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_i915_hws_addr_t *hws = data;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
@@ -861,23 +1082,17 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
- dev_priv->hws_map.offset = dev->agp->base + hws->addr;
- dev_priv->hws_map.size = 4*1024;
- dev_priv->hws_map.type = 0;
- dev_priv->hws_map.flags = 0;
- dev_priv->hws_map.mtrr = 0;
-
- drm_core_ioremap_wc(&dev_priv->hws_map, dev);
- if (dev_priv->hws_map.handle == NULL) {
+ dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr,
+ 4096);
+ if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr =
- (void __force __iomem *)dev_priv->hws_map.handle;
- memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -1013,133 +1228,6 @@ intel_teardown_mchbar(struct drm_device *dev)
release_resource(&dev_priv->mch_res);
}
-#define PTE_ADDRESS_MASK 0xfffff000
-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
-#define PTE_MAPPING_TYPE_MASK (3 << 1)
-#define PTE_VALID (1 << 0)
-
-/**
- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
- * a physical one
- * @dev: drm device
- * @offset: address to translate
- *
- * Some chip functions require allocations from stolen space and need the
- * physical address of the memory in question.
- */
-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev_priv->bridge_dev;
- u32 base;
-
-#if 0
- /* On the machines I have tested the Graphics Base of Stolen Memory
- * is unreliable, so compute the base by subtracting the stolen memory
- * from the Top of Low Usable DRAM which is where the BIOS places
- * the graphics stolen memory.
- */
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- /* top 32bits are reserved = 0 */
- pci_read_config_dword(pdev, 0xA4, &base);
- } else {
- /* XXX presume 8xx is the same as i915 */
- pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
- }
-#else
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- u16 val;
- pci_read_config_word(pdev, 0xb0, &val);
- base = val >> 4 << 20;
- } else {
- u8 val;
- pci_read_config_byte(pdev, 0x9c, &val);
- base = val >> 3 << 27;
- }
- base -= dev_priv->mm.gtt->stolen_size;
-#endif
-
- return base + offset;
-}
-
-static void i915_warn_stolen(struct drm_device *dev)
-{
- DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
- DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
-}
-
-static void i915_setup_compression(struct drm_device *dev, int size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
- unsigned long cfb_base;
- unsigned long ll_base = 0;
-
- /* Just in case the BIOS is doing something questionable. */
- intel_disable_fbc(dev);
-
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
- if (compressed_fb)
- compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
- if (!compressed_fb)
- goto err;
-
- cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
- if (!cfb_base)
- goto err_fb;
-
- if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
- compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
- 4096, 4096, 0);
- if (compressed_llb)
- compressed_llb = drm_mm_get_block(compressed_llb,
- 4096, 4096);
- if (!compressed_llb)
- goto err_fb;
-
- ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
- if (!ll_base)
- goto err_llb;
- }
-
- dev_priv->cfb_size = size;
-
- dev_priv->compressed_fb = compressed_fb;
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
- else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
- } else {
- I915_WRITE(FBC_CFB_BASE, cfb_base);
- I915_WRITE(FBC_LL_BASE, ll_base);
- dev_priv->compressed_llb = compressed_llb;
- }
-
- DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
- cfb_base, ll_base, size >> 20);
- return;
-
-err_llb:
- drm_mm_put_block(compressed_llb);
-err_fb:
- drm_mm_put_block(compressed_fb);
-err:
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- i915_warn_stolen(dev);
-}
-
-static void i915_cleanup_compression(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- drm_mm_put_block(dev_priv->compressed_fb);
- if (dev_priv->compressed_llb)
- drm_mm_put_block(dev_priv->compressed_llb);
-}
-
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
@@ -1158,14 +1246,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
- printk(KERN_INFO "i915: switched on\n");
+ pr_info("switched on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- printk(KERN_ERR "i915: switched off\n");
+ pr_err("switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend(dev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
@@ -1183,88 +1271,11 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
- if (i915_enable_ppgtt >= 0)
- return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
- return false;
-#endif
-
- return true;
-}
-
-static int i915_load_gem_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long prealloc_size, gtt_size, mappable_size;
- int ret;
-
- prealloc_size = dev_priv->mm.gtt->stolen_size;
- gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
- mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
- /* Basic memrange allocator for stolen space */
- drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
-
- mutex_lock(&dev->struct_mutex);
- if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
- /* PPGTT pdes are stolen from global gtt ptes, so shrink the
- * aperture accordingly when using aliasing ppgtt. */
- gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
- /* For paranoia keep the guard page in between. */
- gtt_size -= PAGE_SIZE;
-
- i915_gem_do_init(dev, 0, mappable_size, gtt_size);
-
- ret = i915_gem_init_aliasing_ppgtt(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- } else {
- /* Let GEM Manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch
- * page. There are a number of places where the hardware
- * apparently prefetches past the end of the object, and we've
- * seen multiple hangs with the GPU head pointer stuck in a
- * batchbuffer bound at the last page of the aperture. One page
- * should be enough to keep any prefetching inside of the
- * aperture.
- */
- i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
- }
-
- ret = i915_gem_init_hw(dev);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- i915_gem_cleanup_aliasing_ppgtt(dev);
- return ret;
- }
-
- /* Try to set up FBC with a reasonable compressed buffer size */
- if (I915_HAS_FBC(dev) && i915_powersave) {
- int cfb_size;
-
- /* Leave 1M for line length buffer & misc. */
-
- /* Try to get a 32M buffer... */
- if (prealloc_size > (36*1024*1024))
- cfb_size = 32*1024*1024;
- else /* fall back to 7/8 of the stolen space */
- cfb_size = prealloc_size * 7 / 8;
- i915_setup_compression(dev, cfb_size);
- }
-
- /* Allow hardware batchbuffers unless told otherwise. */
- dev_priv->allow_batchbuffer = 1;
- return 0;
-}
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
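
With the three separate callbacks folded into one ops table, every switcheroo client now registers the same way. A hedged sketch of a minimal client of the new interface (the foo_* callbacks are illustrative placeholders, not real driver functions):

/* Hedged sketch: minimal client of the ops-based interface.
 * foo_set_gpu_state() and foo_can_switch() are placeholders. */
static const struct vga_switcheroo_client_ops foo_switcheroo_ops = {
	.set_gpu_state = foo_set_gpu_state,
	.reprobe = NULL,
	.can_switch = foo_can_switch,
};

static int foo_register_switcheroo(struct pci_dev *pdev)
{
	return vga_switcheroo_register_client(pdev, &foo_switcheroo_ops);
}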
static int i915_load_modeset_init(struct drm_device *dev)
{
@@ -1288,22 +1299,22 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_register_dsm_handler();
- ret = vga_switcheroo_register_client(dev->pdev,
- i915_switcheroo_set_state,
- NULL,
- i915_switcheroo_can_switch);
+ ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
if (ret)
goto cleanup_vga_client;
- /* IIR "flip pending" bit means done if this bit is set */
- if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
- dev_priv->flip_pending_is_done = true;
+ /* Initialise stolen first so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
+ */
+ ret = i915_gem_init_stolen(dev);
+ if (ret)
+ goto cleanup_vga_switcheroo;
intel_modeset_init(dev);
- ret = i915_load_gem_init(dev);
+ ret = i915_gem_init(dev);
if (ret)
- goto cleanup_vga_switcheroo;
+ goto cleanup_gem_stolen;
intel_modeset_gem_init(dev);
@@ -1333,6 +1344,8 @@ cleanup_gem:
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
+cleanup_gem_stolen:
+ i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
@@ -1365,572 +1378,26 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
-static void i915_pineview_get_mem_freq(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 tmp;
-
- tmp = I915_READ(CLKCFG);
-
- switch (tmp & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_533:
- dev_priv->fsb_freq = 533; /* 133*4 */
- break;
- case CLKCFG_FSB_800:
- dev_priv->fsb_freq = 800; /* 200*4 */
- break;
- case CLKCFG_FSB_667:
- dev_priv->fsb_freq = 667; /* 167*4 */
- break;
- case CLKCFG_FSB_400:
- dev_priv->fsb_freq = 400; /* 100*4 */
- break;
- }
-
- switch (tmp & CLKCFG_MEM_MASK) {
- case CLKCFG_MEM_533:
- dev_priv->mem_freq = 533;
- break;
- case CLKCFG_MEM_667:
- dev_priv->mem_freq = 667;
- break;
- case CLKCFG_MEM_800:
- dev_priv->mem_freq = 800;
- break;
- }
-
- /* detect pineview DDR3 setting */
- tmp = I915_READ(CSHRDDR3CTL);
- dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
-}
-
-static void i915_ironlake_get_mem_freq(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u16 ddrpll, csipll;
-
- ddrpll = I915_READ16(DDRMPLL1);
- csipll = I915_READ16(CSIPLL0);
-
- switch (ddrpll & 0xff) {
- case 0xc:
- dev_priv->mem_freq = 800;
- break;
- case 0x10:
- dev_priv->mem_freq = 1066;
- break;
- case 0x14:
- dev_priv->mem_freq = 1333;
- break;
- case 0x18:
- dev_priv->mem_freq = 1600;
- break;
- default:
- DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
- ddrpll & 0xff);
- dev_priv->mem_freq = 0;
- break;
- }
-
- dev_priv->r_t = dev_priv->mem_freq;
-
- switch (csipll & 0x3ff) {
- case 0x00c:
- dev_priv->fsb_freq = 3200;
- break;
- case 0x00e:
- dev_priv->fsb_freq = 3733;
- break;
- case 0x010:
- dev_priv->fsb_freq = 4266;
- break;
- case 0x012:
- dev_priv->fsb_freq = 4800;
- break;
- case 0x014:
- dev_priv->fsb_freq = 5333;
- break;
- case 0x016:
- dev_priv->fsb_freq = 5866;
- break;
- case 0x018:
- dev_priv->fsb_freq = 6400;
- break;
- default:
- DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
- csipll & 0x3ff);
- dev_priv->fsb_freq = 0;
- break;
- }
-
- if (dev_priv->fsb_freq == 3200) {
- dev_priv->c_m = 0;
- } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
- dev_priv->c_m = 1;
- } else {
- dev_priv->c_m = 2;
- }
-}
-
-static const struct cparams {
- u16 i;
- u16 t;
- u16 m;
- u16 c;
-} cparams[] = {
- { 1, 1333, 301, 28664 },
- { 1, 1066, 294, 24460 },
- { 1, 800, 294, 25192 },
- { 0, 1333, 276, 27605 },
- { 0, 1066, 276, 27605 },
- { 0, 800, 231, 23784 },
-};
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
-{
- u64 total_count, diff, ret;
- u32 count1, count2, count3, m = 0, c = 0;
- unsigned long now = jiffies_to_msecs(jiffies), diff1;
- int i;
-
- diff1 = now - dev_priv->last_time1;
-
- /* Prevent division-by-zero if we are asking too fast.
- * Also, we don't get interesting results if we are polling
- * faster than once in 10ms, so just return the saved value
- * in such cases.
- */
- if (diff1 <= 10)
- return dev_priv->chipset_power;
-
- count1 = I915_READ(DMIEC);
- count2 = I915_READ(DDREC);
- count3 = I915_READ(CSIEC);
-
- total_count = count1 + count2 + count3;
-
- /* FIXME: handle per-counter overflow */
- if (total_count < dev_priv->last_count1) {
- diff = ~0UL - dev_priv->last_count1;
- diff += total_count;
- } else {
- diff = total_count - dev_priv->last_count1;
- }
-
- for (i = 0; i < ARRAY_SIZE(cparams); i++) {
- if (cparams[i].i == dev_priv->c_m &&
- cparams[i].t == dev_priv->r_t) {
- m = cparams[i].m;
- c = cparams[i].c;
- break;
- }
- }
-
- diff = div_u64(diff, diff1);
- ret = ((m * diff) + c);
- ret = div_u64(ret, 10);
-
- dev_priv->last_count1 = total_count;
- dev_priv->last_time1 = now;
-
- dev_priv->chipset_power = ret;
-
- return ret;
-}
-
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
-{
- unsigned long m, x, b;
- u32 tsfs;
-
- tsfs = I915_READ(TSFS);
-
- m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
- x = I915_READ8(TR1);
-
- b = tsfs & TSFS_INTR_MASK;
-
- return ((m * x) / 127) - b;
-}
-
-static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
-{
- static const struct v_table {
- u16 vd; /* in .1 mil */
- u16 vm; /* in .1 mil */
- } v_table[] = {
- { 0, 0, },
- { 375, 0, },
- { 500, 0, },
- { 625, 0, },
- { 750, 0, },
- { 875, 0, },
- { 1000, 0, },
- { 1125, 0, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4250, 3125, },
- { 4375, 3250, },
- { 4500, 3375, },
- { 4625, 3500, },
- { 4750, 3625, },
- { 4875, 3750, },
- { 5000, 3875, },
- { 5125, 4000, },
- { 5250, 4125, },
- { 5375, 4250, },
- { 5500, 4375, },
- { 5625, 4500, },
- { 5750, 4625, },
- { 5875, 4750, },
- { 6000, 4875, },
- { 6125, 5000, },
- { 6250, 5125, },
- { 6375, 5250, },
- { 6500, 5375, },
- { 6625, 5500, },
- { 6750, 5625, },
- { 6875, 5750, },
- { 7000, 5875, },
- { 7125, 6000, },
- { 7250, 6125, },
- { 7375, 6250, },
- { 7500, 6375, },
- { 7625, 6500, },
- { 7750, 6625, },
- { 7875, 6750, },
- { 8000, 6875, },
- { 8125, 7000, },
- { 8250, 7125, },
- { 8375, 7250, },
- { 8500, 7375, },
- { 8625, 7500, },
- { 8750, 7625, },
- { 8875, 7750, },
- { 9000, 7875, },
- { 9125, 8000, },
- { 9250, 8125, },
- { 9375, 8250, },
- { 9500, 8375, },
- { 9625, 8500, },
- { 9750, 8625, },
- { 9875, 8750, },
- { 10000, 8875, },
- { 10125, 9000, },
- { 10250, 9125, },
- { 10375, 9250, },
- { 10500, 9375, },
- { 10625, 9500, },
- { 10750, 9625, },
- { 10875, 9750, },
- { 11000, 9875, },
- { 11125, 10000, },
- { 11250, 10125, },
- { 11375, 10250, },
- { 11500, 10375, },
- { 11625, 10500, },
- { 11750, 10625, },
- { 11875, 10750, },
- { 12000, 10875, },
- { 12125, 11000, },
- { 12250, 11125, },
- { 12375, 11250, },
- { 12500, 11375, },
- { 12625, 11500, },
- { 12750, 11625, },
- { 12875, 11750, },
- { 13000, 11875, },
- { 13125, 12000, },
- { 13250, 12125, },
- { 13375, 12250, },
- { 13500, 12375, },
- { 13625, 12500, },
- { 13750, 12625, },
- { 13875, 12750, },
- { 14000, 12875, },
- { 14125, 13000, },
- { 14250, 13125, },
- { 14375, 13250, },
- { 14500, 13375, },
- { 14625, 13500, },
- { 14750, 13625, },
- { 14875, 13750, },
- { 15000, 13875, },
- { 15125, 14000, },
- { 15250, 14125, },
- { 15375, 14250, },
- { 15500, 14375, },
- { 15625, 14500, },
- { 15750, 14625, },
- { 15875, 14750, },
- { 16000, 14875, },
- { 16125, 15000, },
- };
- if (dev_priv->info->is_mobile)
- return v_table[pxvid].vm;
- else
- return v_table[pxvid].vd;
-}
-
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+static void
+i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
+ unsigned long size)
{
- struct timespec now, diff1;
- u64 diff;
- unsigned long diffms;
- u32 count;
-
- if (dev_priv->info->gen != 5)
- return;
-
- getrawmonotonic(&now);
- diff1 = timespec_sub(now, dev_priv->last_time2);
+ dev_priv->mm.gtt_mtrr = -1;
- /* Don't divide by 0 */
- diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
- if (!diffms)
+#if defined(CONFIG_X86_PAT)
+ if (cpu_has_pat)
return;
+#endif
- count = I915_READ(GFXEC);
-
- if (count < dev_priv->last_count2) {
- diff = ~0UL - dev_priv->last_count2;
- diff += count;
- } else {
- diff = count - dev_priv->last_count2;
- }
-
- dev_priv->last_count2 = count;
- dev_priv->last_time2 = now;
-
- /* More magic constants... */
- diff = diff * 1181;
- diff = div_u64(diff, diffms * 10);
- dev_priv->gfx_power = diff;
-}
-
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
-{
- unsigned long t, corr, state1, corr2, state2;
- u32 pxvid, ext_v;
-
- pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
- pxvid = (pxvid >> 24) & 0x7f;
- ext_v = pvid_to_extvid(dev_priv, pxvid);
-
- state1 = ext_v;
-
- t = i915_mch_val(dev_priv);
-
- /* Revel in the empirically derived constants */
-
- /* Correction factor in 1/100000 units */
- if (t > 80)
- corr = ((t * 2349) + 135940);
- else if (t >= 50)
- corr = ((t * 964) + 29317);
- else /* < 50 */
- corr = ((t * 301) + 1004);
-
- corr = corr * ((150142 * state1) / 10000 - 78642);
- corr /= 100000;
- corr2 = (corr * dev_priv->corr);
-
- state2 = (corr2 * state1) / 10000;
- state2 /= 100; /* convert to mW */
-
- i915_update_gfx_val(dev_priv);
-
- return dev_priv->gfx_power + state2;
-}
-
-/* Global for IPS driver to get at the current i915 device */
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- * - i915_mch_dev
- * - dev_priv->max_delay
- * - dev_priv->min_delay
- * - dev_priv->fmax
- * - dev_priv->gpu_busy
- */
-static DEFINE_SPINLOCK(mchdev_lock);
-
-/**
- * i915_read_mch_val - return value for IPS use
- *
- * Calculate and return a value for the IPS driver to use when deciding whether
- * we have thermal and power headroom to increase CPU or GPU power budget.
- */
-unsigned long i915_read_mch_val(void)
-{
- struct drm_i915_private *dev_priv;
- unsigned long chipset_val, graphics_val, ret = 0;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- chipset_val = i915_chipset_val(dev_priv);
- graphics_val = i915_gfx_val(dev_priv);
-
- ret = chipset_val + graphics_val;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_read_mch_val);
-
-/**
- * i915_gpu_raise - raise GPU frequency limit
- *
- * Raise the limit; IPS indicates we have thermal headroom.
- */
-bool i915_gpu_raise(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->max_delay > dev_priv->fmax)
- dev_priv->max_delay--;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_raise);
-
-/**
- * i915_gpu_lower - lower GPU frequency limit
- *
- * IPS indicates we're close to a thermal limit, so throttle back the GPU
- * frequency maximum.
- */
-bool i915_gpu_lower(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->max_delay < dev_priv->min_delay)
- dev_priv->max_delay++;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_lower);
-
-/**
- * i915_gpu_busy - indicate GPU business to IPS
- *
- * Tell the IPS driver whether or not the GPU is busy.
- */
-bool i915_gpu_busy(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = false;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- ret = dev_priv->busy;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_busy);
-
-/**
- * i915_gpu_turbo_disable - disable graphics turbo
- *
- * Disable graphics turbo by resetting the max frequency and setting the
- * current frequency to the default.
- */
-bool i915_gpu_turbo_disable(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- dev_priv->max_delay = dev_priv->fstart;
-
- if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
- ret = false;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
-
-/**
- * Tells the intel_ips driver that the i915 driver is now loaded, if
- * IPS got loaded first.
- *
- * This awkward dance is so that neither module has to depend on the
- * other in order for IPS to do the appropriate communication of
- * GPU turbo limits to i915.
- */
-static void
-ips_ping_for_i915_load(void)
-{
- void (*link)(void);
-
- link = symbol_get(ips_link_to_i915_driver);
- if (link) {
- link();
- symbol_put(ips_link_to_i915_driver);
+ /* Set up a WC MTRR for non-PAT systems. This is more common than
+ * one would think, because the kernel disables PAT on first
+ * generation Core chips because WC PAT gets overridden by a UC
+ * MTRR if present. Even if a UC MTRR isn't present.
+ */
+ dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
+ if (dev_priv->mm.gtt_mtrr < 0) {
+ DRM_INFO("MTRR allocation failed. Graphics "
+ "performance may suffer.\n");
}
}
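
i915_mtrr_setup() only records the allocated MTRR slot in mm.gtt_mtrr; the matching teardown is a mtrr_del() with the same base and size, which the unload path performs. A hedged sketch of that counterpart:

/* Hedged sketch of the teardown matching i915_mtrr_setup(); the
 * driver's unload path issues the equivalent mtrr_del() call. */
static void i915_mtrr_teardown(struct drm_i915_private *dev_priv,
			       unsigned long base, unsigned long size)
{
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, base, size);
		dev_priv->mm.gtt_mtrr = -1;
	}
}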
@@ -1948,8 +1415,16 @@ ips_ping_for_i915_load(void)
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
+ struct intel_device_info *info;
int ret = 0, mmio_bar;
- uint32_t agp_size;
+ uint32_t aperture_size;
+
+ info = (struct intel_device_info *) flags;
+
+ /* Refuse to load on gen6+ without kms enabled. */
+ if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
/* i915 has 4 more counters */
dev->counters += 4;
@@ -1964,7 +1439,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
- dev_priv->info = (struct intel_device_info *) flags;
+ dev_priv->info = info;
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
@@ -2003,27 +1478,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_rmmap;
}
- agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev->agp->base, agp_size);
+ io_mapping_create_wc(dev->agp->base, aperture_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
goto out_rmmap;
}
- /* Set up a WC MTRR for non-PAT systems. This is more common than
- * one would think, because the kernel disables PAT on first
- * generation Core chips because WC PAT gets overridden by a UC
- * MTRR if present. Even if a UC MTRR isn't present.
- */
- dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
- agp_size,
- MTRR_TYPE_WRCOMB, 1);
- if (dev_priv->mm.gtt_mtrr < 0) {
- DRM_INFO("MTRR allocation failed. Graphics "
- "performance may suffer.\n");
- }
+ i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
@@ -2047,9 +1511,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
- /* enable GEM by default */
- dev_priv->has_gem = 1;
-
intel_irq_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -2069,11 +1530,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gem_unload;
}
- if (IS_PINEVIEW(dev))
- i915_pineview_get_mem_freq(dev);
- else if (IS_GEN5(dev))
- i915_ironlake_get_mem_freq(dev);
-
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -2093,7 +1549,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
@@ -2117,6 +1573,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
}
+ i915_setup_sysfs(dev);
+
/* Must be done after probing outputs */
intel_opregion_init(dev);
acpi_video_register();
@@ -2124,14 +1582,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
- if (IS_GEN5(dev)) {
- spin_lock(&mchdev_lock);
- i915_mch_dev = dev_priv;
- dev_priv->mchdev_lock = &mchdev_lock;
- spin_unlock(&mchdev_lock);
-
- ips_ping_for_i915_load();
- }
+ if (IS_GEN5(dev))
+ intel_gpu_ips_init(dev_priv);
return 0;
@@ -2166,17 +1618,18 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- spin_lock(&mchdev_lock);
- i915_mch_dev = NULL;
- spin_unlock(&mchdev_lock);
+ intel_gpu_ips_teardown();
+
+ i915_teardown_sysfs(dev);
if (dev_priv->mm.inactive_shrinker.shrink)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
+ i915_gem_retire_requests(dev);
mutex_unlock(&dev->struct_mutex);
/* Cancel the retire work handler, which should be idle now. */
@@ -2228,8 +1681,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
- if (I915_HAS_FBC(dev) && i915_powersave)
- i915_cleanup_compression(dev);
+ i915_gem_cleanup_stolen(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
intel_cleanup_overlay(dev);
@@ -2277,7 +1729,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
* mode setting case, we want to restore the kernel's initial mode (just
* in case the last client left us in a bad state).
*
- * Additionally, in the non-mode setting case, we'll tear down the AGP
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
* up any GEM state.
*/
@@ -2322,7 +1774,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -2355,16 +1807,10 @@ struct drm_ioctl_desc i915_ioctls[] = {
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * A value of 1 is always returned to indicate every i9x5 is AGP.
+/*
+ * This is really ugly: Because old userspace abused the linux agp interface to
+ * manage the gtt, we need to claim that all intel devices are agp.
+ * Otherwise the drm core refuses to initialize the agp support code.
*/
int i915_driver_device_is_agp(struct drm_device * dev)
{
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ae8a64f9f845..238a52165833 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -84,6 +84,12 @@ MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
+int i915_lvds_channel_mode __read_mostly;
+module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
+MODULE_PARM_DESC(lvds_channel_mode,
+ "Specify LVDS channel mode "
+ "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
@@ -93,8 +99,8 @@ MODULE_PARM_DESC(lvds_use_ssc,
int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
- "Override selection of SDVO panel mode in the VBT "
- "(default: auto)");
+ "Override/Ignore selection of SDVO panel mode in the VBT "
+ "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
@@ -209,6 +215,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
@@ -216,6 +223,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
@@ -224,6 +232,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -233,6 +242,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_ivybridge_d_info = {
@@ -241,6 +251,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@@ -250,6 +261,43 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
+};
+
+static const struct intel_device_info intel_valleyview_m_info = {
+ .gen = 7, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .is_valleyview = 1,
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+ .gen = 7,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .is_valleyview = 1,
+};
+
+static const struct intel_device_info intel_haswell_d_info = {
+ .is_haswell = 1, .gen = 7,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_pch_split = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+ .is_haswell = 1, .gen = 7, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_pch_split = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -297,6 +345,13 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
+ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
+ INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
+ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
+ INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
{0, 0, 0}
};
@@ -308,6 +363,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
void intel_detect_pch(struct drm_device *dev)
{
@@ -328,20 +384,45 @@ void intel_detect_pch(struct drm_device *dev)
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
+ dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
+ dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
+ dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->num_pch_pll = 0;
+ DRM_DEBUG_KMS("Found LynxPoint PCH\n");
}
+ BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
pci_dev_put(pch);
}
}
+bool i915_semaphore_is_enabled(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable semaphores on SNB when IO remapping is active */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return 1;
+}
+
void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -366,7 +447,7 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
udelay(10);
- I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
POSTING_READ(FORCEWAKE_MT);
count = 0;
@@ -408,7 +489,7 @@ void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
@@ -446,6 +527,31 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
+void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+
+ /* Already awake? */
+ if ((I915_READ(0x130094) & 0xa1) == 0xa1)
+ return;
+
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
+ POSTING_READ(FORCEWAKE_VLV);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
+ udelay(10);
+}
+
+void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
+ /* FIXME: confirm VLV behavior with Punit folks */
+ POSTING_READ(FORCEWAKE_VLV);
+}
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -525,15 +631,16 @@ static int i915_drm_thaw(struct drm_device *dev)
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_init_pch_refclk(dev);
+
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
- if (HAS_PCH_SPLIT(dev))
- ironlake_init_pch_refclk(dev);
-
+ intel_modeset_init_hw(dev);
drm_mode_config_reset(dev);
drm_irq_install(dev);
@@ -541,9 +648,6 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_lock(&dev->mode_config.mutex);
drm_helper_resume_force_mode(dev);
mutex_unlock(&dev->mode_config.mutex);
-
- if (IS_IRONLAKE_M(dev))
- ironlake_enable_rc6(dev);
}
intel_opregion_init(dev);
@@ -576,7 +680,7 @@ int i915_resume(struct drm_device *dev)
return 0;
}
-static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+static int i8xx_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -610,11 +714,12 @@ static int i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- return gdrst & 0x1;
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int i965_do_reset(struct drm_device *dev, u8 flags)
+static int i965_do_reset(struct drm_device *dev)
{
+ int ret;
u8 gdrst;
/*
@@ -623,20 +728,43 @@ static int i965_do_reset(struct drm_device *dev, u8 flags)
* triggers the reset; when done, the hardware will clear it.
*/
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ gdrst | GRDOM_RENDER |
+ GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ gdrst | GRDOM_MEDIA |
+ GRDOM_RESET_ENABLE);
return wait_for(i965_reset_complete(dev), 500);
}
-static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+static int ironlake_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+ u32 gdrst;
+ int ret;
+
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
-static int gen6_do_reset(struct drm_device *dev, u8 flags)
+static int gen6_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -671,10 +799,44 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
return ret;
}
+static int intel_gpu_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = -ENODEV;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ ret = gen6_do_reset(dev);
+ break;
+ case 5:
+ ret = ironlake_do_reset(dev);
+ break;
+ case 4:
+ ret = i965_do_reset(dev);
+ break;
+ case 2:
+ ret = i8xx_do_reset(dev);
+ break;
+ }
+
+ /* Also reset the gpu hangman. */
+ if (dev_priv->stop_rings) {
+ DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
+ dev_priv->stop_rings = 0;
+ if (ret == -ENODEV) {
+ DRM_ERROR("Reset not implemented, but ignoring "
+ "error for simulated gpu hangs\n");
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
- * @flags: reset domains
*
* Reset the chip. Useful if a hang is detected. Returns zero on successful
* reset or otherwise an error code.
@@ -687,14 +849,9 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
* - re-init interrupt state
* - re-init display
*/
-int i915_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- /*
- * We really should only reset the display subsystem if we actually
- * need to
- */
- bool need_display = true;
int ret;
if (!i915_try_reset)
@@ -703,26 +860,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (!mutex_trylock(&dev->struct_mutex))
return -EBUSY;
+ dev_priv->stop_rings = 0;
+
i915_gem_reset(dev);
ret = -ENODEV;
- if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+ if (get_seconds() - dev_priv->last_gpu_reset < 5)
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
- } else switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- ret = gen6_do_reset(dev, flags);
- break;
- case 5:
- ret = ironlake_do_reset(dev, flags);
- break;
- case 4:
- ret = i965_do_reset(dev, flags);
- break;
- case 2:
- ret = i8xx_do_reset(dev, flags);
- break;
- }
+ else
+ ret = intel_gpu_reset(dev);
+
dev_priv->last_gpu_reset = get_seconds();
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
@@ -746,36 +893,27 @@ int i915_reset(struct drm_device *dev, u8 flags)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
+ struct intel_ring_buffer *ring;
+ int i;
+
dev_priv->mm.suspended = 0;
i915_gem_init_swizzling(dev);
- dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
- if (HAS_BSD(dev))
- dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
- if (HAS_BLT(dev))
- dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+ for_each_ring(ring, dev_priv, i)
+ ring->init(ring);
i915_gem_init_ppgtt(dev);
mutex_unlock(&dev->struct_mutex);
- drm_irq_uninstall(dev);
- drm_mode_config_reset(dev);
- drm_irq_install(dev);
- mutex_lock(&dev->struct_mutex);
- }
- mutex_unlock(&dev->struct_mutex);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_modeset_init_hw(dev);
- /*
- * Perform a full modeset as on later generations, e.g. Ironlake, we may
- * need to retrain the display link and cannot just restore the register
- * values.
- */
- if (need_display) {
- mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_irq_uninstall(dev);
+ drm_irq_install(dev);
+ } else {
+ mutex_unlock(&dev->struct_mutex);
}
return 0;
@@ -874,7 +1012,7 @@ static const struct dev_pm_ops i915_pm_ops = {
.restore = i915_pm_resume,
};
-static struct vm_operations_struct i915_gem_vm_ops = {
+static const struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -901,7 +1039,7 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
.load = i915_driver_load,
.unload = i915_driver_unload,
.open = i915_driver_open,
@@ -924,6 +1062,12 @@ static struct drm_driver driver = {
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = i915_gem_prime_export,
+ .gem_prime_import = i915_gem_prime_import,
+
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = i915_gem_dumb_destroy,
@@ -993,6 +1137,13 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+ (((dev_priv)->info->gen >= 6) && \
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE)) && \
+ (!IS_VALLEYVIEW((dev_priv)->dev))
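
NEEDS_FORCE_WAKE() gates the register I/O fast path: on gen6+ anything below 0x40000, other than FORCEWAKE itself (and never on Valleyview here), must be accessed with the GT held awake. A hedged, simplified sketch of the shape the __i915_read() expansion below takes (the real macro additionally handles tracing and the gt_lock):

/* Hedged, simplified sketch of the __i915_read() fast path; the
 * real expansion also takes gt_lock and traces the access. */
static u32 example_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		gen6_gt_force_wake_get(dev_priv);
		val = readl(dev_priv->regs + reg);
		gen6_gt_force_wake_put(dev_priv);
	} else {
		val = readl(dev_priv->regs + reg);
	}
	return val;
}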
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5fabc6c31fec..377c21f531e4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -38,6 +38,8 @@
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
+#include <linux/intel-iommu.h>
+#include <linux/kref.h>
/* General customization:
*/
@@ -63,10 +65,30 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
+enum port {
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ I915_MAX_PORTS
+};
+#define port_name(p) ((p) + 'A')
+
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+struct intel_pch_pll {
+ int refcount; /* count of number of CRTCs sharing this PLL */
+ int active; /* count of number of active CRTCs (i.e. DPMS on) */
+ bool on; /* is the PLL actually active? Disabled during modeset */
+ int pll_reg;
+ int fp0_reg;
+ int fp1_reg;
+};
+#define I915_NUM_PLLS 2
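
The counters divide a PLL's lifetime three ways: refcount tracks which CRTCs own a share, active tracks how many of those are lit, and on mirrors the hardware state so a modeset can park the PLL. A hedged sketch of the bookkeeping this implies when a sharing CRTC turns on (illustrative only, not the series' actual enable helper):

/* Hedged sketch of the refcounting the comments above describe;
 * the real enable path programs fp0_reg/fp1_reg before pll_reg. */
static void example_pch_pll_enable(struct intel_pch_pll *pll)
{
	pll->active++;
	if (!pll->on) {
		/* program fp0_reg/fp1_reg, then set the enable bit
		 * in pll_reg and wait for lock */
		pll->on = true;
	}
}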
+
/* Interface history:
*
* 1.1: Original.
@@ -111,11 +133,11 @@ struct opregion_asle;
struct drm_i915_private;
struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- struct opregion_asle *asle;
- void *vbt;
+ struct opregion_header __iomem *header;
+ struct opregion_acpi __iomem *acpi;
+ struct opregion_swsci __iomem *swsci;
+ struct opregion_asle __iomem *asle;
+ void __iomem *vbt;
u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)
@@ -135,7 +157,6 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct list_head lru_list;
struct drm_i915_gem_object *obj;
- uint32_t setup_seqno;
int pin_count;
};
@@ -151,8 +172,11 @@ struct sdvo_device_mapping {
struct intel_display_error_state;
struct drm_i915_error_state {
+ struct kref ref;
u32 eir;
u32 pgtbl_er;
+ u32 ier;
+ bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
@@ -218,11 +242,15 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
+ void (*sanitize_pm)(struct drm_device *dev);
+ void (*update_linetime_wm)(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
+ void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
@@ -255,6 +283,9 @@ struct intel_device_info {
u8 is_broadwater:1;
u8 is_crestline:1;
u8 is_ivybridge:1;
+ u8 is_valleyview:1;
+ u8 has_pch_split:1;
+ u8 is_haswell:1;
u8 has_fbc:1;
u8 has_pipe_cxsr:1;
u8 has_hotplug:1;
@@ -291,10 +322,12 @@ enum no_fbc_reason {
enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
+ PCH_LPT, /* Lynxpoint PCH */
};
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_INVERT_BRIGHTNESS (1<<2)
struct intel_fbdev;
struct intel_fbc_work;
@@ -302,7 +335,6 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
bool force_bit;
- bool has_gpio;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
@@ -314,7 +346,6 @@ typedef struct drm_i915_private {
const struct intel_device_info *info;
- int has_gem;
int relative_constants_mode;
void __iomem *regs;
@@ -326,19 +357,23 @@ typedef struct drm_i915_private {
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
- struct intel_gmbus *gmbus;
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
+ /**
+ * Base address of the gmbus and gpio block.
+ */
+ uint32_t gpio_mmio_base;
+
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
uint32_t counter;
- drm_local_map_t hws_map;
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
@@ -354,6 +389,10 @@ typedef struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+
+ /* DPIO indirect register protection */
+ spinlock_t dpio_lock;
+
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
@@ -363,22 +402,20 @@ typedef struct drm_i915_private {
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
- int tex_lru_log_granularity;
- int allow_batchbuffer;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
- int vblank_pipe;
int num_pipe;
+ int num_pch_pll;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
- uint32_t last_acthd;
- uint32_t last_acthd_bsd;
- uint32_t last_acthd_blt;
+ uint32_t last_acthd[I915_NUM_RINGS];
uint32_t last_instdone;
uint32_t last_instdone1;
+ unsigned int stop_rings;
+
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
@@ -405,6 +442,8 @@ typedef struct drm_i915_private {
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
@@ -428,6 +467,7 @@ typedef struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
+ /* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
@@ -652,24 +692,10 @@ typedef struct drm_i915_private {
*/
struct list_head inactive_list;
- /**
- * LRU list of objects which are not in the ringbuffer but
- * are still pinned in the GTT.
- */
- struct list_head pinned_list;
-
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
/**
- * List of objects currently pending being freed.
- *
- * These objects are no longer in use, but due to a signal
- * we were prevented from freeing them at the appointed time.
- */
- struct list_head deferred_free_list;
-
- /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -717,6 +743,16 @@ typedef struct drm_i915_private {
size_t object_memory;
u32 object_count;
} mm;
+
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct {
+ unsigned allow_batchbuffer : 1;
+ u32 __iomem *gfx_hws_cpu_addr;
+ } dri1;
+
+ /* Kernel Modesetting */
+
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
@@ -726,7 +762,8 @@ typedef struct drm_i915_private {
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
- bool flip_pending_is_done;
+
+ struct intel_pch_pll pch_plls[I915_NUM_PLLS];
/* Reclocking support */
bool render_reclock_avail;
@@ -781,6 +818,11 @@ typedef struct drm_i915_private {
struct drm_property *force_audio_property;
} drm_i915_private_t;
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+ if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
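
Because the body only runs when intel_ring_initialized() says the ring is live, callers can drop the old per-ring HAS_BSD()/HAS_BLT() checks. A short usage sketch, mirroring the reset path earlier in this series:

/* Usage sketch: reinitialise every ring actually present. */
struct intel_ring_buffer *ring;
int i;

for_each_ring(ring, dev_priv, i)
	ring->init(ring);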
+
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
HDMI_AUDIO_OFF, /* force turn off HDMI audio */
@@ -844,7 +886,14 @@ struct drm_i915_gem_object {
* Current tiling mode for the object.
*/
unsigned int tiling_mode:2;
- unsigned int tiling_changed:1;
+ /**
+ * Whether the tiling parameters for the currently associated fence
+ * register have changed. Note that for the purposes of tracking
+ * tiling changes we also treat the unfenced register, the register
+ * slot that the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ unsigned int fence_dirty:1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -881,6 +930,7 @@ struct drm_i915_gem_object {
unsigned int cache_level:2;
unsigned int has_aliasing_ppgtt_mapping:1;
+ unsigned int has_global_gtt_mapping:1;
struct page **pages;
@@ -890,6 +940,8 @@ struct drm_i915_gem_object {
struct scatterlist *sg_list;
int num_sg;
+ /* prime dma-buf support */
+ struct sg_table *sg_table;
/**
* Used for performing relocations during execbuffer insertion.
*/
@@ -904,13 +956,12 @@ struct drm_i915_gem_object {
*/
uint32_t gtt_offset;
- /** Breadcrumb of last rendering to the buffer. */
- uint32_t last_rendering_seqno;
struct intel_ring_buffer *ring;
+ /** Breadcrumb of last rendering to the buffer. */
+ uint32_t last_rendering_seqno;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
- struct intel_ring_buffer *last_fenced_ring;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@@ -918,13 +969,6 @@ struct drm_i915_gem_object {
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
-
- /**
- * If present, while GEM_DOMAIN_CPU is in the read domain this array
- * flags which individual pages are valid.
- */
- uint8_t *page_cpu_valid;
-
/** User space pin count and filp owning the pin */
uint32_t user_pin_count;
struct drm_file *pin_filp;
@@ -1001,6 +1045,8 @@ struct drm_i915_file_private {
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
+#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
/*
@@ -1044,10 +1090,11 @@ struct drm_i915_file_private {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -1081,6 +1128,7 @@ extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
+extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
@@ -1094,6 +1142,7 @@ extern int i915_master_create(struct drm_device *dev, struct drm_master *master)
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_dma.c */
+void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
@@ -1104,12 +1153,14 @@ extern void i915_driver_preclose(struct drm_device *dev,
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
+#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+#endif
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4);
-extern int i915_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -1119,19 +1170,10 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
-extern int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
extern void intel_irq_init(struct drm_device *dev);
-extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+void i915_error_state_free(struct kref *error_ref);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1205,8 +1247,12 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+ gfp_t gfpmask);
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno);
@@ -1229,17 +1275,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined);
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-static inline void
+static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
dev_priv->fence_regs[obj->fence_reg].pin_count++;
- }
+ return true;
+ } else
+ return false;
}
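Since i915_gem_object_pin_fence() now reports whether a fence was actually pinned, callers can pair the unpin conditionally; a hedged sketch (assuming the matching i915_gem_object_unpin_fence() helper that follows):

	if (i915_gem_object_pin_fence(obj)) {
		/* ... access that relies on the fence register ... */
		i915_gem_object_unpin_fence(obj);
	}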
static inline void
@@ -1260,27 +1307,25 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-void i915_gem_do_init(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
+int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno,
- bool do_retire);
+ uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined);
@@ -1301,6 +1346,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags);
+
/* i915_gem_gtt.c */
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
@@ -1311,18 +1363,24 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+void i915_gem_init_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable);
-int __must_check i915_gem_evict_everything(struct drm_device *dev,
- bool purgeable_only);
-int __must_check i915_gem_evict_inactive(struct drm_device *dev,
- bool purgeable_only);
+int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1354,9 +1412,20 @@ extern int i915_restore_state(struct drm_device *dev);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
+/* i915_sysfs.c */
+void i915_setup_sysfs(struct drm_device *dev_priv);
+void i915_teardown_sysfs(struct drm_device *dev_priv);
+
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
+static inline bool intel_gmbus_is_port_valid(unsigned port)
+{
+ return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
+}
+
+extern struct i2c_adapter *intel_gmbus_get_adapter(
+ struct drm_i915_private *dev_priv, unsigned port);
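A sketch of the intended calling pattern for the new adapter lookup (port value and error handling are illustrative):

	struct i2c_adapter *adapter;

	if (!intel_gmbus_is_port_valid(port))
		return -EINVAL;

	adapter = intel_gmbus_get_adapter(dev_priv, port);
	/* adapter can then be used with the standard i2c_transfer() API */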
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -1391,6 +1460,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */
/* modesetting */
+extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1403,12 +1473,17 @@ extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern int intel_enable_rc6(const struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_device *dev);
extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
+extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
+
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1420,28 +1495,6 @@ extern void intel_display_print_error_state(struct seq_file *m,
struct intel_display_error_state *error);
#endif
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
-#define BEGIN_LP_RING(n) \
- intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
- intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
- intel_ring_advance(LP_RING(dev_priv))
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
- */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
- if (LP_RING(dev->dev_private)->obj == NULL) \
- LOCK_TEST_WITH_RETURN(dev, file); \
-} while (0)
-
/* On SNB platform, before reading ring registers forcewake bit
 * must be set to prevent GT core from powering down and stale values being
* returned.
@@ -1450,12 +1503,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- (((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
-
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0d1e4b7b4b99..c1e5c66553df 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,31 +35,41 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
+#include <linux/dma-buf.h>
static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
- bool write);
-static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
- uint64_t offset,
- uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
bool map_and_fenceable);
-static void i915_gem_clear_fence_reg(struct drm_device *dev,
- struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file);
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj);
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable);
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ /* As we do not have an associated fence register, we will force
+ * a tiling change if we ever need to acquire one.
+ */
+ obj->fence_dirty = false;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
@@ -122,26 +132,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj->gtt_space && !obj->active && obj->pin_count == 0;
-}
-
-void i915_gem_do_init(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
-
- dev_priv->mm.gtt_start = start;
- dev_priv->mm.gtt_mappable_end = mappable_end;
- dev_priv->mm.gtt_end = end;
- dev_priv->mm.gtt_total = end - start;
- dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-
- /* Take over this portion of the GTT */
- intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+ return !obj->active;
}
int
@@ -150,12 +141,20 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_init *args = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
if (args->gtt_start >= args->gtt_end ||
(args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
return -EINVAL;
+ /* GEM with user mode setting was never supported on ilk and later. */
+ if (INTEL_INFO(dev)->gen >= 5)
+ return -ENODEV;
+
mutex_lock(&dev->struct_mutex);
- i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
+ i915_gem_init_global_gtt(dev, args->gtt_start,
+ args->gtt_end, args->gtt_end);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -170,13 +169,11 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
size_t pinned;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
- pinned += obj->gtt_space->size;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ if (obj->pin_count)
+ pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->mm.gtt_total;
@@ -247,6 +244,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_create *args = data;
+
return i915_gem_create(file, dev,
args->size, &args->handle);
}
@@ -259,66 +257,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
obj->tiling_mode != I915_TILING_NONE;
}
-/**
- * This is the fast shmem pread path, which attempts to copy_from_user directly
- * from the backing pages of the object to the user's address space. On a
- * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
- */
-static int
-i915_gem_shmem_pread_fast(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file)
-{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int page_offset, page_length;
-
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- remain = args->size;
-
- offset = args->offset;
-
- while (remain > 0) {
- struct page *page;
- char *vaddr;
- int ret;
-
- /* Operation in this page
- *
- * page_offset = offset within page
- * page_length = bytes to copy for this page
- */
- page_offset = offset_in_page(offset);
- page_length = remain;
- if ((page_offset + remain) > PAGE_SIZE)
- page_length = PAGE_SIZE - page_offset;
-
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- vaddr = kmap_atomic(page);
- ret = __copy_to_user_inatomic(user_data,
- vaddr + page_offset,
- page_length);
- kunmap_atomic(vaddr);
-
- mark_page_accessed(page);
- page_cache_release(page);
- if (ret)
- return -EFAULT;
-
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
-
- return 0;
-}
-
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
@@ -346,8 +284,8 @@ __copy_to_user_swizzled(char __user *cpu_vaddr,
}
static inline int
-__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
- const char *cpu_vaddr,
+__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
+ const char __user *cpu_vaddr,
int length)
{
int ret, cpu_offset = 0;
@@ -371,37 +309,121 @@ __copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
return 0;
}
-/**
- * This is the fallback shmem pread path, which allocates temporary storage
- * in kernel space to copy_to_user into outside of the struct_mutex, so we
- * can copy out of the object's backing pages while holding the struct mutex
- * and not take page faults.
- */
+/* Per-page copy function for the shmem pread fastpath.
+ * Flushes invalid cachelines before reading the target if
+ * needs_clflush is set. */
static int
-i915_gem_shmem_pread_slow(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file)
+shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
+{
+ char *vaddr;
+ int ret;
+
+ if (unlikely(page_do_bit17_swizzling))
+ return -EINVAL;
+
+ vaddr = kmap_atomic(page);
+ if (needs_clflush)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ ret = __copy_to_user_inatomic(user_data,
+ vaddr + shmem_page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
+
+ return ret;
+}
+
+static void
+shmem_clflush_swizzled_range(char *addr, unsigned long length,
+ bool swizzled)
+{
+ if (unlikely(swizzled)) {
+ unsigned long start = (unsigned long) addr;
+ unsigned long end = (unsigned long) addr + length;
+
+ /* For swizzling simply ensure that we always flush both
+ * channels. Lame, but simple and it works. Swizzled
+ * pwrite/pread is far from a hotpath - current userspace
+ * doesn't use it at all. */
+ start = round_down(start, 128);
+ end = round_up(end, 128);
+
+ drm_clflush_virt_range((void *)start, end - start);
+ } else {
+ drm_clflush_virt_range(addr, length);
+ }
+}
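Worked example of the 128-byte rounding above (hypothetical addresses): for addr = 0x1050 and length = 0x20, start rounds down to 0x1000 and end rounds up to 0x1080, so drm_clflush_virt_range() flushes 0x80 bytes — both 64-byte halves of each swizzled 128-byte span, regardless of which channel the data actually lives in.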
+
+/* The only difference from the fast-path function is that this one can
+ * handle bit17 swizzling and uses non-atomic copy and kmap functions. */
+static int
+shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
+{
+ char *vaddr;
+ int ret;
+
+ vaddr = kmap(page);
+ if (needs_clflush)
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+
+ if (page_do_bit17_swizzling)
+ ret = __copy_to_user_swizzled(user_data,
+ vaddr, shmem_page_offset,
+ page_length);
+ else
+ ret = __copy_to_user(user_data,
+ vaddr + shmem_page_offset,
+ page_length);
+ kunmap(page);
+
+ return ret;
+}
+
+static int
+i915_gem_shmem_pread(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pread *args,
+ struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
char __user *user_data;
ssize_t remain;
loff_t offset;
- int shmem_page_offset, page_length, ret;
+ int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ int hit_slowpath = 0;
+ int prefaulted = 0;
+ int needs_clflush = 0;
+ int release_page;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- offset = args->offset;
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+		/* If we're not in the cpu read domain, set ourselves into the gtt
+		 * read domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will dirty the data again
+		 * anyway before the next pread happens. */
+ if (obj->cache_level == I915_CACHE_NONE)
+ needs_clflush = 1;
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ return ret;
+ }
- mutex_unlock(&dev->struct_mutex);
+ offset = args->offset;
while (remain > 0) {
struct page *page;
- char *vaddr;
/* Operation in this page
*
@@ -413,28 +435,51 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
+ if (obj->pages) {
+ page = obj->pages[offset >> PAGE_SHIFT];
+ release_page = 0;
+ } else {
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
+ release_page = 1;
}
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
- vaddr = kmap(page);
- if (page_do_bit17_swizzling)
- ret = __copy_to_user_swizzled(user_data,
- vaddr, shmem_page_offset,
- page_length);
- else
- ret = __copy_to_user(user_data,
- vaddr + shmem_page_offset,
- page_length);
- kunmap(page);
+ ret = shmem_pread_fast(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ needs_clflush);
+ if (ret == 0)
+ goto next_page;
- mark_page_accessed(page);
+ hit_slowpath = 1;
+ page_cache_get(page);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (!prefaulted) {
+ ret = fault_in_multipages_writeable(user_data, remain);
+ /* Userspace is tricking us, but we've already clobbered
+ * its pages with the prefault and promised to write the
+ * data up to the first fault. Hence ignore any errors
+ * and just continue. */
+ (void)ret;
+ prefaulted = 1;
+ }
+
+ ret = shmem_pread_slow(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ needs_clflush);
+
+ mutex_lock(&dev->struct_mutex);
page_cache_release(page);
+next_page:
+ mark_page_accessed(page);
+ if (release_page)
+ page_cache_release(page);
if (ret) {
ret = -EFAULT;
@@ -447,10 +492,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
}
out:
- mutex_lock(&dev->struct_mutex);
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
+ if (hit_slowpath) {
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
+ }
return ret;
}
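The restructured pread above follows a try-fast-then-sleep pattern; condensed (helper names as in this patch, error paths elided):

	ret = shmem_pread_fast(page, ...);	/* atomic kmap, must not fault */
	if (ret) {
		page_cache_get(page);		/* hold page across the unlock */
		mutex_unlock(&dev->struct_mutex);
		if (!prefaulted)		/* prefault user buffer once */
			fault_in_multipages_writeable(user_data, remain);
		ret = shmem_pread_slow(page, ...); /* sleeping kmap, may fault */
		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
	}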
@@ -476,11 +522,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
- args->size);
- if (ret)
- return -EFAULT;
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -498,19 +539,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
- trace_i915_gem_object_pread(obj, args->offset, args->size);
-
- ret = i915_gem_object_set_cpu_read_domain_range(obj,
- args->offset,
- args->size);
- if (ret)
+ /* prime objects have no backing filp to GEM pread/pwrite
+ * pages from.
+ */
+ if (!obj->base.filp) {
+ ret = -EINVAL;
goto out;
+ }
- ret = -EFAULT;
- if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
- if (ret == -EFAULT)
- ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
+ trace_i915_gem_object_pread(obj, args->offset, args->size);
+
+ ret = i915_gem_shmem_pread(dev, obj, args, file);
out:
drm_gem_object_unreference(&obj->base);
@@ -529,40 +568,19 @@ fast_user_write(struct io_mapping *mapping,
char __user *user_data,
int length)
{
- char *vaddr_atomic;
+ void __iomem *vaddr_atomic;
+ void *vaddr;
unsigned long unwritten;
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
- unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
+ /* We can use the cpu mem copy function because this is X86. */
+ vaddr = (void __force*)vaddr_atomic + page_offset;
+ unwritten = __copy_from_user_inatomic_nocache(vaddr,
user_data, length);
io_mapping_unmap_atomic(vaddr_atomic);
return unwritten;
}
-/* Here's the write path which can sleep for
- * page faults
- */
-
-static inline void
-slow_kernel_write(struct io_mapping *mapping,
- loff_t gtt_base, int gtt_offset,
- struct page *user_page, int user_offset,
- int length)
-{
- char __iomem *dst_vaddr;
- char *src_vaddr;
-
- dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
- src_vaddr = kmap(user_page);
-
- memcpy_toio(dst_vaddr + gtt_offset,
- src_vaddr + user_offset,
- length);
-
- kunmap(user_page);
- io_mapping_unmap(dst_vaddr);
-}
-
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
@@ -577,7 +595,19 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
- int page_offset, page_length;
+ int page_offset, page_length, ret;
+
+ ret = i915_gem_object_pin(obj, 0, true);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto out_unpin;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -602,214 +632,133 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* retry in the slow path.
*/
if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
- page_offset, user_data, page_length))
- return -EFAULT;
+ page_offset, user_data, page_length)) {
+ ret = -EFAULT;
+ goto out_unpin;
+ }
remain -= page_length;
user_data += page_length;
offset += page_length;
}
- return 0;
+out_unpin:
+ i915_gem_object_unpin(obj);
+out:
+ return ret;
}
-/**
- * This is the fallback GTT pwrite path, which uses get_user_pages to pin
- * the memory and maps it using kmap_atomic for copying.
- *
- * This code resulted in x11perf -rgb10text consuming about 10% more CPU
- * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
- */
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush is set. */
static int
-i915_gem_gtt_pwrite_slow(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- ssize_t remain;
- loff_t gtt_page_base, offset;
- loff_t first_data_page, last_data_page, num_pages;
- loff_t pinned_pages, i;
- struct page **user_pages;
- struct mm_struct *mm = current->mm;
- int gtt_page_offset, data_page_offset, data_page_index, page_length;
+ char *vaddr;
int ret;
- uint64_t data_ptr = args->data_ptr;
-
- remain = args->size;
-
- /* Pin the user pages containing the data. We can't fault while
- * holding the struct mutex, and all of the pwrite implementations
- * want to hold it while dereferencing the user data.
- */
- first_data_page = data_ptr / PAGE_SIZE;
- last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- num_pages = last_data_page - first_data_page + 1;
-
- user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
-
- mutex_unlock(&dev->struct_mutex);
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 0, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
- goto out_unpin_pages;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- goto out_unpin_pages;
-
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto out_unpin_pages;
-
- offset = obj->gtt_offset + args->offset;
-
- while (remain > 0) {
- /* Operation in this page
- *
- * gtt_page_base = page offset within aperture
- * gtt_page_offset = offset within page in aperture
- * data_page_index = page number in get_user_pages return
- * data_page_offset = offset with data_page_index page.
- * page_length = bytes to copy for this page
- */
- gtt_page_base = offset & PAGE_MASK;
- gtt_page_offset = offset_in_page(offset);
- data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = offset_in_page(data_ptr);
-
- page_length = remain;
- if ((gtt_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - gtt_page_offset;
- if ((data_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - data_page_offset;
- slow_kernel_write(dev_priv->mm.gtt_mapping,
- gtt_page_base, gtt_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
-
- remain -= page_length;
- offset += page_length;
- data_ptr += page_length;
- }
+ if (unlikely(page_do_bit17_swizzling))
+ return -EINVAL;
-out_unpin_pages:
- for (i = 0; i < pinned_pages; i++)
- page_cache_release(user_pages[i]);
- drm_free_large(user_pages);
+ vaddr = kmap_atomic(page);
+ if (needs_clflush_before)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
+ user_data,
+ page_length);
+ if (needs_clflush_after)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
return ret;
}
-/**
- * This is the fast shmem pwrite path, which attempts to directly
- * copy_from_user into the kmapped pages backing the object.
- */
+/* The only difference from the fast-path function is that this one can
+ * handle bit17 swizzling and uses non-atomic copy and kmap functions. */
static int
-i915_gem_shmem_pwrite_fast(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int page_offset, page_length;
-
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- remain = args->size;
-
- offset = args->offset;
- obj->dirty = 1;
-
- while (remain > 0) {
- struct page *page;
- char *vaddr;
- int ret;
-
- /* Operation in this page
- *
- * page_offset = offset within page
- * page_length = bytes to copy for this page
- */
- page_offset = offset_in_page(offset);
- page_length = remain;
- if ((page_offset + remain) > PAGE_SIZE)
- page_length = PAGE_SIZE - page_offset;
-
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ char *vaddr;
+ int ret;
- vaddr = kmap_atomic(page);
- ret = __copy_from_user_inatomic(vaddr + page_offset,
+ vaddr = kmap(page);
+ if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+ if (page_do_bit17_swizzling)
+ ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
user_data,
page_length);
- kunmap_atomic(vaddr);
-
- set_page_dirty(page);
- mark_page_accessed(page);
- page_cache_release(page);
-
- /* If we get a fault while copying data, then (presumably) our
- * source page isn't available. Return the error and we'll
- * retry in the slow path.
- */
- if (ret)
- return -EFAULT;
-
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
+ else
+ ret = __copy_from_user(vaddr + shmem_page_offset,
+ user_data,
+ page_length);
+ if (needs_clflush_after)
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+ kunmap(page);
- return 0;
+ return ret;
}
-/**
- * This is the fallback shmem pwrite path, which uses get_user_pages to pin
- * the memory and maps it using kmap_atomic for copying.
- *
- * This avoids taking mmap_sem for faulting on the user's address while the
- * struct_mutex is held.
- */
static int
-i915_gem_shmem_pwrite_slow(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+i915_gem_shmem_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
loff_t offset;
char __user *user_data;
- int shmem_page_offset, page_length, ret;
+ int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ int hit_slowpath = 0;
+ int needs_clflush_after = 0;
+ int needs_clflush_before = 0;
+ int release_page;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		/* If we're not in the cpu write domain, set ourselves into the gtt
+ * write domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will use the data
+ * right away and we therefore have to clflush anyway. */
+ if (obj->cache_level == I915_CACHE_NONE)
+ needs_clflush_after = 1;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+ }
+	/* The same trick applies to invalidating partially written cachelines
+	 * before writing. */
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
+ && obj->cache_level == I915_CACHE_NONE)
+ needs_clflush_before = 1;
+
offset = args->offset;
obj->dirty = 1;
- mutex_unlock(&dev->struct_mutex);
-
while (remain > 0) {
struct page *page;
- char *vaddr;
+ int partial_cacheline_write;
/* Operation in this page
*
@@ -822,29 +771,51 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
+ /* If we don't overwrite a cacheline completely we need to be
+ * careful to have up-to-date data by first clflushing. Don't
+		 * overcomplicate things and flush the entire page. */
+ partial_cacheline_write = needs_clflush_before &&
+ ((shmem_page_offset | page_length)
+ & (boot_cpu_data.x86_clflush_size - 1));
+
+ if (obj->pages) {
+ page = obj->pages[offset >> PAGE_SHIFT];
+ release_page = 0;
+ } else {
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
+ release_page = 1;
}
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
- vaddr = kmap(page);
- if (page_do_bit17_swizzling)
- ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
- user_data,
- page_length);
- else
- ret = __copy_from_user(vaddr + shmem_page_offset,
- user_data,
- page_length);
- kunmap(page);
+ ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ partial_cacheline_write,
+ needs_clflush_after);
+ if (ret == 0)
+ goto next_page;
+
+ hit_slowpath = 1;
+ page_cache_get(page);
+ mutex_unlock(&dev->struct_mutex);
+
+ ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ partial_cacheline_write,
+ needs_clflush_after);
+ mutex_lock(&dev->struct_mutex);
+ page_cache_release(page);
+next_page:
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ if (release_page)
+ page_cache_release(page);
if (ret) {
ret = -EFAULT;
@@ -857,17 +828,21 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
}
out:
- mutex_lock(&dev->struct_mutex);
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
- /* and flush dirty cachelines in case the object isn't in the cpu write
- * domain anymore. */
- if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
+ if (hit_slowpath) {
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
+ /* and flush dirty cachelines in case the object isn't in the cpu write
+ * domain anymore. */
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj);
+ intel_gtt_chipset_flush();
+ }
}
+ if (needs_clflush_after)
+ intel_gtt_chipset_flush();
+
return ret;
}
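The partial_cacheline_write test above is plain mask arithmetic: with a 64-byte clflush size (boot_cpu_data.x86_clflush_size), the mask is 63 and, for example, an offset of 0x40 with length 0x40 gives (0x40 | 0x40) & 63 == 0 (whole cachelines, no pre-flush needed), while offset 0x50 with length 0x20 gives a non-zero result and forces the clflush-before-write path.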
@@ -892,8 +867,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
- args->size);
+ ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
+ args->size);
if (ret)
return -EFAULT;
@@ -914,8 +889,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ /* prime objects have no backing filp to GEM pread/pwrite
+ * pages from.
+ */
+ if (!obj->base.filp) {
+ ret = -EINVAL;
+ goto out;
+ }
+
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+ ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
@@ -928,42 +912,18 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
if (obj->gtt_space &&
+ obj->cache_level == I915_CACHE_NONE &&
+ obj->tiling_mode == I915_TILING_NONE &&
+ obj->map_and_fenceable &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- ret = i915_gem_object_pin(obj, 0, true);
- if (ret)
- goto out;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- goto out_unpin;
-
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto out_unpin;
-
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
- if (ret == -EFAULT)
- ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
-
-out_unpin:
- i915_gem_object_unpin(obj);
-
- if (ret != -EFAULT)
- goto out;
- /* Fall through to the shmfs paths because the gtt paths might
- * fail with non-page-backed user pointers (e.g. gtt mappings
- * when moving data between textures). */
+ /* Note that the gtt paths might fail with non-page-backed user
+ * pointers (e.g. gtt mappings when moving data between
+	 * textures). Fall back to the shmem path in that case. */
}
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret)
- goto out;
-
- ret = -EFAULT;
- if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
if (ret == -EFAULT)
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file);
out:
drm_gem_object_unreference(&obj->base);
@@ -986,9 +946,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
uint32_t write_domain = args->write_domain;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
/* Only handle setting domains to types used by the CPU. */
if (write_domain & I915_GEM_GPU_DOMAINS)
return -EINVAL;
@@ -1042,9 +999,6 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret = 0;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -1080,13 +1034,18 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
unsigned long addr;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
+ /* prime objects have no backing filp to GEM mmap
+ * pages from.
+ */
+ if (!obj->filp) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EINVAL;
+ }
+
addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
@@ -1151,10 +1110,10 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
- if (obj->tiling_mode == I915_TILING_NONE)
- ret = i915_gem_object_put_fence(obj);
- else
- ret = i915_gem_object_get_fence(obj, NULL);
+ if (!obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+ ret = i915_gem_object_get_fence(obj);
if (ret)
goto unlock;
@@ -1308,9 +1267,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
struct drm_i915_gem_object *obj;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -1368,14 +1324,10 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_mmap_gtt *args = data;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
-
-static int
+int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask)
{
@@ -1384,6 +1336,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
struct inode *inode;
struct page *page;
+ if (obj->pages || obj->sg_table)
+ return 0;
+
/* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
*/
@@ -1425,6 +1380,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
int page_count = obj->base.size / PAGE_SIZE;
int i;
+ if (!obj->pages)
+ return;
+
BUG_ON(obj->madv == __I915_MADV_PURGED);
if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -1473,7 +1431,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
if (obj->fenced_gpu_access) {
obj->last_fenced_seqno = seqno;
- obj->last_fenced_ring = ring;
/* Bump MRU to take account of the delayed flush */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
@@ -1512,15 +1469,11 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (obj->pin_count != 0)
- list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
- else
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
BUG_ON(!list_empty(&obj->gpu_write_list));
BUG_ON(!obj->active);
obj->ring = NULL;
- obj->last_fenced_ring = NULL;
i915_gem_object_move_off_active(obj);
obj->fenced_gpu_access = false;
@@ -1546,6 +1499,9 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
inode = obj->base.filp->f_path.dentry->d_inode;
shmem_truncate_range(inode, 0, (loff_t)-1);
+ if (obj->base.map_list.map)
+ drm_gem_free_mmap_offset(&obj->base);
+
obj->madv = __I915_MADV_PURGED;
}
@@ -1711,30 +1667,29 @@ static void i915_gem_reset_fences(struct drm_device *dev)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- struct drm_i915_gem_object *obj = reg->obj;
- if (!obj)
- continue;
+ i915_gem_write_fence(dev, i, NULL);
- if (obj->tiling_mode)
- i915_gem_release_mmap(obj);
+ if (reg->obj)
+ i915_gem_object_fence_lost(reg->obj);
- reg->obj->fence_reg = I915_FENCE_REG_NONE;
- reg->obj->fenced_gpu_access = false;
- reg->obj->last_fenced_seqno = 0;
- reg->obj->last_fenced_ring = NULL;
- i915_gem_clear_fence_reg(dev, reg);
+ reg->pin_count = 0;
+ reg->obj = NULL;
+ INIT_LIST_HEAD(&reg->lru_list);
}
+
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
}
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct intel_ring_buffer *ring;
int i;
- for (i = 0; i < I915_NUM_RINGS; i++)
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_reset_ring_lists(dev_priv, ring);
/* Remove anything from the flushing lists. The GPU cache is likely
* to be lost on reset along with the data, so simply move the
@@ -1839,24 +1794,11 @@ void
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int i;
- if (!list_empty(&dev_priv->mm.deferred_free_list)) {
- struct drm_i915_gem_object *obj, *next;
-
- /* We must be careful that during unbind() we do not
- * accidentally infinitely recurse into retire requests.
- * Currently:
- * retire -> free -> unbind -> wait -> retire_ring
- */
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.deferred_free_list,
- mm_list)
- i915_gem_free_object_tail(obj);
- }
-
- for (i = 0; i < I915_NUM_RINGS; i++)
- i915_gem_retire_requests_ring(&dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_retire_requests_ring(ring);
}
static void
@@ -1864,6 +1806,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
struct drm_device *dev;
+ struct intel_ring_buffer *ring;
bool idle;
int i;
@@ -1883,9 +1826,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
* objects indefinitely.
*/
idle = true;
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
+ for_each_ring(ring, dev_priv, i) {
if (!list_empty(&ring->gpu_write_list)) {
struct drm_i915_gem_request *request;
int ret;
@@ -1907,20 +1848,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno,
- bool do_retire)
+static int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- u32 ier;
- int ret = 0;
-
- BUG_ON(seqno == 0);
+ BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_read(&dev_priv->mm.wedged)) {
struct completion *x = &dev_priv->error_completion;
@@ -1935,6 +1866,20 @@ i915_wait_request(struct intel_ring_buffer *ring,
return recovery_complete ? -EIO : -EAGAIN;
}
+ return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+ int ret = 0;
+
+ BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
if (seqno == ring->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
@@ -1948,54 +1893,67 @@ i915_wait_request(struct intel_ring_buffer *ring,
return ret;
}
- seqno = request->seqno;
+ BUG_ON(seqno != request->seqno);
}
- if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
- if (HAS_PCH_SPLIT(ring->dev))
- ier = I915_READ(DEIER) | I915_READ(GTIER);
- else
- ier = I915_READ(IER);
- if (!ier) {
- DRM_ERROR("something (likely vbetool) disabled "
- "interrupts, re-enabling\n");
- ring->dev->driver->irq_preinstall(ring->dev);
- ring->dev->driver->irq_postinstall(ring->dev);
- }
+ return ret;
+}
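i915_wait_request() below is now just a composition of these helpers; sketched:

	ret = i915_gem_check_wedge(dev_priv);	/* bail out early on a hung GPU */
	if (ret == 0)
		ret = i915_gem_check_olr(ring, seqno); /* flush the outstanding lazy request */
	if (ret == 0)
		ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);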
- trace_i915_gem_request_wait_begin(ring, seqno);
-
- ring->waiting_seqno = seqno;
- if (ring->irq_get(ring)) {
- if (dev_priv->mm.interruptible)
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- else
- wait_event(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
-
- ring->irq_put(ring);
- } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000))
- ret = -EBUSY;
- ring->waiting_seqno = 0;
-
- trace_i915_gem_request_wait_end(ring, seqno);
- }
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+ bool interruptible)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ int ret = 0;
+
+ if (i915_seqno_passed(ring->get_seqno(ring), seqno))
+ return 0;
+
+ trace_i915_gem_request_wait_begin(ring, seqno);
+ if (WARN_ON(!ring->irq_get(ring)))
+ return -ENODEV;
+
+#define EXIT_COND \
+ (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
+ atomic_read(&dev_priv->mm.wedged))
+
+ if (interruptible)
+ ret = wait_event_interruptible(ring->irq_queue,
+ EXIT_COND);
+ else
+ wait_event(ring->irq_queue, EXIT_COND);
+
+ ring->irq_put(ring);
+ trace_i915_gem_request_wait_end(ring, seqno);
+#undef EXIT_COND
+
+ return ret;
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct intel_ring_buffer *ring,
+ uint32_t seqno)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ int ret = 0;
+
+ BUG_ON(seqno == 0);
+
+ ret = i915_gem_check_wedge(dev_priv);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
- /* Directly dispatch request retiring. While we have the work queue
- * to handle this, the waiter on a request often wants an associated
- * buffer to have made it to the inactive list, and we would need
- * a separate wait queue to handle that.
- */
- if (ret == 0 && do_retire)
- i915_gem_retire_requests_ring(ring);
-
return ret;
}
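With the do_retire flag gone, callers that still want retiring do it explicitly after a successful wait, as i915_gem_object_wait_rendering() below now does:

	ret = i915_wait_request(ring, seqno);
	if (ret == 0)
		i915_gem_retire_requests_ring(ring);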
@@ -2017,15 +1975,58 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
- true);
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
+ i915_gem_retire_requests_ring(obj->ring);
}
return 0;
}
+/**
+ * i915_gem_object_sync - sync an object to a ring.
+ *
+ * @obj: object which may be in use on another ring.
+ * @to: ring we wish to use the object on. May be NULL.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Calling with NULL implies synchronizing the object with the CPU
+ * rather than a particular GPU ring.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to)
+{
+ struct intel_ring_buffer *from = obj->ring;
+ u32 seqno;
+ int ret, idx;
+
+ if (from == NULL || to == from)
+ return 0;
+
+ if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
+ return i915_gem_object_wait_rendering(obj);
+
+ idx = intel_ring_sync_index(from, to);
+
+ seqno = obj->last_rendering_seqno;
+ if (seqno <= from->sync_seqno[idx])
+ return 0;
+
+ ret = i915_gem_check_olr(obj->ring, seqno);
+ if (ret)
+ return ret;
+
+ ret = to->sync_to(to, from, seqno);
+ if (!ret)
+ from->sync_seqno[idx] = seqno;
+
+ return ret;
+}
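A sketch of the intended use from a submission path (caller shape is illustrative):

	/* before letting ring 'to' consume obj */
	ret = i915_gem_object_sync(obj, to);
	if (ret)
		return ret;
	/* on success a semaphore wait was queued on 'to' (or the wait was
	 * unnecessary); passing to == NULL instead syncs with the CPU */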
+
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
u32 old_write_domain, old_read_domains;
@@ -2068,7 +2069,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
}
ret = i915_gem_object_finish_gpu(obj);
- if (ret == -ERESTARTSYS)
+ if (ret)
return ret;
/* Continue on if we fail due to EIO, the GPU is hung so we
* should be safe and we need to cleanup or else we might
@@ -2095,16 +2096,18 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
/* release the fence reg _after_ flushing */
ret = i915_gem_object_put_fence(obj);
- if (ret == -ERESTARTSYS)
+ if (ret)
return ret;
trace_i915_gem_object_unbind(obj);
- i915_gem_gtt_unbind_object(obj);
+ if (obj->has_global_gtt_mapping)
+ i915_gem_gtt_unbind_object(obj);
if (obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
obj->has_aliasing_ppgtt_mapping = 0;
}
+ i915_gem_gtt_finish_object(obj);
i915_gem_object_put_pages_gtt(obj);
@@ -2145,7 +2148,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
return 0;
}
-static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
{
int ret;
@@ -2159,208 +2162,201 @@ static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
return ret;
}
- return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
- do_retire);
+ return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
-int i915_gpu_idle(struct drm_device *dev, bool do_retire)
+int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret, i;
/* Flush everything onto the inactive list. */
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
+ for_each_ring(ring, dev_priv, i) {
+ ret = i915_ring_idle(ring);
if (ret)
return ret;
+
+ /* Is the device fubar? */
+ if (WARN_ON(!list_empty(&ring->gpu_write_list)))
+ return -EBUSY;
}
return 0;
}
-static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
- 0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
- val |= (uint64_t)((obj->stride / 128) - 1) <<
- SANDYBRIDGE_FENCE_PITCH_SHIFT;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) <<
+ SANDYBRIDGE_FENCE_PITCH_SHIFT;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
- intel_ring_emit(pipelined, (u32)val);
- intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
- intel_ring_emit(pipelined, (u32)(val >> 32));
- intel_ring_advance(pipelined);
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
} else
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
+ val = 0;
- return 0;
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
+ POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
}
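Worked example of the 64-bit fence layout above (hypothetical object: gtt_offset 0x100000, size 0x10000, stride 512, Y-tiled):

	end  = (0x100000 + 0x10000 - 4096) & 0xfffff000 = 0x10f000	/* upper 32 bits */
	base = 0x100000							/* lower 32 bits */
	     | ((512 / 128 - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT)	/* pitch field = 3 */
	     | (1 << I965_FENCE_TILING_Y_SHIFT) | I965_FENCE_REG_VALID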
-static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
- 0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
- val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
- intel_ring_emit(pipelined, (u32)val);
- intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
- intel_ring_emit(pipelined, (u32)(val >> 32));
- intel_ring_advance(pipelined);
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
} else
- I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
+ val = 0;
- return 0;
+ I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
+ POSTING_READ(FENCE_REG_965_0 + reg * 8);
}
-static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- u32 fence_reg, val, pitch_val;
- int tile_width;
-
- if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
- (size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- obj->gtt_offset, obj->map_and_fenceable, size))
- return -EINVAL;
+ u32 val;
- if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
- tile_width = 128;
- else
- tile_width = 512;
-
- /* Note: pitch better be a power of two tile widths */
- pitch_val = obj->stride / tile_width;
- pitch_val = ffs(pitch_val) - 1;
-
- val = obj->gtt_offset;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
-
- fence_reg = obj->fence_reg;
- if (fence_reg < 8)
- fence_reg = FENCE_REG_830_0 + fence_reg * 4;
- else
- fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
+ int pitch_val;
+ int tile_width;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 4);
- if (ret)
- return ret;
+ WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ obj->gtt_offset, obj->map_and_fenceable, size);
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(pipelined, fence_reg);
- intel_ring_emit(pipelined, val);
- intel_ring_advance(pipelined);
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* Note: pitch better be a power of two tile widths */
+ pitch_val = obj->stride / tile_width;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
} else
- I915_WRITE(fence_reg, val);
+ val = 0;
- return 0;
+ if (reg < 8)
+ reg = FENCE_REG_830_0 + reg * 4;
+ else
+ reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
-static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- int regnum = obj->fence_reg;
uint32_t val;
- uint32_t pitch_val;
- if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
- (size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
- obj->gtt_offset, size))
- return -EINVAL;
-
- pitch_val = obj->stride / 128;
- pitch_val = ffs(pitch_val) - 1;
-
- val = obj->gtt_offset;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I830_FENCE_SIZE_BITS(size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
+ uint32_t pitch_val;
+
+ WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+ obj->gtt_offset, size);
+
+ pitch_val = obj->stride / 128;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 4);
- if (ret)
- return ret;
+ I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+ POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
- intel_ring_emit(pipelined, val);
- intel_ring_advance(pipelined);
- } else
- I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
+ case 5:
+ case 4: i965_write_fence_reg(dev, reg, obj); break;
+ case 3: i915_write_fence_reg(dev, reg, obj); break;
+ case 2: i830_write_fence_reg(dev, reg, obj); break;
+ default: break;
+ }
+}
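For illustration, the i965-style packing dispatched above can be read as a pure function of the object's placement. A standalone sketch follows; the shift and bit constants are assumptions for this example (the real values live in i915_reg.h), and the stride is assumed to be a multiple of 128 bytes as the hardware requires:

#include <stdint.h>

/* Assumed constants, for illustration only. */
#define FENCE_PITCH_SHIFT	2
#define FENCE_TILING_Y_BIT	(1ull << 1)
#define FENCE_REG_VALID_BIT	(1ull << 0)

static uint64_t pack_i965_fence(uint32_t gtt_offset, uint32_t size,
				uint32_t stride, int tiling_y)
{
	/* upper dword = last page of the object, lower dword = first page */
	uint64_t val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;

	val |= gtt_offset & 0xfffff000;
	val |= (uint64_t)(stride / 128 - 1) << FENCE_PITCH_SHIFT;
	if (tiling_y)
		val |= FENCE_TILING_Y_BIT;
	return val | FENCE_REG_VALID_BIT;
}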
- return 0;
+static inline int fence_number(struct drm_i915_private *dev_priv,
+ struct drm_i915_fence_reg *fence)
+{
+ return fence - dev_priv->fence_regs;
}
-static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable)
{
- return i915_seqno_passed(ring->get_seqno(ring), seqno);
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int reg = fence_number(dev_priv, fence);
+
+ i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+ if (enable) {
+ obj->fence_reg = reg;
+ fence->obj = obj;
+ list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+ } else {
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ fence->obj = NULL;
+ list_del_init(&fence->lru_list);
+ }
}
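i915_gem_object_update_fence() above maintains a two-way link: when enabled, the fence slot and the object reference each other; when disabled, neither does. A self-contained sketch of just that invariant (types and names are illustrative, not from the driver):

#include <stddef.h>

struct obj;
struct fence_reg { struct obj *obj; };
struct obj { int fence_reg; };

enum { FENCE_NONE = -1 };

/* Mirrors the bookkeeping above: object and register slot always
 * point at each other, or both at nothing. */
static void update_fence(struct obj *o, struct fence_reg *regs,
			 int reg, int enable)
{
	if (enable) {
		o->fence_reg = reg;
		regs[reg].obj = o;
	} else {
		o->fence_reg = FENCE_NONE;
		regs[reg].obj = NULL;
	}
}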
static int
-i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
int ret;
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->last_fenced_ring,
+ ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
if (ret)
return ret;
@@ -2369,18 +2365,12 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
obj->fenced_gpu_access = false;
}
- if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
- if (!ring_passed_seqno(obj->last_fenced_ring,
- obj->last_fenced_seqno)) {
- ret = i915_wait_request(obj->last_fenced_ring,
- obj->last_fenced_seqno,
- true);
- if (ret)
- return ret;
- }
+ if (obj->last_fenced_seqno) {
+ ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
+ if (ret)
+ return ret;
obj->last_fenced_seqno = 0;
- obj->last_fenced_ring = NULL;
}
/* Ensure that all CPU reads are completed before installing a fence
@@ -2395,34 +2385,29 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- if (obj->tiling_mode)
- i915_gem_release_mmap(obj);
-
- ret = i915_gem_object_flush_fence(obj, NULL);
+ ret = i915_gem_object_flush_fence(obj);
if (ret)
return ret;
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
- WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
- i915_gem_clear_fence_reg(obj->base.dev,
- &dev_priv->fence_regs[obj->fence_reg]);
+ if (obj->fence_reg == I915_FENCE_REG_NONE)
+ return 0;
- obj->fence_reg = I915_FENCE_REG_NONE;
- }
+ i915_gem_object_update_fence(obj,
+ &dev_priv->fence_regs[obj->fence_reg],
+ false);
+ i915_gem_object_fence_lost(obj);
return 0;
}
static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev,
- struct intel_ring_buffer *pipelined)
+i915_find_fence_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_fence_reg *reg, *first, *avail;
+ struct drm_i915_fence_reg *reg, *avail;
int i;
/* First try to find a free reg */
@@ -2440,204 +2425,77 @@ i915_find_fence_reg(struct drm_device *dev,
return NULL;
/* None available, try to steal one or wait for a user to finish */
- avail = first = NULL;
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
if (reg->pin_count)
continue;
- if (first == NULL)
- first = reg;
-
- if (!pipelined ||
- !reg->obj->last_fenced_ring ||
- reg->obj->last_fenced_ring == pipelined) {
- avail = reg;
- break;
- }
+ return reg;
}
- if (avail == NULL)
- avail = first;
-
- return avail;
+ return NULL;
}
/**
- * i915_gem_object_get_fence - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up fencing for an object
* @obj: object to map through a fence reg
- * @pipelined: ring on which to queue the change, or NULL for CPU access
- * @interruptible: must we wait uninterruptibly for the register to retire?
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
- *
* This function walks the fence regs looking for a free one for @obj,
* stealing one if it can't find any.
*
* It then sets up the reg based on the object's properties: address, pitch
* and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
*/
int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ bool enable = obj->tiling_mode != I915_TILING_NONE;
struct drm_i915_fence_reg *reg;
int ret;
- /* XXX disable pipelining. There are bugs. Shocking. */
- pipelined = NULL;
+ /* Have we updated the tiling parameters on the object, and so
+ * need to serialise the write to the associated fence register?
+ */
+ if (obj->fence_dirty) {
+ ret = i915_gem_object_flush_fence(obj);
+ if (ret)
+ return ret;
+ }
/* Just update our place in the LRU if our fence is getting reused. */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
reg = &dev_priv->fence_regs[obj->fence_reg];
- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
-
- if (obj->tiling_changed) {
- ret = i915_gem_object_flush_fence(obj, pipelined);
- if (ret)
- return ret;
-
- if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
- pipelined = NULL;
-
- if (pipelined) {
- reg->setup_seqno =
- i915_gem_next_request_seqno(pipelined);
- obj->last_fenced_seqno = reg->setup_seqno;
- obj->last_fenced_ring = pipelined;
- }
-
- goto update;
+ if (!obj->fence_dirty) {
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ return 0;
}
+ } else if (enable) {
+ reg = i915_find_fence_reg(dev);
+ if (reg == NULL)
+ return -EDEADLK;
- if (!pipelined) {
- if (reg->setup_seqno) {
- if (!ring_passed_seqno(obj->last_fenced_ring,
- reg->setup_seqno)) {
- ret = i915_wait_request(obj->last_fenced_ring,
- reg->setup_seqno,
- true);
- if (ret)
- return ret;
- }
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
- reg->setup_seqno = 0;
- }
- } else if (obj->last_fenced_ring &&
- obj->last_fenced_ring != pipelined) {
- ret = i915_gem_object_flush_fence(obj, pipelined);
+ ret = i915_gem_object_flush_fence(old);
if (ret)
return ret;
- }
-
- return 0;
- }
-
- reg = i915_find_fence_reg(dev, pipelined);
- if (reg == NULL)
- return -EDEADLK;
-
- ret = i915_gem_object_flush_fence(obj, pipelined);
- if (ret)
- return ret;
-
- if (reg->obj) {
- struct drm_i915_gem_object *old = reg->obj;
-
- drm_gem_object_reference(&old->base);
-
- if (old->tiling_mode)
- i915_gem_release_mmap(old);
- ret = i915_gem_object_flush_fence(old, pipelined);
- if (ret) {
- drm_gem_object_unreference(&old->base);
- return ret;
+ i915_gem_object_fence_lost(old);
}
+ } else
+ return 0;
- if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
- pipelined = NULL;
-
- old->fence_reg = I915_FENCE_REG_NONE;
- old->last_fenced_ring = pipelined;
- old->last_fenced_seqno =
- pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
-
- drm_gem_object_unreference(&old->base);
- } else if (obj->last_fenced_seqno == 0)
- pipelined = NULL;
-
- reg->obj = obj;
- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
- obj->fence_reg = reg - dev_priv->fence_regs;
- obj->last_fenced_ring = pipelined;
-
- reg->setup_seqno =
- pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
- obj->last_fenced_seqno = reg->setup_seqno;
-
-update:
- obj->tiling_changed = false;
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- ret = sandybridge_write_fence_reg(obj, pipelined);
- break;
- case 5:
- case 4:
- ret = i965_write_fence_reg(obj, pipelined);
- break;
- case 3:
- ret = i915_write_fence_reg(obj, pipelined);
- break;
- case 2:
- ret = i830_write_fence_reg(obj, pipelined);
- break;
- }
-
- return ret;
-}
-
-/**
- * i915_gem_clear_fence_reg - clear out fence register info
- * @obj: object to clear
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj.
- */
-static void
-i915_gem_clear_fence_reg(struct drm_device *dev,
- struct drm_i915_fence_reg *reg)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t fence_reg = reg - dev_priv->fence_regs;
-
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
- break;
- case 5:
- case 4:
- I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
- break;
- case 3:
- if (fence_reg >= 8)
- fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
- else
- case 2:
- fence_reg = FENCE_REG_830_0 + fence_reg * 4;
-
- I915_WRITE(fence_reg, 0);
- break;
- }
+ i915_gem_object_update_fence(obj, reg, enable);
+ obj->fence_dirty = false;
- list_del_init(&reg->lru_list);
- reg->obj = NULL;
- reg->setup_seqno = 0;
- reg->pin_count = 0;
+ return 0;
}
/**
@@ -2749,7 +2607,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return ret;
}
- ret = i915_gem_gtt_bind_object(obj);
+ ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
i915_gem_object_put_pages_gtt(obj);
drm_mm_put_block(obj->gtt_space);
@@ -2761,6 +2619,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
goto search_free;
}
+ if (!dev_priv->mm.aliasing_ppgtt)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
@@ -2878,6 +2739,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -2918,6 +2780,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
old_read_domains,
old_write_domain);
+ /* And bump the LRU for this access */
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
return 0;
}
@@ -2953,7 +2819,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return ret;
}
- i915_gem_gtt_rebind_object(obj, cache_level);
+ if (obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, cache_level);
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
@@ -2990,11 +2857,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
* any flushes to be pipelined (for pageflips).
- *
- * For the display plane, we want to be in the GTT but out of any write
- * domains. So in many ways this looks like set_to_gtt_domain() apart from the
- * ability to pipeline the waits, pinning and any additional subtleties
- * that may differentiate the display plane from ordinary buffers.
*/
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
@@ -3009,8 +2871,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return ret;
if (pipelined != obj->ring) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret == -ERESTARTSYS)
+ ret = i915_gem_object_sync(obj, pipelined);
+ if (ret)
return ret;
}
@@ -3082,7 +2944,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
-static int
+int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
@@ -3095,17 +2957,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
+ if (write || obj->pending_gpu_write) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+ }
i915_gem_object_flush_gtt_write_domain(obj);
- /* If we have a partially-valid cache of the object in the CPU,
- * finish invalidating it and free the per-page flags.
- */
- i915_gem_object_set_to_full_cpu_read_domain(obj);
-
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3136,113 +2995,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
-/**
- * Moves the object from a partially CPU read to a full one.
- *
- * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
- * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
- */
-static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
-{
- if (!obj->page_cpu_valid)
- return;
-
- /* If we're partially in the CPU read domain, finish moving it in.
- */
- if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
- int i;
-
- for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
- if (obj->page_cpu_valid[i])
- continue;
- drm_clflush_pages(obj->pages + i, 1);
- }
- }
-
- /* Free the page_cpu_valid mappings which are now stale, whether
- * or not we've got I915_GEM_DOMAIN_CPU.
- */
- kfree(obj->page_cpu_valid);
- obj->page_cpu_valid = NULL;
-}
-
-/**
- * Set the CPU read domain on a range of the object.
- *
- * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
- * not entirely valid. The page_cpu_valid member of the object flags which
- * pages have been flushed, and will be respected by
- * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
- * of the whole object.
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
- uint64_t offset, uint64_t size)
-{
- uint32_t old_read_domains;
- int i, ret;
-
- if (offset == 0 && size == obj->base.size)
- return i915_gem_object_set_to_cpu_domain(obj, 0);
-
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
-
- i915_gem_object_flush_gtt_write_domain(obj);
-
- /* If we're already fully in the CPU read domain, we're done. */
- if (obj->page_cpu_valid == NULL &&
- (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
- return 0;
-
- /* Otherwise, create/clear the per-page CPU read domain flag if we're
- * newly adding I915_GEM_DOMAIN_CPU
- */
- if (obj->page_cpu_valid == NULL) {
- obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
- GFP_KERNEL);
- if (obj->page_cpu_valid == NULL)
- return -ENOMEM;
- } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
- memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
-
- /* Flush the cache on any pages that are still invalid from the CPU's
- * perspective.
- */
- for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
- i++) {
- if (obj->page_cpu_valid[i])
- continue;
-
- drm_clflush_pages(obj->pages + i, 1);
-
- obj->page_cpu_valid[i] = 1;
- }
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
-
- old_read_domains = obj->base.read_domains;
- obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- obj->base.write_domain);
-
- return 0;
-}
-
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
@@ -3280,28 +3032,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (seqno == 0)
return 0;
- ret = 0;
- if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
- /* And wait for the seqno passing without holding any locks and
- * causing extra latency for others. This is safe as the irq
- * generation is designed to be run atomically and so is
- * lockless.
- */
- if (ring->irq_get(ring)) {
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- ring->irq_put(ring);
-
- if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
- } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000)) {
- ret = -EBUSY;
- }
- }
-
+ ret = __wait_seqno(ring, seqno, true);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -3313,12 +3044,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
- WARN_ON(i915_verify_lists(dev));
if (obj->gtt_space != NULL) {
if ((alignment && obj->gtt_offset & (alignment - 1)) ||
@@ -3343,34 +3071,23 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
return ret;
}
- if (obj->pin_count++ == 0) {
- if (!obj->active)
- list_move_tail(&obj->mm_list,
- &dev_priv->mm.pinned_list);
- }
+ if (!obj->has_global_gtt_mapping && map_and_fenceable)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+ obj->pin_count++;
obj->pin_mappable |= map_and_fenceable;
- WARN_ON(i915_verify_lists(dev));
return 0;
}
void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- WARN_ON(i915_verify_lists(dev));
BUG_ON(obj->pin_count == 0);
BUG_ON(obj->gtt_space == NULL);
- if (--obj->pin_count == 0) {
- if (!obj->active)
- list_move_tail(&obj->mm_list,
- &dev_priv->mm.inactive_list);
+ if (--obj->pin_count == 0)
obj->pin_mappable = false;
- }
- WARN_ON(i915_verify_lists(dev));
}
int
@@ -3494,20 +3211,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
- } else if (obj->ring->outstanding_lazy_request ==
- obj->last_rendering_seqno) {
- struct drm_i915_gem_request *request;
-
- /* This ring is not being cleared by active usage,
- * so emit a request to do so.
- */
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request) {
- ret = i915_add_request(obj->ring, NULL, request);
- if (ret)
- kfree(request);
- } else
- ret = -ENOMEM;
+ } else {
+ ret = i915_gem_check_olr(obj->ring,
+ obj->last_rendering_seqno);
}
/* Update the active list for the hardware's current position.
@@ -3643,46 +3349,42 @@ int i915_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
-
- ret = i915_gem_object_unbind(obj);
- if (ret == -ERESTARTSYS) {
- list_move(&obj->mm_list,
- &dev_priv->mm.deferred_free_list);
- return;
- }
trace_i915_gem_object_destroy(obj);
+ if (gem_obj->import_attach)
+ drm_prime_gem_destroy(gem_obj, obj->sg_table);
+
+ if (obj->phys_obj)
+ i915_gem_detach_phys_object(dev, obj);
+
+ obj->pin_count = 0;
+ if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
+ bool was_interruptible;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ WARN_ON(i915_gem_object_unbind(obj));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+
if (obj->base.map_list.map)
drm_gem_free_mmap_offset(&obj->base);
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
- kfree(obj->page_cpu_valid);
kfree(obj->bit_17);
kfree(obj);
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- struct drm_device *dev = obj->base.dev;
-
- while (obj->pin_count > 0)
- i915_gem_object_unpin(obj);
-
- if (obj->phys_obj)
- i915_gem_detach_phys_object(dev, obj);
-
- i915_gem_free_object_tail(obj);
-}
-
int
i915_gem_idle(struct drm_device *dev)
{
@@ -3696,20 +3398,16 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
+ i915_gem_retire_requests(dev);
/* Under UMS, be paranoid and evict. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_inactive(dev, false);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_gem_evict_everything(dev, false);
i915_gem_reset_fences(dev);
@@ -3747,9 +3445,9 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN6(dev))
- I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
else
- I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
void i915_gem_init_ppgtt(struct drm_device *dev)
@@ -3787,21 +3485,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
- uint32_t ecochk = I915_READ(GAM_ECOCHK);
+ uint32_t ecochk, gab_ctl, ecobits;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ring = &dev_priv->ring[i];
-
+ for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
- GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
@@ -3845,14 +3549,80 @@ cleanup_render_ring:
return ret;
}
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+ if (i915_enable_ppgtt >= 0)
+ return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return true;
+}
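intel_enable_ppgtt() follows the driver's usual tri-state module-parameter convention (-1 auto-detect, 0 force off, 1 force on). A generic sketch of that pattern, with illustrative names:

static int enable_feature = -1;	/* set via module_param() in the real driver */

static int feature_enabled(int hw_ok, int known_bad)
{
	if (enable_feature >= 0)	/* explicit user override wins */
		return enable_feature;
	return hw_ok && !known_bad;	/* otherwise auto-detect */
}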
+
+int i915_gem_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long gtt_size, mappable_size;
+ int ret;
+
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ mutex_lock(&dev->struct_mutex);
+ if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+ /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+ * aperture accordingly when using aliasing ppgtt. */
+ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+ i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
+
+ ret = i915_gem_init_aliasing_ppgtt(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ } else {
+ /* Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch
+ * page. There are a number of places where the hardware
+ * apparently prefetches past the end of the object, and we've
+ * seen multiple hangs with the GPU head pointer stuck in a
+ * batchbuffer bound at the last page of the aperture. One page
+ * should be enough to keep any prefetching inside of the
+ * aperture.
+ */
+ i915_gem_init_global_gtt(dev, 0, mappable_size,
+ gtt_size);
+ }
+
+ ret = i915_gem_init_hw(dev);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ return ret;
+ }
+
+ /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->dri1.allow_batchbuffer = 1;
+ return 0;
+}
+
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int i;
- for (i = 0; i < I915_NUM_RINGS; i++)
- intel_cleanup_ring_buffer(&dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ intel_cleanup_ring_buffer(ring);
}
int
@@ -3860,7 +3630,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret, i;
+ int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
@@ -3882,10 +3652,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- for (i = 0; i < I915_NUM_RINGS; i++) {
- BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
- BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
- }
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -3944,9 +3710,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
- INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
@@ -3958,12 +3722,8 @@ i915_gem_load(struct drm_device *dev)
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
- u32 tmp = I915_READ(MI_ARB_STATE);
- if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
- /* arb state is a masked write, so set bit + bit in mask */
- tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
- I915_WRITE(MI_ARB_STATE, tmp);
- }
+ I915_WRITE(MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
@@ -3978,9 +3738,7 @@ i915_gem_load(struct drm_device *dev)
dev_priv->num_fence_regs = 8;
/* Initialize fence registers to zero */
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
- }
+ i915_gem_reset_fences(dev);
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -4268,7 +4026,7 @@ rescan:
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
- if (i915_gpu_idle(dev, true) == 0)
+ if (i915_gpu_idle(dev) == 0)
goto rescan;
}
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index cc93cac242d6..a4f6aaabca99 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -114,22 +114,6 @@ i915_verify_lists(struct drm_device *dev)
}
}
- list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
- if (obj->base.dev != dev ||
- !atomic_read(&obj->base.refcount.refcount)) {
- DRM_ERROR("freed pinned %p\n", obj);
- err++;
- break;
- } else if (!obj->pin_count || obj->active ||
- (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
- DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
- obj,
- obj->pin_count, obj->active,
- obj->base.write_domain);
- err++;
- }
- }
-
return warned = err;
}
#endif /* WATCH_INACTIVE */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
new file mode 100644
index 000000000000..8e269178d6a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2012 Red Hat Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "i915_drv.h"
+#include <linux/dma-buf.h>
+
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int npages = obj->base.size / PAGE_SIZE;
+ struct sg_table *sg = NULL;
+ int ret;
+ int nents;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!obj->pages) {
+ ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+ if (ret)
+ goto out;
+ }
+
+ /* link the pages into an sg_table, then DMA-map it for the device */
+ sg = drm_prime_pages_to_sg(obj->pages, npages);
+ nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return sg;
+}
+
+static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+
+ if (obj->base.export_dma_buf == dma_buf) {
+ /* drop the reference the export fd holds */
+ obj->base.export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(&obj->base);
+ }
+}
+
+static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static const struct dma_buf_ops i915_dmabuf_ops = {
+ .map_dma_buf = i915_gem_map_dma_buf,
+ .unmap_dma_buf = i915_gem_unmap_dma_buf,
+ .release = i915_gem_dmabuf_release,
+ .kmap = i915_gem_dmabuf_kmap,
+ .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
+ .kunmap = i915_gem_dmabuf_kunmap,
+ .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+};
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+ return dma_buf_export(obj, &i915_dmabuf_ops,
+ obj->base.size, 0600);
+}
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct drm_i915_gem_object *obj;
+ int npages;
+ int size;
+ int ret;
+
+ /* is this one of our own objects? */
+ if (dma_buf->ops == &i915_dmabuf_ops) {
+ obj = dma_buf->priv;
+ /* is it from our device? */
+ if (obj->base.dev == dev) {
+ drm_gem_object_reference(&obj->base);
+ return &obj->base;
+ }
+ }
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ size = dma_buf->size;
+ npages = size / PAGE_SIZE;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL) {
+ ret = -ENOMEM;
+ goto fail_unmap;
+ }
+
+ ret = drm_gem_private_object_init(dev, &obj->base, size);
+ if (ret) {
+ kfree(obj);
+ goto fail_unmap;
+ }
+
+ obj->sg_table = sg;
+ obj->base.import_attach = attach;
+
+ return &obj->base;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
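These export/import hooks are reached through the driver's struct drm_driver, so the generic PRIME ioctls can call into them. A minimal sketch of that wiring, assuming the 3.5-era field names (not compilable standalone; kernel context required):

static struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_PRIME /* | ... */,
	/* generic fd<->handle plumbing provided by the DRM core */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	/* driver-specific wrap/unwrap, defined in the file above */
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,
	/* ... */
};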
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 21a82710f4b2..ae7c24e12e52 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -35,6 +35,9 @@
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
+ if (obj->pin_count)
+ return false;
+
list_add(&obj->exec_list, unwind);
return drm_mm_scan_add_block(obj->gtt_space);
}
@@ -90,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
- if (obj->base.write_domain || obj->pin_count)
+ if (obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
@@ -99,14 +102,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
- if (obj->pin_count)
- continue;
-
if (mark_free(obj, &unwind_list))
goto found;
}
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (!obj->base.write_domain || obj->pin_count)
+ if (!obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
@@ -166,8 +166,9 @@ int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ struct drm_i915_gem_object *obj, *next;
bool lists_empty;
+ int ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
@@ -177,29 +178,24 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
trace_i915_gem_evict_everything(dev, purgeable_only);
- /* Flush everything (on to the inactive lists) and evict */
- ret = i915_gpu_idle(dev, true);
+ /* i915_gpu_idle() will flush everything in the write domain to the
+ * active list. Then we must move everything off the active list
+ * by retiring requests.
+ */
+ ret = i915_gpu_idle(dev);
if (ret)
return ret;
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ i915_gem_retire_requests(dev);
- return i915_gem_evict_inactive(dev, purgeable_only);
-}
-
-/** Unbinds all inactive objects. */
-int
-i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj, *next;
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ /* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) {
if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
- int ret = i915_gem_object_unbind(obj);
- if (ret)
- return ret;
+ if (obj->pin_count == 0)
+ WARN_ON(i915_gem_object_unbind(obj));
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index de431942ded4..974a9f1068a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -266,6 +266,12 @@ eb_destroy(struct eb_objects *eb)
kfree(eb);
}
+static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
+{
+ return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ obj->cache_level != I915_CACHE_NONE);
+}
+
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
@@ -273,6 +279,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_i915_obj;
uint32_t target_offset;
int ret = -EINVAL;
@@ -281,7 +288,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (unlikely(target_obj == NULL))
return -ENOENT;
- target_offset = to_intel_bo(target_obj)->gtt_offset;
+ target_i915_obj = to_intel_bo(target_obj);
+ target_offset = target_i915_obj->gtt_offset;
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
@@ -352,11 +360,19 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
+ /* We can't wait for rendering with pagefaults disabled */
+ if (obj->active && in_atomic())
+ return -EFAULT;
+
reloc->delta += target_offset;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ if (use_cpu_reloc(obj)) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
char *vaddr;
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
+ return ret;
+
vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
@@ -365,11 +381,11 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
- /* We can't wait for rendering with pagefaults disabled */
- if (obj->active && in_atomic())
- return -EFAULT;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
@@ -383,6 +399,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
io_mapping_unmap_atomic(reloc_page);
}
+ /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+ * pipe_control writes because the gpu doesn't properly redirect them
+ * through the ppgtt for non-secure batchbuffers. */
+ if (unlikely(IS_GEN6(dev) &&
+ reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+ !target_i915_obj->has_global_gtt_mapping)) {
+ i915_gem_gtt_bind_object(target_i915_obj,
+ target_i915_obj->cache_level);
+ }
+
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
@@ -393,30 +419,46 @@ static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
{
+#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
+ struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
- int i, ret;
+ int remain, ret;
user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry reloc;
- if (__copy_from_user_inatomic(&reloc,
- user_relocs+i,
- sizeof(reloc)))
+ remain = entry->relocation_count;
+ while (remain) {
+ struct drm_i915_gem_relocation_entry *r = stack_reloc;
+ int count = remain;
+ if (count > ARRAY_SIZE(stack_reloc))
+ count = ARRAY_SIZE(stack_reloc);
+ remain -= count;
+
+ if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
return -EFAULT;
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
- if (ret)
- return ret;
+ do {
+ u64 offset = r->presumed_offset;
- if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
- &reloc.presumed_offset,
- sizeof(reloc.presumed_offset)))
- return -EFAULT;
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+ if (ret)
+ return ret;
+
+ if (r->presumed_offset != offset &&
+ __copy_to_user_inatomic(&user_relocs->presumed_offset,
+ &r->presumed_offset,
+ sizeof(r->presumed_offset))) {
+ return -EFAULT;
+ }
+
+ user_relocs++;
+ r++;
+ } while (--count);
}
return 0;
+#undef N_RELOC
}
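The loop above trades one __copy_from_user_inatomic() per relocation for one bulk copy per 512-byte batch held on the stack. The same pattern in plain C, with memcpy() standing in for the user copy and all names illustrative:

#include <stddef.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct reloc { unsigned long presumed_offset; unsigned long delta; };

static int process_relocs(const struct reloc *user, size_t nreloc,
			  int (*apply)(struct reloc *r))
{
	struct reloc stack[512 / sizeof(struct reloc)];
	size_t remain = nreloc;

	while (remain) {
		size_t i, count = remain;

		if (count > ARRAY_SIZE(stack))
			count = ARRAY_SIZE(stack);
		remain -= count;

		memcpy(stack, user, count * sizeof(stack[0])); /* one bulk copy */
		for (i = 0; i < count; i++) {
			int ret = apply(&stack[i]);
			if (ret)
				return ret;
		}
		user += count;
	}
	return 0;
}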
static int
@@ -465,6 +507,13 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
#define __EXEC_OBJECT_HAS_FENCE (1<<31)
static int
+need_reloc_mappable(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ return entry->relocation_count && !use_cpu_reloc(obj);
+}
+
+static int
pin_and_fence_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
@@ -477,8 +526,7 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
+ need_mappable = need_fence || need_reloc_mappable(obj);
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
if (ret)
@@ -486,18 +534,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- if (obj->tiling_mode) {
- ret = i915_gem_object_get_fence(obj, ring);
- if (ret)
- goto err_unpin;
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto err_unpin;
+ if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
- i915_gem_object_pin_fence(obj);
- } else {
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto err_unpin;
- }
+
obj->pending_fenced_gpu_access = true;
}
}
@@ -535,8 +578,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
+ need_mappable = need_fence || need_reloc_mappable(obj);
if (need_mappable)
list_move(&obj->exec_list, &ordered_objects);
@@ -576,8 +618,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
+ need_mappable = need_fence || need_reloc_mappable(obj);
if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
@@ -798,64 +839,6 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
-static bool
-intel_enable_semaphores(struct drm_device *dev)
-{
- if (INTEL_INFO(dev)->gen < 6)
- return 0;
-
- if (i915_semaphores >= 0)
- return i915_semaphores;
-
- /* Disable semaphores on SNB */
- if (INTEL_INFO(dev)->gen == 6)
- return 0;
-
- return 1;
-}
-
-static int
-i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to)
-{
- struct intel_ring_buffer *from = obj->ring;
- u32 seqno;
- int ret, idx;
-
- if (from == NULL || to == from)
- return 0;
-
- /* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (!intel_enable_semaphores(obj->base.dev))
- return i915_gem_object_wait_rendering(obj);
-
- idx = intel_ring_sync_index(from, to);
-
- seqno = obj->last_rendering_seqno;
- if (seqno <= from->sync_seqno[idx])
- return 0;
-
- if (seqno == from->outstanding_lazy_request) {
- struct drm_i915_gem_request *request;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
- ret = i915_add_request(from, NULL, request);
- if (ret) {
- kfree(request);
- return ret;
- }
-
- seqno = request->seqno;
- }
-
- from->sync_seqno[idx] = seqno;
-
- return to->sync_to(to, from, seqno - 1);
-}
-
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
@@ -917,7 +900,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
}
list_for_each_entry(obj, objects, exec_list) {
- ret = i915_gem_execbuffer_sync_rings(obj, ring);
+ ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
}
@@ -955,7 +938,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
- if (fault_in_pages_readable(ptr, length))
+ if (fault_in_multipages_readable(ptr, length))
return -EFAULT;
}
@@ -984,11 +967,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
- intel_mark_busy(ring->dev, obj);
+ if (obj->pin_count) /* check for potential scanout */
+ intel_mark_busy(ring->dev, obj);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
+
+ intel_mark_busy(ring->dev, NULL);
}
static void
@@ -1078,17 +1064,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ring = &dev_priv->ring[RCS];
break;
case I915_EXEC_BSD:
- if (!HAS_BSD(dev)) {
- DRM_DEBUG("execbuf with invalid ring (BSD)\n");
- return -EINVAL;
- }
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
- if (!HAS_BLT(dev)) {
- DRM_DEBUG("execbuf with invalid ring (BLT)\n");
- return -EINVAL;
- }
ring = &dev_priv->ring[BCS];
break;
default:
@@ -1096,6 +1074,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
+ if (!intel_ring_initialized(ring)) {
+ DRM_DEBUG("execbuf with invalid ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
+ }
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
@@ -1133,11 +1116,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (INTEL_INFO(dev)->gen >= 5) {
+ DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+ return -EINVAL;
+ }
+
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
+
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
@@ -1242,9 +1231,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
goto err;
+ i915_gem_retire_requests(dev);
BUG_ON(ring->sync_seqno[i]);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a135c61f4119..9fd25a435536 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -96,11 +96,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
- }
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
- if (dev_priv->mm.gtt->needs_dmar) {
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
0, 4096,
PCI_DMA_BIDIRECTIONAL);
@@ -112,8 +111,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
}
ppgtt->pt_dma_addr[i] = pt_addr;
- } else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+ }
}
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
@@ -269,7 +267,13 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
BUG();
}
- if (dev_priv->mm.gtt->needs_dmar) {
+ if (obj->sg_table) {
+ i915_ppgtt_insert_sg_entries(ppgtt,
+ obj->sg_table->sgl,
+ obj->sg_table->nents,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ pte_flags);
+ } else if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
i915_ppgtt_insert_sg_entries(ppgtt,
@@ -319,7 +323,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
- if (i915_gpu_idle(dev_priv->dev, false)) {
+ if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -346,48 +350,39 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
i915_gem_clflush_object(obj);
- i915_gem_gtt_rebind_object(obj, obj->cache_level);
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
}
intel_gtt_chipset_flush();
}
-int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
- int ret;
-
- if (dev_priv->mm.gtt->needs_dmar) {
- ret = intel_gtt_map_memory(obj->pages,
- obj->base.size >> PAGE_SHIFT,
- &obj->sg_list,
- &obj->num_sg);
- if (ret != 0)
- return ret;
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
- return 0;
+ if (dev_priv->mm.gtt->needs_dmar)
+ return intel_gtt_map_memory(obj->pages,
+ obj->base.size >> PAGE_SHIFT,
+ &obj->sg_list,
+ &obj->num_sg);
+ else
+ return 0;
}
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
- if (dev_priv->mm.gtt->needs_dmar) {
+ if (obj->sg_table) {
+ intel_gtt_insert_sg_entries(obj->sg_table->sgl,
+ obj->sg_table->nents,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
+ } else if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
intel_gtt_insert_sg_entries(obj->sg_list,
@@ -399,19 +394,26 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
obj->base.size >> PAGE_SHIFT,
obj->pages,
agp_type);
+
+ obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
+ intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+
+ obj->has_global_gtt_mapping = 0;
+}
+
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible;
interruptible = do_idling(dev_priv);
- intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
-
if (obj->sg_list) {
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
@@ -419,3 +421,23 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
undo_idling(dev_priv, interruptible);
}
+
+void i915_gem_init_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+	/* Subtract the guard page ... */
+ drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+
+ dev_priv->mm.gtt_start = start;
+ dev_priv->mm.gtt_mappable_end = mappable_end;
+ dev_priv->mm.gtt_end = end;
+ dev_priv->mm.gtt_total = end - start;
+ dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+
+ /* ... but ensure that we clear the entire range. */
+ intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
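
Worth spelling out: i915_gem_init_global_gtt() seeds the drm_mm allocator with one page less than the GTT range, so the final page (a guard against the GPU prefetching past the end of the GTT) can never be handed out, yet the scratch-page clear still covers every PTE including the guard. A minimal standalone sketch of that invariant (illustrative only; the helper name and the 4K page size are assumptions):

	/* Sketch: the allocator manages [start, end - PAGE_SIZE), but the
	 * PTE scrub covers [start, end). Assumes page-aligned start/end. */
	#include <assert.h>
	#define PAGE_SIZE 4096UL

	static void check_gtt_split(unsigned long start, unsigned long end)
	{
		unsigned long managed = end - start - PAGE_SIZE;
		unsigned long scrubbed = (end - start) / PAGE_SIZE;

		/* every allocatable page plus the guard page gets scrubbed */
		assert(scrubbed == managed / PAGE_SIZE + 1);
	}
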
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
new file mode 100644
index 000000000000..ada2e90a2a60
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright © 2008-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * The BIOS typically reserves some of the system's memory for the exclusive
+ * use of the integrated graphics. This memory is no longer available for
+ * use by the OS and so the user finds that their system has less memory
+ * available than they put in. We refer to this memory as stolen.
+ *
+ * The BIOS will allocate its framebuffer from the stolen memory. Our
+ * goal is to try to reuse that object for our own fbcon, which must always
+ * be available for panics. Anything else we can reuse the stolen memory
+ * for is a boon.
+ */
+
+#define PTE_ADDRESS_MASK 0xfffff000
+#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
+#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
+#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
+#define PTE_MAPPING_TYPE_CACHED (3 << 1)
+#define PTE_MAPPING_TYPE_MASK (3 << 1)
+#define PTE_VALID (1 << 0)
+
+/**
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ * a physical one
+ * @dev: drm device
+ * @offset: address to translate
+ *
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
+ */
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
+#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+ * is unreliable, so compute the base by subtracting the stolen memory
+ * from the Top of Low Usable DRAM, which is where the BIOS places
+ * the graphics stolen memory.
+ */
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* top 32bits are reserved = 0 */
+ pci_read_config_dword(pdev, 0xA4, &base);
+ } else {
+ /* XXX presume 8xx is the same as i915 */
+ pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+ }
+#else
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ u16 val;
+ pci_read_config_word(pdev, 0xb0, &val);
+ base = val >> 4 << 20;
+ } else {
+ u8 val;
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
+ }
+ base -= dev_priv->mm.gtt->stolen_size;
+#endif
+
+ return base + offset;
+}
+
+static void i915_warn_stolen(struct drm_device *dev)
+{
+ DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
+ DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+}
+
+static void i915_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+ unsigned long cfb_base;
+ unsigned long ll_base = 0;
+
+ /* Just in case the BIOS is doing something questionable. */
+ intel_disable_fbc(dev);
+
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ if (compressed_fb)
+ compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+ if (!compressed_fb)
+ goto err;
+
+ cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ if (!cfb_base)
+ goto err_fb;
+
+ if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+ compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+ 4096, 4096, 0);
+ if (compressed_llb)
+ compressed_llb = drm_mm_get_block(compressed_llb,
+ 4096, 4096);
+ if (!compressed_llb)
+ goto err_fb;
+
+ ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ if (!ll_base)
+ goto err_llb;
+ }
+
+ dev_priv->cfb_size = size;
+
+ dev_priv->compressed_fb = compressed_fb;
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ } else if (IS_GM45(dev)) {
+ I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ } else {
+ I915_WRITE(FBC_CFB_BASE, cfb_base);
+ I915_WRITE(FBC_LL_BASE, ll_base);
+ dev_priv->compressed_llb = compressed_llb;
+ }
+
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+ cfb_base, ll_base, size >> 20);
+ return;
+
+err_llb:
+ drm_mm_put_block(compressed_llb);
+err_fb:
+ drm_mm_put_block(compressed_fb);
+err:
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ i915_warn_stolen(dev);
+}
+
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ drm_mm_put_block(dev_priv->compressed_fb);
+ if (dev_priv->compressed_llb)
+ drm_mm_put_block(dev_priv->compressed_llb);
+}
+
+void i915_gem_cleanup_stolen(struct drm_device *dev)
+{
+ if (I915_HAS_FBC(dev) && i915_powersave)
+ i915_cleanup_compression(dev);
+}
+
+int i915_gem_init_stolen(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
+
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+ /* Try to set up FBC with a reasonable compressed buffer size */
+ if (I915_HAS_FBC(dev) && i915_powersave) {
+ int cfb_size;
+
+ /* Leave 1M for line length buffer & misc. */
+
+ /* Try to get a 32M buffer... */
+ if (prealloc_size > (36*1024*1024))
+ cfb_size = 32*1024*1024;
+ else /* fall back to 7/8 of the stolen space */
+ cfb_size = prealloc_size * 7 / 8;
+ i915_setup_compression(dev, cfb_size);
+ }
+
+ return 0;
+}
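
Two bits of arithmetic above deserve a gloss. In i915_stolen_to_phys(), the config-space reads decode the stolen base: on gen4+/G33 the 16-bit word at 0xb0 holds the base in 1MB units starting at bit 4 (hence val >> 4 << 20), while older parts keep it in 128MB units at bit 3 of the byte at 0x9c (hence val >> 3 << 27); subtracting stolen_size then walks back from the top of stolen memory to its base. And in i915_gem_init_stolen(), the compressed-buffer heuristic caps the CFB at 32MB when there is headroom, otherwise takes 7/8 of stolen. A worked example with illustrative values (not taken from real hardware):

	/* gen4+/G33 decode: 0xb0 reads 0x07f0
	 *   0x07f0 >> 4 = 127  ->  127 << 20 = 0x07f00000 (the 127MB mark)
	 * with stolen_size = 32MB: base = 0x07f00000 - 0x02000000 = 0x05f00000.
	 *
	 * CFB sizing: prealloc_size = 64MB > 36MB  -> cfb_size = 32MB;
	 *             prealloc_size = 16MB <= 36MB -> cfb_size = 14MB (7/8). */
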
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 1a9306665987..b964df51cec7 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -354,9 +354,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
- * need to ensure that any fence register is cleared.
+ * need to ensure that any fence register is updated before the
+ * next fenced access (either through the GTT or by the BLT unit
+ * on older GPUs).
+ *
+ * After updating the tiling parameters, we then flag whether
+ * we need to update an associated fence register. Note this
+ * has to also include the unfenced register the GPU uses
+ * whilst executing a fenced command for an untiled object.
*/
- i915_gem_release_mmap(obj);
obj->map_and_fenceable =
obj->gtt_space == NULL ||
@@ -374,9 +380,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
if (ret == 0) {
- obj->tiling_changed = true;
+ obj->fence_dirty =
+ obj->fenced_gpu_access ||
+ obj->fence_reg != I915_FENCE_REG_NONE;
+
obj->tiling_mode = args->tiling_mode;
obj->stride = args->stride;
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_release_mmap(obj);
}
}
/* we have to maintain this existing ABI... */
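
The fence_dirty computation above reduces to: the object's fence (including the unfenced tracking the GPU uses while executing fenced commands on untiled objects) must be rewritten if the object either performed fenced GPU access or currently holds a fence register. A hypothetical helper restating the same predicate (not part of the patch):

	static bool fence_needs_update(const struct drm_i915_gem_object *obj)
	{
		return obj->fenced_gpu_access ||
		       obj->fence_reg != I915_FENCE_REG_NONE;
	}
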
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 13b028994b2b..0e72abb9f701 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -34,6 +34,7 @@
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
+#include "i915_drv.h"
typedef struct _drm_i915_batchbuffer32 {
int start; /* agp offset */
@@ -181,7 +182,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
(unsigned long)request);
}
-drm_ioctl_compat_t *i915_compat_ioctls[] = {
+static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
@@ -189,6 +190,7 @@ drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_ALLOC] = compat_i915_alloc
};
+#ifdef CONFIG_COMPAT
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
@@ -217,3 +219,4 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return ret;
}
+#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index afd4e03e337e..cc4a63307611 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
@@ -35,35 +37,6 @@
#include "i915_trace.h"
#include "intel_drv.h"
-#define MAX_NOPID ((u32)~0)
-
-/**
- * Interrupts that are always left unmasked.
- *
- * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
- * we leave them always unmasked in IMR and then control enabling them through
- * PIPESTAT alone.
- */
-#define I915_INTERRUPT_ENABLE_FIX \
- (I915_ASLE_INTERRUPT | \
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-
-/** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
-
-#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
- PIPE_VBLANK_INTERRUPT_STATUS)
-
-#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
- PIPE_VBLANK_INTERRUPT_ENABLE)
-
-#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
- DRM_I915_VBLANK_PIPE_B)
-
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -118,6 +91,10 @@ void intel_enable_asle(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
+ /* FIXME: opregion/asle for VLV */
+ if (IS_VALLEYVIEW(dev))
+ return;
+
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
@@ -354,15 +331,12 @@ static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 seqno;
if (ring->obj == NULL)
return;
- seqno = ring->get_seqno(ring);
- trace_i915_gem_request_complete(ring, seqno);
+ trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
- ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
dev_priv->hangcheck_count = 0;
@@ -424,13 +398,145 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_unlock(&dev_priv->dev->struct_mutex);
}
-static void pch_irq_handler(struct drm_device *dev)
+static void snb_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+ if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
+ GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & GEN6_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+ if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[BCS]);
+
+ if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+ GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+ GT_RENDER_CS_ERROR_INTERRUPT)) {
+ DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
+ i915_handle_error(dev, false);
+ }
+}
+
+static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
+ u32 pm_iir)
+{
+ unsigned long flags;
+
+ /*
+ * IIR bits should never already be set because IMR should
+ * prevent an interrupt from being shown in IIR. The warning
+ * catches a case where we've unsafely cleared
+ * dev_priv->pm_iir. Although missing an interrupt of the same
+ * type is not a problem, it indicates a flaw in the logic.
+ *
+ * The mask bit in IMR is cleared by rps_work.
+ */
+
+ spin_lock_irqsave(&dev_priv->rps_lock, flags);
+ WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
+ dev_priv->pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ POSTING_READ(GEN6_PMIMR);
+ spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+
+ queue_work(dev_priv->wq, &dev_priv->rps_work);
+}
+
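
gen6_queue_rps_work() is the top half of the deferred RPS flow: under rps_lock it accumulates the PM IIR bits, masks them in GEN6_PMIMR so they cannot refire, and queues the work item. The bottom half is then expected to drain dev_priv->pm_iir and drop the IMR mask under the same lock. A sketch of that pairing, assuming a simplified work function (the driver's real gen6_pm_rps_work() also reprograms the frequency):

	static void rps_work_sketch(struct work_struct *work)
	{
		struct drm_i915_private *dev_priv =
			container_of(work, struct drm_i915_private, rps_work);
		unsigned long flags;
		u32 pm_iir;

		spin_lock_irqsave(&dev_priv->rps_lock, flags);
		pm_iir = dev_priv->pm_iir;	/* take ownership of the bits */
		dev_priv->pm_iir = 0;
		I915_WRITE(GEN6_PMIMR, 0);	/* unmask: new events may fire */
		spin_unlock_irqrestore(&dev_priv->rps_lock, flags);

		/* ... act on pm_iir ... */
	}
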
+static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
+ struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 pch_iir;
+ u32 iir, gt_iir, pm_iir;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long irqflags;
int pipe;
+ u32 pipe_stats[I915_MAX_PIPES];
+ u32 vblank_status;
+ int vblank = 0;
+ bool blc_event = false;
- pch_iir = I915_READ(SDEIIR);
+ atomic_inc(&dev_priv->irq_received);
+
+ vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
+ PIPE_VBLANK_INTERRUPT_STATUS;
+
+ while (true) {
+ iir = I915_READ(VLV_IIR);
+ gt_iir = I915_READ(GTIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
+
+ if (gt_iir == 0 && pm_iir == 0 && iir == 0)
+ goto out;
+
+ ret = IRQ_HANDLED;
+
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+ if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
+ drm_handle_vblank(dev, 0);
+ vblank++;
+ intel_finish_page_flip(dev, 0);
+ }
+
+ if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
+ drm_handle_vblank(dev, 1);
+ vblank++;
+ intel_finish_page_flip(dev, 1);
+ }
+
+ for_each_pipe(pipe)
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ I915_WRITE(VLV_IIR, iir);
+ }
+
+out:
+ return ret;
+}
+
+static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
@@ -471,91 +577,77 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
- struct drm_i915_master_private *master_priv;
+ u32 de_iir, gt_iir, de_ier, pm_iir;
+ irqreturn_t ret = IRQ_NONE;
+ int i;
atomic_inc(&dev_priv->irq_received);
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- POSTING_READ(DEIER);
- de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
- pch_iir = I915_READ(SDEIIR);
- pm_iir = I915_READ(GEN6_PMIIR);
-
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
- goto done;
-
- ret = IRQ_HANDLED;
-
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
+ if (gt_iir) {
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ I915_WRITE(GTIIR, gt_iir);
+ ret = IRQ_HANDLED;
}
- if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
- if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[BCS]);
-
- if (de_iir & DE_GSE_IVB)
- intel_opregion_gse_intr(dev);
-
- if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
- intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip_plane(dev, 0);
- }
+ de_iir = I915_READ(DEIIR);
+ if (de_iir) {
+ if (de_iir & DE_GSE_IVB)
+ intel_opregion_gse_intr(dev);
+
+ for (i = 0; i < 3; i++) {
+ if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+ intel_prepare_page_flip(dev, i);
+ intel_finish_page_flip_plane(dev, i);
+ }
+ if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ drm_handle_vblank(dev, i);
+ }
- if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
- intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip_plane(dev, 1);
- }
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT_IVB) {
+ u32 pch_iir = I915_READ(SDEIIR);
- if (de_iir & DE_PIPEA_VBLANK_IVB)
- drm_handle_vblank(dev, 0);
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+ pch_irq_handler(dev, pch_iir);
- if (de_iir & DE_PIPEB_VBLANK_IVB)
- drm_handle_vblank(dev, 1);
+ /* clear the PCH hotplug event before clearing the CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
- /* check event from PCH */
- if (de_iir & DE_PCH_EVENT_IVB) {
- if (pch_iir & SDE_HOTPLUG_MASK_CPT)
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- pch_irq_handler(dev);
+ I915_WRITE(DEIIR, de_iir);
+ ret = IRQ_HANDLED;
}
- if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->rps_lock, flags);
- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
- POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
- queue_work(dev_priv->wq, &dev_priv->rps_work);
+ pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir) {
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ ret = IRQ_HANDLED;
}
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(DEIIR, de_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
-
-done:
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
return ret;
}
+static void ilk_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+ if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
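
The (5 * i) shifts in the Ivybridge handler work because the IVB display-engine interrupt registers pack each pipe's bits at a stride of five: pipe i's flip-done bit is DE_PLANEA_FLIP_DONE_IVB << (5 * i) and its vblank bit DE_PIPEA_VBLANK_IVB << (5 * i), which is what lets the old per-pipe if/else chains collapse into a three-iteration loop. Expressed as helpers (illustrative, matching the shifts used in the patch):

	#define IVB_VBLANK_BIT(pipe)	(DE_PIPEA_VBLANK_IVB << (5 * (pipe)))
	#define IVB_FLIP_DONE_BIT(pipe)	(DE_PLANEA_FLIP_DONE_IVB << (5 * (pipe)))
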
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -563,14 +655,9 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
- struct drm_i915_master_private *master_priv;
- u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
atomic_inc(&dev_priv->irq_received);
- if (IS_GEN6(dev))
- bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
-
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -592,19 +679,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
ret = IRQ_HANDLED;
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
-
- if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (gt_iir & bsd_usr_interrupt)
- notify_ring(dev, &dev_priv->ring[VCS]);
- if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[BCS]);
+ if (IS_GEN5(dev))
+ ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+ else
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
@@ -629,7 +707,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_PCH_EVENT) {
if (pch_iir & hotplug_mask)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- pch_irq_handler(dev);
+ pch_irq_handler(dev, pch_iir);
}
if (de_iir & DE_PCU_EVENT) {
@@ -637,25 +715,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
i915_handle_rps_change(dev);
}
- if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
- /*
- * IIR bits should never already be set because IMR should
- * prevent an interrupt from being shown in IIR. The warning
- * displays a case where we've unsafely cleared
- * dev_priv->pm_iir. Although missing an interrupt of the same
- * type is not a problem, it displays a problem in the logic.
- *
- * The mask bit in IMR is cleared by rps_work.
- */
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->rps_lock, flags);
- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
- POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
- queue_work(dev_priv->wq, &dev_priv->rps_work);
- }
+ if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
@@ -691,7 +752,7 @@ static void i915_error_work_func(struct work_struct *work)
if (atomic_read(&dev_priv->mm.wedged)) {
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
- if (!i915_reset(dev, GRDOM_RENDER)) {
+ if (!i915_reset(dev)) {
atomic_set(&dev_priv->mm.wedged, 0);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
}
@@ -727,7 +788,8 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
- if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+ if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
+ src->has_global_gtt_mapping) {
void __iomem *s;
/* Simply ignore tiling or any overlapping fence.
@@ -782,10 +844,11 @@ i915_error_object_free(struct drm_i915_error_object *obj)
kfree(obj);
}
-static void
-i915_error_state_free(struct drm_device *dev,
- struct drm_i915_error_state *error)
+void
+i915_error_state_free(struct kref *error_ref)
{
+ struct drm_i915_error_state *error = container_of(error_ref,
+ typeof(*error), ref);
int i;
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
@@ -798,37 +861,56 @@ i915_error_state_free(struct drm_device *dev,
kfree(error->overlay);
kfree(error);
}
+static void capture_bo(struct drm_i915_error_buffer *err,
+ struct drm_i915_gem_object *obj)
+{
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->seqno = obj->last_rendering_seqno;
+ err->gtt_offset = obj->gtt_offset;
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : -1;
+ err->cache_level = obj->cache_level;
+}
-static u32 capture_bo_list(struct drm_i915_error_buffer *err,
- int count,
- struct list_head *head)
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
{
struct drm_i915_gem_object *obj;
int i = 0;
list_for_each_entry(obj, head, mm_list) {
- err->size = obj->base.size;
- err->name = obj->base.name;
- err->seqno = obj->last_rendering_seqno;
- err->gtt_offset = obj->gtt_offset;
- err->read_domains = obj->base.read_domains;
- err->write_domain = obj->base.write_domain;
- err->fence_reg = obj->fence_reg;
- err->pinned = 0;
- if (obj->pin_count > 0)
- err->pinned = 1;
- if (obj->user_pin_count > 0)
- err->pinned = -1;
- err->tiling = obj->tiling_mode;
- err->dirty = obj->dirty;
- err->purgeable = obj->madv != I915_MADV_WILLNEED;
- err->ring = obj->ring ? obj->ring->id : -1;
- err->cache_level = obj->cache_level;
-
+ capture_bo(err++, obj);
if (++i == count)
break;
+ }
- err++;
+ return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, gtt_list) {
+ if (obj->pin_count == 0)
+ continue;
+
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
}
return i;
@@ -901,7 +983,6 @@ static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) {
- error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
error->semaphore_mboxes[ring->id][0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
@@ -910,6 +991,7 @@ static void i915_record_ring_state(struct drm_device *dev,
}
if (INTEL_INFO(dev)->gen >= 4) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
@@ -919,11 +1001,13 @@ static void i915_record_ring_state(struct drm_device *dev,
error->bbaddr = I915_READ64(BB_ADDR);
}
} else {
+ error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
error->ipehr[ring->id] = I915_READ(IPEHR);
error->instdone[ring->id] = I915_READ(INSTDONE);
}
+ error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
error->seqno[ring->id] = ring->get_seqno(ring);
error->acthd[ring->id] = intel_ring_get_active_head(ring);
@@ -938,15 +1022,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
struct drm_i915_gem_request *request;
int i, count;
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
- if (ring->obj == NULL)
- continue;
-
+ for_each_ring(ring, dev_priv, i) {
i915_record_ring_state(dev, error, ring);
error->ring[i].batchbuffer =
@@ -1013,8 +1093,19 @@ static void i915_capture_error_state(struct drm_device *dev)
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
dev->primary->index);
+ kref_init(&error->ref);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else if (IS_VALLEYVIEW(dev))
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ else if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+ else
+ error->ier = I915_READ(IER);
+
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
@@ -1034,8 +1125,9 @@ static void i915_capture_error_state(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
i++;
error->active_bo_count = i;
- list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
- i++;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ if (obj->pin_count)
+ i++;
error->pinned_bo_count = i - error->active_bo_count;
error->active_bo = NULL;
@@ -1050,15 +1142,15 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo)
error->active_bo_count =
- capture_bo_list(error->active_bo,
- error->active_bo_count,
- &dev_priv->mm.active_list);
+ capture_active_bo(error->active_bo,
+ error->active_bo_count,
+ &dev_priv->mm.active_list);
if (error->pinned_bo)
error->pinned_bo_count =
- capture_bo_list(error->pinned_bo,
- error->pinned_bo_count,
- &dev_priv->mm.pinned_list);
+ capture_pinned_bo(error->pinned_bo,
+ error->pinned_bo_count,
+ &dev_priv->mm.gtt_list);
do_gettimeofday(&error->time);
@@ -1073,7 +1165,7 @@ static void i915_capture_error_state(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
if (error)
- i915_error_state_free(dev, error);
+ i915_error_state_free(&error->ref);
}
void i915_destroy_error_state(struct drm_device *dev)
@@ -1088,7 +1180,7 @@ void i915_destroy_error_state(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
if (error)
- i915_error_state_free(dev, error);
+ kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
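
The error-state lifetime now follows the standard kref pattern: kref_init() when the state is captured, kref_put() with i915_error_state_free as the release callback when a reference is dropped, and container_of() inside the callback to recover the enclosing structure from the embedded kref. Reduced to its essentials (a generic sketch, not driver code):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct err_state {
		struct kref ref;	/* embedded refcount */
		/* ... captured registers, buffers ... */
	};

	static void err_state_free(struct kref *ref)
	{
		struct err_state *e = container_of(ref, struct err_state, ref);
		kfree(e);
	}

	/* producer:  kref_init(&e->ref);
	 * consumer:  kref_get(&e->ref); ... kref_put(&e->ref, err_state_free); */
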
@@ -1103,33 +1195,26 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (!eir)
return;
- printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
- eir);
+ pr_err("render error detected, EIR: 0x%08x\n", eir);
if (IS_G4X(dev)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+ pr_err(" INSTDONE: 0x%08x\n",
I915_READ(INSTDONE_I965));
- printk(KERN_ERR " INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- printk(KERN_ERR " INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
+ pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
+ pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
}
if (eir & GM45_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
- printk(KERN_ERR "page table error\n");
- printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
- pgtbl_err);
+ pr_err("page table error\n");
+ pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
POSTING_READ(PGTBL_ER);
}
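
The printk-to-pr_err conversion leans on the pr_fmt definition added at the top of the file: pr_err(fmt, ...) expands to printk(KERN_ERR pr_fmt(fmt), ...), so every message gains the module-name prefix without repeating it at each call site (which is also why the define must appear before the first include):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	pr_err("render error detected, EIR: 0x%08x\n", eir);
	/* emitted as: i915: render error detected, EIR: 0x... */
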
@@ -1138,53 +1223,42 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (!IS_GEN2(dev)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
- printk(KERN_ERR "page table error\n");
- printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
- pgtbl_err);
+ pr_err("page table error\n");
+ pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
POSTING_READ(PGTBL_ER);
}
}
if (eir & I915_ERROR_MEMORY_REFRESH) {
- printk(KERN_ERR "memory refresh error:\n");
+ pr_err("memory refresh error:\n");
for_each_pipe(pipe)
- printk(KERN_ERR "pipe %c stat: 0x%08x\n",
+ pr_err("pipe %c stat: 0x%08x\n",
pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */
}
if (eir & I915_ERROR_INSTRUCTION) {
- printk(KERN_ERR "instruction error\n");
- printk(KERN_ERR " INSTPM: 0x%08x\n",
- I915_READ(INSTPM));
+ pr_err("instruction error\n");
+ pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD));
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
+ pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
POSTING_READ(IPEIR);
} else {
u32 ipeir = I915_READ(IPEIR_I965);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+ pr_err(" INSTDONE: 0x%08x\n",
I915_READ(INSTDONE_I965));
- printk(KERN_ERR " INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- printk(KERN_ERR " INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
+ pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
+ pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
}
@@ -1217,6 +1291,8 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
i915_capture_error_state(dev);
i915_report_and_clear_eir(dev);
@@ -1228,11 +1304,8 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- wake_up_all(&dev_priv->ring[RCS].irq_queue);
- if (HAS_BSD(dev))
- wake_up_all(&dev_priv->ring[VCS].irq_queue);
- if (HAS_BLT(dev))
- wake_up_all(&dev_priv->ring[BCS].irq_queue);
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -1265,7 +1338,8 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
- stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
+ stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
+ obj->gtt_offset;
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
@@ -1281,248 +1355,6 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
}
}
-static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv;
- u32 iir, new_iir;
- u32 pipe_stats[I915_MAX_PIPES];
- u32 vblank_status;
- int vblank = 0;
- unsigned long irqflags;
- int irq_received;
- int ret = IRQ_NONE, pipe;
- bool blc_event = false;
-
- atomic_inc(&dev_priv->irq_received);
-
- iir = I915_READ(IIR);
-
- if (INTEL_INFO(dev)->gen >= 4)
- vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
- else
- vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
-
- for (;;) {
- irq_received = iir != 0;
-
- /* Can't rely on pipestat interrupt bit in iir as it might
- * have been cleared after the pipestat interrupt was received.
- * It doesn't set the bit in iir again, but it still produces
- * interrupts (for non-MSI).
- */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false);
-
- for_each_pipe(pipe) {
- int reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg);
-
- /*
- * Clear the PIPE*STAT regs before the IIR
- */
- if (pipe_stats[pipe] & 0x8000ffff) {
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe %c underrun\n",
- pipe_name(pipe));
- I915_WRITE(reg, pipe_stats[pipe]);
- irq_received = 1;
- }
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
- if (!irq_received)
- break;
-
- ret = IRQ_HANDLED;
-
- /* Consume port. Then clear IIR or we'll miss events */
- if ((I915_HAS_HOTPLUG(dev)) &&
- (iir & I915_DISPLAY_PORT_INTERRUPT)) {
- u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
-
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_status);
- if (hotplug_status & dev_priv->hotplug_supported_mask)
- queue_work(dev_priv->wq,
- &dev_priv->hotplug_work);
-
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- I915_READ(PORT_HOTPLUG_STAT);
- }
-
- I915_WRITE(IIR, iir);
- new_iir = I915_READ(IIR); /* Flush posted writes */
-
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
-
- if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
-
- if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
- intel_prepare_page_flip(dev, 0);
- if (dev_priv->flip_pending_is_done)
- intel_finish_page_flip_plane(dev, 0);
- }
-
- if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
- intel_prepare_page_flip(dev, 1);
- if (dev_priv->flip_pending_is_done)
- intel_finish_page_flip_plane(dev, 1);
- }
-
- for_each_pipe(pipe) {
- if (pipe_stats[pipe] & vblank_status &&
- drm_handle_vblank(dev, pipe)) {
- vblank++;
- if (!dev_priv->flip_pending_is_done) {
- i915_pageflip_stall_check(dev, pipe);
- intel_finish_page_flip(dev, pipe);
- }
- }
-
- if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
- blc_event = true;
- }
-
-
- if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev);
-
- /* With MSI, interrupts are only generated when iir
- * transitions from zero to nonzero. If another bit got
- * set while we were handling the existing iir bits, then
- * we would never get another interrupt.
- *
- * This is fine on non-MSI as well, as if we hit this path
- * we avoid exiting the interrupt handler only to generate
- * another one.
- *
- * Note that for MSI this could cause a stray interrupt report
- * if an interrupt landed in the time between writing IIR and
- * the posting read. This should be rare enough to never
- * trigger the 99% of 100,000 interrupts test for disabling
- * stray interrupts.
- */
- iir = new_iir;
- }
-
- return ret;
-}
-
-static int i915_emit_irq(struct drm_device * dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
- i915_kernel_lost_context(dev);
-
- DRM_DEBUG_DRIVER("\n");
-
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 1;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
-
- return dev_priv->counter;
-}
-
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- int ret = 0;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
- DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
- READ_BREADCRUMB(dev_priv));
-
- if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return 0;
- }
-
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
- if (ring->irq_get(ring)) {
- DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- ring->irq_put(ring);
- } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
- ret = -EBUSY;
-
- if (ret == -EBUSY) {
- DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
- }
-
- return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_emit_t *emit = data;
- int result;
-
- if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- result = i915_emit_irq(dev);
- mutex_unlock(&dev->struct_mutex);
-
- if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_wait_t *irqwait = data;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- return i915_wait_irq(dev, irqwait->irq_seq);
-}
-
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -1544,7 +1376,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
/* maintain vblank delivery even in deep C-states */
if (dev_priv->info->gen == 3)
- I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
+ I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -1575,8 +1407,34 @@ static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+ ironlake_enable_display_irq(dev_priv,
+ DE_PIPEA_VBLANK_IVB << (5 * pipe));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return 0;
+}
+
+static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+ u32 dpfl, imr;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dpfl = I915_READ(VLV_DPFLIPSTAT);
+ imr = I915_READ(VLV_IMR);
+ if (pipe == 0) {
+ dpfl |= PIPEA_VBLANK_INT_EN;
+ imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ } else {
+ dpfl |= PIPEB_VBLANK_INT_EN;
+ imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+ }
+ I915_WRITE(VLV_DPFLIPSTAT, dpfl);
+ I915_WRITE(VLV_IMR, imr);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -1592,8 +1450,7 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->info->gen == 3)
- I915_WRITE(INSTPM,
- INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
@@ -1618,63 +1475,30 @@ static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+ ironlake_disable_display_irq(dev_priv,
+ DE_PIPEA_VBLANK_IVB << (pipe * 5));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-/* Set the vblank monitor pipe
- */
-int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t *pipe = data;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+ u32 dpfl, imr;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dpfl = I915_READ(VLV_DPFLIPSTAT);
+ imr = I915_READ(VLV_IMR);
+ if (pipe == 0) {
+ dpfl &= ~PIPEA_VBLANK_INT_EN;
+ imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ } else {
+ dpfl &= ~PIPEB_VBLANK_INT_EN;
+ imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
}
-
- pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
- return 0;
-}
-
-/**
- * Schedule buffer swap at given vertical blank.
- */
-int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* The delayed swap mechanism was fundamentally racy, and has been
- * removed. The model was that the client requested a delayed flip/swap
- * from the kernel, then waited for vblank before continuing to perform
- * rendering. The problem was that the kernel might wake the client
- * up before it dispatched the vblank swap (since the lock has to be
- * held while touching the ringbuffer), in which case the client would
- * clear and start the next frame before the swap occurred, and
- * flicker would occur in addition to likely missing the vblank.
- *
- * In the absence of this ioctl, userland falls back to a correct path
- * of waiting for a vblank, then dispatching the swap on its own.
- * Context switching to userland and back is plenty fast enough for
- * meeting the requirements of vblank swapping.
- */
- return -EINVAL;
+ I915_WRITE(VLV_IMR, imr);
+ I915_WRITE(VLV_DPFLIPSTAT, dpfl);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
@@ -1689,11 +1513,9 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
if (list_empty(&ring->request_list) ||
i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
/* Issue a wake-up to catch stuck h/w. */
- if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
- DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
- ring->name,
- ring->waiting_seqno,
- ring->get_seqno(ring));
+ if (waitqueue_active(&ring->irq_queue)) {
+ DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+ ring->name);
wake_up_all(&ring->irq_queue);
*err = true;
}
@@ -1716,6 +1538,35 @@ static bool kick_ring(struct intel_ring_buffer *ring)
return false;
}
+static bool i915_hangcheck_hung(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->hangcheck_count++ > 1) {
+ bool hung = true;
+
+ DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
+
+ if (!IS_GEN2(dev)) {
+ struct intel_ring_buffer *ring;
+ int i;
+
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ for_each_ring(ring, dev_priv, i)
+ hung &= !kick_ring(ring);
+ }
+
+ return hung;
+ }
+
+ return false;
+}
+
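
i915_hangcheck_hung() only declares a hang if no ring could be kicked: the accumulation hung &= !kick_ring(ring) clears the verdict as soon as any ring was merely stuck on a MI_WAIT_FOR_EVENT and could be released by poking RB_WAIT, in which case the GPU gets another hangcheck period to make progress. In boolean terms:

	/* hung stays true only if every ring refused the kick:
	 *   hung = !kick(ring[0]) && !kick(ring[1]) && ... && !kick(ring[n-1])
	 */
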
/**
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. The first time this is called we simply record
@@ -1726,19 +1577,31 @@ void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
- bool err = false;
+ uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+ struct intel_ring_buffer *ring;
+ bool err = false, idle;
+ int i;
if (!i915_enable_hangcheck)
return;
+ memset(acthd, 0, sizeof(acthd));
+ idle = true;
+ for_each_ring(ring, dev_priv, i) {
+ idle &= i915_hangcheck_ring_idle(ring, &err);
+ acthd[i] = intel_ring_get_active_head(ring);
+ }
+
/* If all work is done then ACTHD clearly hasn't advanced. */
- if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
- i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
- i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
- dev_priv->hangcheck_count = 0;
- if (err)
+ if (idle) {
+ if (err) {
+ if (i915_hangcheck_hung(dev))
+ return;
+
goto repeat;
+ }
+
+ dev_priv->hangcheck_count = 0;
return;
}
@@ -1749,47 +1612,16 @@ void i915_hangcheck_elapsed(unsigned long data)
instdone = I915_READ(INSTDONE_I965);
instdone1 = I915_READ(INSTDONE1);
}
- acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
- acthd_bsd = HAS_BSD(dev) ?
- intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
- acthd_blt = HAS_BLT(dev) ?
- intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
- if (dev_priv->last_acthd == acthd &&
- dev_priv->last_acthd_bsd == acthd_bsd &&
- dev_priv->last_acthd_blt == acthd_blt &&
+ if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
dev_priv->last_instdone == instdone &&
dev_priv->last_instdone1 == instdone1) {
- if (dev_priv->hangcheck_count++ > 1) {
- DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
- i915_handle_error(dev, true);
-
- if (!IS_GEN2(dev)) {
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- if (kick_ring(&dev_priv->ring[RCS]))
- goto repeat;
-
- if (HAS_BSD(dev) &&
- kick_ring(&dev_priv->ring[VCS]))
- goto repeat;
-
- if (HAS_BLT(dev) &&
- kick_ring(&dev_priv->ring[BCS]))
- goto repeat;
- }
-
+ if (i915_hangcheck_hung(dev))
return;
- }
} else {
dev_priv->hangcheck_count = 0;
- dev_priv->last_acthd = acthd;
- dev_priv->last_acthd_bsd = acthd_bsd;
- dev_priv->last_acthd_blt = acthd_blt;
+ memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
dev_priv->last_instdone = instdone;
dev_priv->last_instdone1 = instdone1;
}
@@ -1808,10 +1640,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
atomic_set(&dev_priv->irq_received, 0);
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
- INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
@@ -1832,6 +1660,38 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
+static void valleyview_irq_preinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ /* VLV magic */
+ I915_WRITE(VLV_IMR, 0);
+ I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
+ I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
+ I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+
+ /* and GT */
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ POSTING_READ(GTIER);
+
+ I915_WRITE(DPINVGTT, 0xff);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
+
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
@@ -1861,13 +1721,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
u32 render_irqs;
u32 hotplug_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
- if (HAS_BSD(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
- if (HAS_BLT(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
-
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask = ~display_mask;
/* should always be able to generate an irq */
@@ -1884,8 +1737,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
if (IS_GEN6(dev))
render_irqs =
GT_USER_INTERRUPT |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
+ GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
else
render_irqs =
GT_USER_INTERRUPT |
@@ -1930,26 +1783,24 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
- DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
- DE_PLANEB_FLIP_DONE_IVB;
+ u32 display_mask =
+ DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
+ DE_PLANEC_FLIP_DONE_IVB |
+ DE_PLANEB_FLIP_DONE_IVB |
+ DE_PLANEA_FLIP_DONE_IVB;
u32 render_irqs;
u32 hotplug_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
- if (HAS_BSD(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
- if (HAS_BLT(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
-
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask = ~display_mask;
/* should always be able to generate an irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
- I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
- DE_PIPEB_VBLANK_IVB);
+ I915_WRITE(DEIER,
+ display_mask |
+ DE_PIPEC_VBLANK_IVB |
+ DE_PIPEB_VBLANK_IVB |
+ DE_PIPEA_VBLANK_IVB);
POSTING_READ(DEIER);
dev_priv->gt_irq_mask = ~0;
@@ -1957,8 +1808,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
+ render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
@@ -1978,15 +1829,496 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void i915_driver_irq_preinstall(struct drm_device * dev)
+static int valleyview_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 render_irqs;
+ u32 enable_mask;
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ u16 msid;
+
+ enable_mask = I915_DISPLAY_PORT_INTERRUPT;
+ enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ dev_priv->irq_mask = ~enable_mask;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ /* Hack for broken MSIs on VLV */
+ pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
+ pci_read_config_word(dev->pdev, 0x98, &msid);
+ msid &= 0xff; /* mask out delivery bits */
+ msid |= (1<<14);
+ pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
+
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, enable_mask);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(PIPESTAT(0), 0xffff);
+ I915_WRITE(PIPESTAT(1), 0xffff);
+ POSTING_READ(VLV_IER);
+
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+
+ render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
+ GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+ GT_GEN6_BLT_USER_INTERRUPT |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+ GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_RENDER_CS_ERROR_INTERRUPT |
+ GT_SYNC_STATUS |
+ GT_USER_INTERRUPT;
+
+ dev_priv->gt_irq_mask = ~render_irqs;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, 0);
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
+
+ /* ack & enable invalid PTE error interrupts */
+#if 0 /* FIXME: add support to irq handler for checking these bits */
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+ I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
+#endif
+
+ I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+#if 0 /* FIXME: check register definitions; some have moved */
+ /* Note HDMI and DP share bits */
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+#endif
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+
+ return 0;
+}
+
+static void valleyview_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (!dev_priv)
+ return;
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
+
+static void ironlake_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!dev_priv)
+ return;
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+}
+
+static void i8xx_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
atomic_set(&dev_priv->irq_received, 0);
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ POSTING_READ16(IER);
+}
+
+static int i8xx_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ I915_WRITE16(EMR,
+ ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+
+ I915_WRITE16(IER,
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT);
+ POSTING_READ16(IER);
+
+ return 0;
+}
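
A note on the register pairing here: ISR reflects raw source status, IMR masks sources out of consideration (0 = unmasked), and IER selects which latched IIR bits actually assert the interrupt line, which is why the postinstall both unmasks bits in IMR and enables them in IER. A toy model of that gating under those assumed semantics, with the registers as plain variables:

    #include <stdbool.h>
    #include <stdint.h>

    struct irq_regs {
        uint16_t imr;  /* 1 = masked */
        uint16_t ier;  /* 1 = may assert the line */
        uint16_t iir;  /* latched events, write-1-to-clear */
    };

    /* Returns true when the event should raise an interrupt. */
    static bool post_event(struct irq_regs *r, uint16_t event)
    {
        if (event & r->imr)
            return false;              /* masked: never latched */
        r->iir |= event;               /* latched until software clears it */
        return (event & r->ier) != 0;  /* gated by the enable */
    }

    int main(void)
    {
        struct irq_regs r = { .imr = 0, .ier = 1 << 1, .iir = 0 };
        return post_event(&r, 1 << 1) ? 0 : 1;
    }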
+
+static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u16 iir, new_iir;
+ u32 pipe_stats[2];
+ unsigned long irqflags;
+ int irq_received;
+ int pipe;
+ u16 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ16(IIR);
+ if (iir == 0)
+ return IRQ_NONE;
+
+ while (iir & ~flip_mask) {
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ I915_WRITE16(IIR, iir & ~flip_mask);
+ new_iir = I915_READ16(IIR); /* Flush posted writes */
+
+ i915_update_dri1_breadcrumb(dev);
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+
+ if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, 0)) {
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip(dev, 0);
+ flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
+ }
+ }
+
+ if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, 1)) {
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip(dev, 1);
+ flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ }
+ }
+
+ iir = new_iir;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void i8xx_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+ }
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ I915_WRITE16(IIR, I915_READ16(IIR));
+}
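
PIPESTAT packs enable bits in the high half and sticky, write-one-to-clear status bits in the low half (plus the underrun status at bit 31, hence the 0x8000ffff masks in the handlers). The two writes above therefore first drop the enables and then clear whatever status was latched by writing it back. A simplified model of that behaviour, ignoring the bit-31 status:

    #include <assert.h>
    #include <stdint.h>

    /* High half: plain enables. Low half: sticky status, cleared by
     * writing 1s back (simplified; real hw also latches status at bit 31). */
    static uint32_t pipestat_write(uint32_t cur, uint32_t val)
    {
        uint32_t enables = val & 0xffff0000;
        uint32_t status = (cur & 0x0000ffff) & ~val;
        return enables | status;
    }

    int main(void)
    {
        uint32_t reg = 0x00040005;      /* one enable, two latched events */
        reg = pipestat_write(reg, 0);   /* clear enable bits... */
        reg = pipestat_write(reg, reg); /* ...then clear status bits */
        assert(reg == 0);
        return 0;
    }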
+
+static void i915_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xeffe);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+ POSTING_READ(IER);
+}
+
+static int i915_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 enable_mask;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask =
+ I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ /* Enable in IER... */
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+ /* and unmask in IMR */
+ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+ }
+
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ I915_WRITE(IER, enable_mask);
+ POSTING_READ(IER);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+
+ /* Ignore TV since it's buggy */
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ }
+
+ intel_opregion_enable_asle(dev);
+
+ return 0;
+}
+
+static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
+ unsigned long irqflags;
+ u32 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ u32 flip[2] = {
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
+ };
+ int pipe, ret = IRQ_NONE;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ(IIR);
+ do {
+ bool irq_received = (iir & ~flip_mask) != 0;
+ bool blc_event = false;
+
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /* Clear the PIPE*STAT regs before the IIR */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = true;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ if (!irq_received)
+ break;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if ((I915_HAS_HOTPLUG(dev)) &&
+ (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ POSTING_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir & ~flip_mask);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+
+ for_each_pipe(pipe) {
+ int plane = pipe;
+ if (IS_MOBILE(dev))
+ plane = !plane;
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, pipe)) {
+ if (iir & flip[plane]) {
+ intel_prepare_page_flip(dev, plane);
+ intel_finish_page_flip(dev, pipe);
+ flip_mask &= ~flip[plane];
+ }
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+ }
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev);
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ ret = IRQ_HANDLED;
+ iir = new_iir;
+ } while (iir & ~flip_mask);
+
+ i915_update_dri1_breadcrumb(dev);
+
+ return ret;
+}
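
The do/while shape above is what makes this safe under MSI: the message fires only on IIR's zero-to-nonzero transition, so the handler clears IIR, re-reads it, and keeps looping until it stays clear; a bit that became set between the write and the posting read is picked up on the next pass instead of being lost. The same pattern, distilled into a stand-alone sketch with the register stubbed by a variable:

    #include <stdint.h>

    static uint32_t fake_iir;  /* stand-in for the real register */

    static uint32_t read_iir(void) { return fake_iir; }
    static void write_iir(uint32_t bits) { fake_iir &= ~bits; } /* W1C */
    static void service(uint32_t bits) { (void)bits; }

    static void edge_safe_handler(void)
    {
        uint32_t iir = read_iir();

        while (iir) {
            write_iir(iir);             /* clear first... */
            uint32_t next = read_iir(); /* ...then re-read: bits set here
                                         * would otherwise be a lost edge
                                         * under MSI */
            service(iir);
            iir = next;
        }
    }

    int main(void)
    {
        fake_iir = 0x3;
        edge_safe_handler();
        return (int)fake_iir;  /* 0: everything drained */
    }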
+
+static void i915_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xffff);
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+ }
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
+ I915_WRITE(IIR, I915_READ(IIR));
+}
+
+static void i965_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -2001,20 +2333,25 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
POSTING_READ(IER);
}
-/*
- * Must be called after intel_modeset_init or hotplug interrupts won't be
- * enabled correctly.
- */
-static int i915_driver_irq_postinstall(struct drm_device *dev)
+static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
+ u32 enable_mask;
u32 error_mask;
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
/* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
+ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask = ~dev_priv->irq_mask;
+ enable_mask |= I915_USER_INTERRUPT;
+
+ if (IS_G4X(dev))
+ enable_mask |= I915_BSD_USER_INTERRUPT;
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
@@ -2081,31 +2418,124 @@ static int i915_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void ironlake_irq_uninstall(struct drm_device *dev)
+static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
+ struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 iir, new_iir;
+ u32 pipe_stats[I915_MAX_PIPES];
+ unsigned long irqflags;
+ int irq_received;
+ int ret = IRQ_NONE, pipe;
- if (!dev_priv)
- return;
+ atomic_inc(&dev_priv->irq_received);
- dev_priv->vblank_pipe = 0;
+ iir = I915_READ(IIR);
- I915_WRITE(HWSTAM, 0xffffffff);
+ for (;;) {
+ bool blc_event = false;
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- I915_WRITE(DEIIR, I915_READ(DEIIR));
+ irq_received = iir != 0;
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- I915_WRITE(GTIIR, I915_READ(GTIIR));
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
- I915_WRITE(SDEIMR, 0xffffffff);
- I915_WRITE(SDEIER, 0x0);
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ if (!irq_received)
+ break;
+
+ ret = IRQ_HANDLED;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if ((I915_HAS_HOTPLUG(dev)) &&
+ (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (iir & I915_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 0);
+
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 1);
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, pipe)) {
+ i915_pageflip_stall_check(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+ }
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev);
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ iir = new_iir;
+ }
+
+ i915_update_dri1_breadcrumb(dev);
+
+ return ret;
}
-static void i915_driver_irq_uninstall(struct drm_device * dev)
+static void i965_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
@@ -2113,8 +2543,6 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- dev_priv->vblank_pipe = 0;
-
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -2134,9 +2562,15 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
void intel_irq_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
@@ -2147,7 +2581,14 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->get_vblank_timestamp = NULL;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_VALLEYVIEW(dev)) {
+ dev->driver->irq_handler = valleyview_irq_handler;
+ dev->driver->irq_preinstall = valleyview_irq_preinstall;
+ dev->driver->irq_postinstall = valleyview_irq_postinstall;
+ dev->driver->irq_uninstall = valleyview_irq_uninstall;
+ dev->driver->enable_vblank = valleyview_enable_vblank;
+ dev->driver->disable_vblank = valleyview_disable_vblank;
+ } else if (IS_IVYBRIDGE(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2155,6 +2596,14 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
+ } else if (IS_HASWELL(dev)) {
+ /* Share interrupts handling with IVB */
+ dev->driver->irq_handler = ivybridge_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ivybridge_enable_vblank;
+ dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2163,10 +2612,25 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = ironlake_enable_vblank;
dev->driver->disable_vblank = ironlake_disable_vblank;
} else {
- dev->driver->irq_preinstall = i915_driver_irq_preinstall;
- dev->driver->irq_postinstall = i915_driver_irq_postinstall;
- dev->driver->irq_uninstall = i915_driver_irq_uninstall;
- dev->driver->irq_handler = i915_driver_irq_handler;
+ if (INTEL_INFO(dev)->gen == 2) {
+ dev->driver->irq_preinstall = i8xx_irq_preinstall;
+ dev->driver->irq_postinstall = i8xx_irq_postinstall;
+ dev->driver->irq_handler = i8xx_irq_handler;
+ dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ } else if (INTEL_INFO(dev)->gen == 3) {
+ /* IIR "flip pending" means done if this bit is set */
+ I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+
+ dev->driver->irq_preinstall = i915_irq_preinstall;
+ dev->driver->irq_postinstall = i915_irq_postinstall;
+ dev->driver->irq_uninstall = i915_irq_uninstall;
+ dev->driver->irq_handler = i915_irq_handler;
+ } else {
+ dev->driver->irq_preinstall = i965_irq_preinstall;
+ dev->driver->irq_postinstall = i965_irq_postinstall;
+ dev->driver->irq_uninstall = i965_irq_uninstall;
+ dev->driver->irq_handler = i965_irq_handler;
+ }
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9d24d65f0c3e..2d49b9507ed0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -27,6 +27,11 @@
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+
+#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
+#define _MASKED_BIT_DISABLE(a) ((a) << 16)
+
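These helpers target the "masked" registers (MI_MODE, MI_ARB_STATE, CACHE_MODE_*, ECOSKPD and friends) where the upper 16 bits are a per-bit write enable: a write only latches the low bits whose corresponding mask bit is set, so no read-modify-write cycle is needed. A stand-alone model of that latch:

    #include <assert.h>
    #include <stdint.h>

    #define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
    #define MASKED_BIT_DISABLE(a) ((a) << 16)

    /* Bits 31:16 of the written value select which of bits 15:0 change. */
    static uint16_t masked_write(uint16_t cur, uint32_t val)
    {
        uint16_t mask = val >> 16;
        return (cur & ~mask) | (val & mask);
    }

    int main(void)
    {
        uint16_t reg = 0;
        reg = masked_write(reg, MASKED_BIT_ENABLE(1 << 0));
        assert(reg == 1);
        reg = masked_write(reg, MASKED_BIT_DISABLE(1 << 0));
        assert(reg == 0);
        return 0;
    }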
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
@@ -77,6 +82,7 @@
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GRDOM_RESET_ENABLE (1<<0)
#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
@@ -125,6 +131,13 @@
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
+#define GAC_ECO_BITS 0x14090
+#define ECOBITS_PPGTT_CACHE64B (3<<8)
+#define ECOBITS_PPGTT_CACHE4B (0<<8)
+
+#define GAB_CTL 0x24000
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@@ -222,6 +235,7 @@
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -301,6 +315,61 @@
#define DEBUG_RESET_RENDER (1<<8)
#define DEBUG_RESET_DISPLAY (1<<9)
+/*
+ * DPIO - a special bus for various display related registers to hide behind:
+ * 0x800c: m1, m2, n, p1, p2, k dividers
+ * 0x8014: REF and SFR select
+ * 0x8014: N divider, VCO select
+ * 0x801c/3c: core clock bits
+ * 0x8048/68: low pass filter coefficients
+ * 0x8100: fast clock controls
+ */
+#define DPIO_PKT 0x2100
+#define DPIO_RID (0<<24)
+#define DPIO_OP_WRITE (1<<16)
+#define DPIO_OP_READ (0<<16)
+#define DPIO_PORTID (0x12<<8)
+#define DPIO_BYTE (0xf<<4)
+#define DPIO_BUSY (1<<0) /* status only */
+#define DPIO_DATA 0x2104
+#define DPIO_REG 0x2108
+#define DPIO_CTL 0x2110
+#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1<<1)
+#define DPIO_RESET (1<<0)
+
+#define _DPIO_DIV_A 0x800c
+#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
+#define DPIO_K_SHIFT (24) /* 4 bits */
+#define DPIO_P1_SHIFT (21) /* 3 bits */
+#define DPIO_P2_SHIFT (16) /* 5 bits */
+#define DPIO_N_SHIFT (12) /* 4 bits */
+#define DPIO_ENABLE_CALIBRATION (1<<11)
+#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
+#define DPIO_M2DIV_MASK 0xff
+#define _DPIO_DIV_B 0x802c
+#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+
+#define _DPIO_REFSFR_A 0x8014
+#define DPIO_REFSEL_OVERRIDE 27
+#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
+#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
+#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
+#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
+#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
+#define _DPIO_REFSFR_B 0x8034
+#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+
+#define _DPIO_CORE_CLK_A 0x801c
+#define _DPIO_CORE_CLK_B 0x803c
+#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+
+#define _DPIO_LFP_COEFF_A 0x8048
+#define _DPIO_LFP_COEFF_B 0x8068
+#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
+
+#define DPIO_FASTCLK_DISABLE 0x8100
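
Reads and writes to these DPIO offsets go through the DPIO_PKT/DPIO_DATA/DPIO_REG window rather than plain MMIO. The definitions suggest staging the payload and target offset and then kicking a packet until DPIO_BUSY clears; the sketch below is an inference from these bits alone, not a verified programming sequence, with MMIO stubbed by an array:

    #include <stdint.h>

    #define DPIO_PKT      0x2100
    #define DPIO_RID      (0u << 24)
    #define DPIO_OP_WRITE (1u << 16)
    #define DPIO_PORTID   (0x12u << 8)
    #define DPIO_BYTE     (0xfu << 4)
    #define DPIO_BUSY     (1u << 0)
    #define DPIO_DATA     0x2104
    #define DPIO_REG      0x2108

    static uint32_t mmio[0x1000];  /* stub register file */
    static void wr(uint32_t reg, uint32_t val) { mmio[reg >> 2] = val; }
    static uint32_t rd(uint32_t reg) { return mmio[reg >> 2]; }

    static void dpio_write(uint32_t reg, uint32_t val)
    {
        wr(DPIO_DATA, val);  /* stage payload */
        wr(DPIO_REG, reg);   /* stage DPIO-side offset */
        wr(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID | DPIO_BYTE);
        while (rd(DPIO_PKT) & DPIO_BUSY)
            ;                /* poll for completion */
    }

    int main(void)
    {
        dpio_write(0x800c, 0);  /* e.g. program the divider register */
        return 0;
    }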
/*
* Fence registers
@@ -360,8 +429,6 @@
#define ARB_MODE 0x04030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
-#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
-#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define DONE_REG 0x40b0
@@ -417,6 +484,7 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
+#define DMA_FADD_I8XX 0x020d0
#define ERROR_GEN6 0x040a0
@@ -432,6 +500,7 @@
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
@@ -447,14 +516,16 @@
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
-#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
-#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
-
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
+#define VLV_IIR_RW 0x182084
+#define VLV_IER 0x1820a0
+#define VLV_IIR 0x1820a4
+#define VLV_IMR 0x1820a8
+#define VLV_ISR 0x1820ac
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@@ -500,7 +571,6 @@
#define LM_BURST_LENGTH 0x00000700
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
-#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
/* Make render/texture TLB fetches lower priorty than associated data
* fetches. This is not turned on by default
@@ -565,7 +635,6 @@
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
#define CACHE_MODE_0 0x02120 /* 915+ only */
-#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -579,7 +648,12 @@
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
-/* GEN6 interrupt control */
+#define CACHE_MODE_1 0x7004 /* IVB+ */
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
+
+/* GEN6 interrupt control
+ * Note that the per-ring interrupt bits do alias with the global interrupt bits
+ * in GTIMR. */
#define GEN6_RENDER_HWSTAM 0x2098
#define GEN6_RENDER_IMR 0x20a8
#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
@@ -615,6 +689,21 @@
#define GEN6_BSD_RNCID 0x12198
+#define GEN7_FF_THREAD_MODE 0x20a0
+#define GEN7_FF_SCHED_MASK 0x0077070
+#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
+#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
+#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
+#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
+#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
+#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
+#define GEN7_FF_VS_SCHED_HW (0x0<<12)
+#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
+#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
+#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
+#define GEN7_FF_DS_SCHED_HW (0x0<<4)
+
/*
* Framebuffer compression (915+ only)
*/
@@ -743,9 +832,9 @@
#define GMBUS_PORT_PANEL 3
#define GMBUS_PORT_DPC 4 /* HDMIC */
#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
- /* 6 reserved */
-#define GMBUS_PORT_DPD 7 /* HDMID */
-#define GMBUS_NUM_PORTS 8
+#define GMBUS_PORT_DPD 6 /* HDMID */
+#define GMBUS_PORT_RESERVED 7 /* 7 reserved */
+#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
#define GMBUS1 0x5104 /* command/status */
#define GMBUS_SW_CLR_INT (1<<31)
#define GMBUS_SW_RDY (1<<30)
@@ -797,7 +886,9 @@
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
#define DPLL_VGA_MODE_DIS (1 << 28)
#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
@@ -809,6 +900,7 @@
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
@@ -904,6 +996,7 @@
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define _DPLL_B_MD 0x06020 /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+
#define _FPA0 0x06040
#define _FPA1 0x06044
#define _FPB0 0x06048
@@ -1044,6 +1137,9 @@
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
+#define FW_BLC_SELF_VLV 0x6500
+#define FW_CSPWRDWNEN (1<<15)
+
/*
* Palette regs
*/
@@ -1601,9 +1697,12 @@
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170
+/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT_B (1 << 29)
#define VIDEO_DIP_PORT_C (2 << 29)
+#define VIDEO_DIP_PORT_D (3 << 29)
+#define VIDEO_DIP_PORT_MASK (3 << 29)
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
@@ -1614,6 +1713,10 @@
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+#define VIDEO_DIP_FREQ_MASK (3 << 16)
+/* HSW and later: */
+#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
/* Panel power sequencing */
#define PP_STATUS 0x61200
@@ -2380,7 +2483,8 @@
/* Pipe A */
#define _PIPEADSL 0x70000
-#define DSL_LINEMASK 0x00000fff
+#define DSL_LINEMASK_GEN2 0x00000fff
+#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
@@ -2422,23 +2526,30 @@
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
+#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
+#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
+#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
+#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
@@ -2463,6 +2574,40 @@
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
+#define VLV_DPFLIPSTAT 0x70028
+#define PIPEB_LINE_COMPARE_STATUS (1<<29)
+#define PIPEB_HLINE_INT_EN (1<<28)
+#define PIPEB_VBLANK_INT_EN (1<<27)
+#define SPRITED_FLIPDONE_INT_EN (1<<26)
+#define SPRITEC_FLIPDONE_INT_EN (1<<25)
+#define PLANEB_FLIPDONE_INT_EN (1<<24)
+#define PIPEA_LINE_COMPARE_STATUS (1<<21)
+#define PIPEA_HLINE_INT_EN (1<<20)
+#define PIPEA_VBLANK_INT_EN (1<<19)
+#define SPRITEB_FLIPDONE_INT_EN (1<<18)
+#define SPRITEA_FLIPDONE_INT_EN (1<<17)
+#define PLANEA_FLIPDONE_INT_EN (1<<16)
+
+#define DPINVGTT 0x7002c /* VLV only */
+#define CURSORB_INVALID_GTT_INT_EN (1<<23)
+#define CURSORA_INVALID_GTT_INT_EN (1<<22)
+#define SPRITED_INVALID_GTT_INT_EN (1<<21)
+#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
+#define PLANEB_INVALID_GTT_INT_EN (1<<19)
+#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
+#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
+#define PLANEA_INVALID_GTT_INT_EN (1<<16)
+#define DPINVGTT_EN_MASK 0xff0000
+#define CURSORB_INVALID_GTT_STATUS (1<<7)
+#define CURSORA_INVALID_GTT_STATUS (1<<6)
+#define SPRITED_INVALID_GTT_STATUS (1<<5)
+#define SPRITEC_INVALID_GTT_STATUS (1<<4)
+#define PLANEB_INVALID_GTT_STATUS (1<<3)
+#define SPRITEB_INVALID_GTT_STATUS (1<<2)
+#define SPRITEA_INVALID_GTT_STATUS (1<<1)
+#define PLANEA_INVALID_GTT_STATUS (1<<0)
+#define DPINVGTT_STATUS_MASK 0xff
+
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
@@ -2492,11 +2637,28 @@
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
+/* drain latency register values */
+#define DRAIN_LATENCY_PRECISION_32 32
+#define DRAIN_LATENCY_PRECISION_16 16
+#define VLV_DDL1 0x70050
+#define DDL_CURSORA_PRECISION_32 (1<<31)
+#define DDL_CURSORA_PRECISION_16 (0<<31)
+#define DDL_CURSORA_SHIFT 24
+#define DDL_PLANEA_PRECISION_32 (1<<7)
+#define DDL_PLANEA_PRECISION_16 (0<<7)
+#define VLV_DDL2 0x70054
+#define DDL_CURSORB_PRECISION_32 (1<<31)
+#define DDL_CURSORB_PRECISION_16 (0<<31)
+#define DDL_CURSORB_SHIFT 24
+#define DDL_PLANEB_PRECISION_32 (1<<7)
+#define DDL_PLANEB_PRECISION_16 (0<<7)
+
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
#define I830_FIFO_LINE_SIZE 32
+#define VALLEYVIEW_FIFO_SIZE 255
#define G4X_FIFO_SIZE 127
#define I965_FIFO_SIZE 512
#define I945_FIFO_SIZE 127
@@ -2504,6 +2666,7 @@
#define I855GM_FIFO_SIZE 127 /* In cachelines */
#define I830_FIFO_SIZE 95
+#define VALLEYVIEW_MAX_WM 0xff
#define G4X_MAX_WM 0x3f
#define I915_MAX_WM 0x3f
@@ -2518,6 +2681,7 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+#define VALLEYVIEW_CURSOR_MAX_WM 64
#define I965_CURSOR_FIFO 64
#define I965_CURSOR_MAX_WM 32
#define I965_CURSOR_DFT_WM 8
@@ -2726,6 +2890,13 @@
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK (0xfffff000)
+#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
+#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
+ (I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg))))
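
I915_MODIFY_DISPBASE replaces the page-aligned surface base while preserving whatever the hardware keeps in the low 12 bits of the register. The mask arithmetic, with concrete numbers:

    #include <assert.h>
    #include <stdint.h>

    #define DISP_BASEADDR_MASK 0xfffff000u

    int main(void)
    {
        uint32_t reg = 0x01234567;      /* old base 0x01234000, low bits 0x567 */
        uint32_t gfx_addr = 0x0abcd000; /* new page-aligned base */

        reg = gfx_addr | (reg & ~DISP_BASEADDR_MASK);
        assert(reg == 0x0abcd567);
        return 0;
    }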
+
/* VBIOS flags */
#define SWF00 0x71410
#define SWF01 0x71414
@@ -3058,25 +3229,38 @@
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
#define DE_AUX_CHANNEL_A_IVB (1<<26)
+#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
+#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
+#define DE_PIPEC_VBLANK_IVB (1<<10)
#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
-#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
-#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEB_VBLANK_IVB (1<<5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
+#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
+#define MASTER_INTERRUPT_ENABLE (1<<31)
+
#define DEISR 0x44000
#define DEIMR 0x44004
#define DEIIR 0x44008
#define DEIER 0x4400c
-/* GT interrupt */
-#define GT_PIPE_NOTIFY (1 << 4)
-#define GT_SYNC_STATUS (1 << 2)
-#define GT_USER_INTERRUPT (1 << 0)
-#define GT_BSD_USER_INTERRUPT (1 << 5)
-#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
-#define GT_BLT_USER_INTERRUPT (1 << 22)
+/* GT interrupt.
+ * Note that for gen6+ the ring-specific interrupt bits do alias with the
+ * corresponding bits in the per-ring interrupt control registers. */
+#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
+#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25)
+#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22)
+#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15)
+#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
+#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
+#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
+#define GT_PIPE_NOTIFY (1 << 4)
+#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
+#define GT_SYNC_STATUS (1 << 2)
+#define GT_USER_INTERRUPT (1 << 0)
#define GTISR 0x44010
#define GTIMR 0x44014
@@ -3226,15 +3410,15 @@
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
-#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -3329,6 +3513,57 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+#define VLV_VIDEO_DIP_CTL_A 0x60220
+#define VLV_VIDEO_DIP_DATA_A 0x60208
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+
+#define VLV_VIDEO_DIP_CTL_B 0x61170
+#define VLV_VIDEO_DIP_DATA_B 0x61174
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+
+#define VLV_TVIDEO_DIP_CTL(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+#define VLV_TVIDEO_DIP_DATA(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+#define VLV_TVIDEO_DIP_GCP(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+
+/* Haswell DIP controls */
+#define HSW_VIDEO_DIP_CTL_A 0x60200
+#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
+#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
+#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
+#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
+#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
+#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
+#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
+#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
+#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
+#define HSW_VIDEO_DIP_GCP_A 0x60210
+
+#define HSW_VIDEO_DIP_CTL_B 0x61200
+#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
+#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+#define HSW_VIDEO_DIP_GCP_B 0x61210
+
+#define HSW_TVIDEO_DIP_CTL(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+#define HSW_TVIDEO_DIP_GCP(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
@@ -3489,6 +3724,9 @@
#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
+/* LPT */
+#define FDI_PORT_WIDTH_2X_LPT (1<<19)
+#define FDI_PORT_WIDTH_1X_LPT (0<<19)
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
@@ -3549,6 +3787,7 @@
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
/* or SDVOB */
+#define VLV_HDMIB 0x61140
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER(pipe) ((pipe) << 30)
@@ -3714,6 +3953,8 @@
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
#define FORCEWAKE 0xA18C
+#define FORCEWAKE_VLV 0x1300b0
+#define FORCEWAKE_ACK_VLV 0x1300b4
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MT_ACK 0x130040
@@ -3731,6 +3972,7 @@
#define GEN6_UCGCTL1 0x9400
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
#define GEN6_UCGCTL2 0x9404
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
@@ -3811,6 +4053,11 @@
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define GEN6_GT_GFX_RC6_LOCKED 0x138104
+#define GEN6_GT_GFX_RC6 0x138108
+#define GEN6_GT_GFX_RC6p 0x13810C
+#define GEN6_GT_GFX_RC6pp 0x138110
+
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
@@ -3870,4 +4117,197 @@
#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+/* HSW Power Wells */
+#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
+#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
+#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
+#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
+#define HSW_PWR_WELL_ENABLE (1<<31)
+#define HSW_PWR_WELL_STATE (1<<30)
+#define HSW_PWR_WELL_CTL5 0x45410
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
+#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_CTL6 0x45414
+
+/* Per-pipe DDI Function Control */
+#define PIPE_DDI_FUNC_CTL_A 0x60400
+#define PIPE_DDI_FUNC_CTL_B 0x61400
+#define PIPE_DDI_FUNC_CTL_C 0x62400
+#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
+ PIPE_DDI_FUNC_CTL_A, \
+ PIPE_DDI_FUNC_CTL_B)
+#define PIPE_DDI_FUNC_ENABLE (1<<31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define PIPE_DDI_PORT_MASK (0xf<<28)
+#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
+#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
+#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
+#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
+#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
+#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
+#define PIPE_DDI_BPC_8 (0<<20)
+#define PIPE_DDI_BPC_10 (1<<20)
+#define PIPE_DDI_BPC_6 (2<<20)
+#define PIPE_DDI_BPC_12 (3<<20)
+#define PIPE_DDI_BFI_ENABLE (1<<4)
+#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
+#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
+#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
+
+/* DisplayPort Transport Control */
+#define DP_TP_CTL_A 0x64040
+#define DP_TP_CTL_B 0x64140
+#define DP_TP_CTL(port) _PORT(port, \
+ DP_TP_CTL_A, \
+ DP_TP_CTL_B)
+#define DP_TP_CTL_ENABLE (1<<31)
+#define DP_TP_CTL_MODE_SST (0<<27)
+#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
+#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+
+/* DisplayPort Transport Status */
+#define DP_TP_STATUS_A 0x64044
+#define DP_TP_STATUS_B 0x64144
+#define DP_TP_STATUS(port) _PORT(port, \
+ DP_TP_STATUS_A, \
+ DP_TP_STATUS_B)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+
+/* DDI Buffer Control */
+#define DDI_BUF_CTL_A 0x64000
+#define DDI_BUF_CTL_B 0x64100
+#define DDI_BUF_CTL(port) _PORT(port, \
+ DDI_BUF_CTL_A, \
+ DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE (1<<31)
+#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
+#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
+#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
+#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
+#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
+#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
+#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
+#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
+#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
+#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_PORT_WIDTH_X1 (0<<1)
+#define DDI_PORT_WIDTH_X2 (1<<1)
+#define DDI_PORT_WIDTH_X4 (3<<1)
+#define DDI_INIT_DISPLAY_DETECTED (1<<0)
+
+/* DDI Buffer Translations */
+#define DDI_BUF_TRANS_A 0x64E00
+#define DDI_BUF_TRANS_B 0x64E60
+#define DDI_BUF_TRANS(port) _PORT(port, \
+ DDI_BUF_TRANS_A, \
+ DDI_BUF_TRANS_B)
+
+/* Sideband Interface (SBI) is programmed indirectly, via
+ * SBI_ADDR, which contains the register offset; and SBI_DATA,
+ * which contains the payload */
+#define SBI_ADDR 0xC6000
+#define SBI_DATA 0xC6004
+#define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_OP_CRRD (0x6<<8)
+#define SBI_CTL_OP_CRWR (0x7<<8)
+#define SBI_RESPONSE_FAIL (0x1<<1)
+#define SBI_RESPONSE_SUCCESS (0x0<<1)
+#define SBI_BUSY (0x1<<0)
+#define SBI_READY (0x0<<0)
+
+/* SBI offsets */
+#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
+#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCAUXDIV6 0x0610
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
+#define SBI_DBUFF0 0x2a00
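
Combining the comment above with these bits, an SBI cycle plausibly writes the target offset to SBI_ADDR, starts an opcode in SBI_CTL_STAT, spins until SBI_BUSY clears, and reads SBI_DATA back for CRRD cycles. The exact field placement inside SBI_ADDR is not defined here, so treat this as a sketch of the shape only, with MMIO stubbed:

    #include <stdint.h>

    #define SBI_ADDR        0xC6000
    #define SBI_DATA        0xC6004
    #define SBI_CTL_STAT    0xC6008
    #define SBI_CTL_OP_CRRD (0x6u << 8)
    #define SBI_BUSY        (0x1u << 0)

    static uint32_t mmio[0x40000];  /* stub register file */
    static void wr(uint32_t reg, uint32_t val)
    {
        if (reg == SBI_CTL_STAT)
            val &= ~SBI_BUSY;       /* stub: cycles complete instantly */
        mmio[reg >> 2] = val;
    }
    static uint32_t rd(uint32_t reg) { return mmio[reg >> 2]; }

    static uint32_t sbi_read(uint32_t offset)
    {
        wr(SBI_ADDR, offset);       /* field placement assumed */
        wr(SBI_CTL_STAT, SBI_BUSY | SBI_CTL_OP_CRRD);
        while (rd(SBI_CTL_STAT) & SBI_BUSY)
            ;                       /* wait for the cycle to finish */
        return rd(SBI_DATA);
    }

    int main(void)
    {
        return (int)sbi_read(0x060C);  /* e.g. SBI_SSCCTL6 */
    }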
+
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE 0xC6020
+#define PIXCLK_GATE_UNGATE (1<<0)
+#define PIXCLK_GATE_GATE (0<<0)
+
+/* SPLL */
+#define SPLL_CTL 0x46020
+#define SPLL_PLL_ENABLE (1<<31)
+#define SPLL_PLL_SCC (1<<28)
+#define SPLL_PLL_NON_SCC (2<<28)
+#define SPLL_PLL_FREQ_810MHz (0<<26)
+#define SPLL_PLL_FREQ_1350MHz (1<<26)
+
+/* WRPLL */
+#define WRPLL_CTL1 0x46040
+#define WRPLL_CTL2 0x46060
+#define WRPLL_PLL_ENABLE (1<<31)
+#define WRPLL_PLL_SELECT_SSC (0x01<<28)
+#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
+#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
+/* WRPLL divider programming */
+#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_POST(x) ((x)<<8)
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+
+/* Port clock selection */
+#define PORT_CLK_SEL_A 0x46100
+#define PORT_CLK_SEL_B 0x46104
+#define PORT_CLK_SEL(port) _PORT(port, \
+ PORT_CLK_SEL_A, \
+ PORT_CLK_SEL_B)
+#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
+#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
+#define PORT_CLK_SEL_LCPLL_810 (2<<29)
+#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_WRPLL1 (4<<29)
+#define PORT_CLK_SEL_WRPLL2 (5<<29)
+
+/* Pipe clock selection */
+#define PIPE_CLK_SEL_A 0x46140
+#define PIPE_CLK_SEL_B 0x46144
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
+ PIPE_CLK_SEL_A, \
+ PIPE_CLK_SEL_B)
+/* For each pipe, we need to select the corresponding port clock */
+#define PIPE_CLK_SEL_DISABLED (0x0<<29)
+#define PIPE_CLK_SEL_PORT(x) (((x)+1)<<29)
+
+/* LCPLL Control */
+#define LCPLL_CTL 0x130040
+#define LCPLL_PLL_DISABLE (1<<31)
+#define LCPLL_PLL_LOCK (1<<30)
+#define LCPLL_CD_CLOCK_DISABLE (1<<25)
+#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define PIPE_WM_LINETIME_A 0x45270
+#define PIPE_WM_LINETIME_B 0x45274
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
+ PIPE_WM_LINETIME_A, \
+ PIPE_WM_LINETIME_B)
+#define PIPE_WM_LINETIME_MASK (0x1ff)
+#define PIPE_WM_LINETIME_TIME(x) ((x))
+#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP 0xc2014
+#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
+#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
+#define SFUSE_STRAP_DDID_DETECTED (1<<0)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2b5eb229ff2c..0ede02a99d91 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -40,7 +40,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
return false;
if (HAS_PCH_SPLIT(dev))
- dpll_reg = PCH_DPLL(pipe);
+ dpll_reg = _PCH_DPLL(pipe);
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
@@ -876,22 +876,6 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR);
}
- mutex_unlock(&dev->struct_mutex);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_init_clock_gating(dev);
-
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- intel_init_emon(dev);
- }
-
- if (INTEL_INFO(dev)->gen >= 6) {
- gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
-
- mutex_lock(&dev->struct_mutex);
/* Cache mode state */
I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
new file mode 100644
index 000000000000..79f83445afa0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include "i915_drv.h"
+
+static u32 calc_residency(struct drm_device *dev, const u32 reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 raw_time; /* 32b value may overflow during fixed point math */
+
+ if (!intel_enable_rc6(dev))
+ return 0;
+
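+ /* Each counter tick is presumably 1.28 us (inferred from the
+ * 128/100000 scaling below), making the returned value milliseconds. */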
+ raw_time = I915_READ(reg) * 128ULL;
+ return DIV_ROUND_UP_ULL(raw_time, 100000);
+}
+
+static ssize_t
+show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
+}
+
+static ssize_t
+show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+ return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
+}
+
+static ssize_t
+show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+ return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
+}
+
+static ssize_t
+show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+ return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
+}
+
+static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
+static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
+static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
+static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+
+static struct attribute *rc6_attrs[] = {
+ &dev_attr_rc6_enable.attr,
+ &dev_attr_rc6_residency_ms.attr,
+ &dev_attr_rc6p_residency_ms.attr,
+ &dev_attr_rc6pp_residency_ms.attr,
+ NULL
+};
+
+static struct attribute_group rc6_attr_group = {
+ .name = power_group_name,
+ .attrs = rc6_attrs
+};
+
+void i915_setup_sysfs(struct drm_device *dev)
+{
+ int ret;
+
+ /* ILK doesn't have any residency information */
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+ if (ret)
+ DRM_ERROR("sysfs setup failed\n");
+}
+
+void i915_teardown_sysfs(struct drm_device *dev)
+{
+ sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+}
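
With the group merged under power_group_name, the files should land next to the device's other power attributes, e.g. /sys/class/drm/card0/power/rc6_residency_ms (path assumed for drm minor 0). A minimal user-space reader:

    #include <stdio.h>

    int main(void)
    {
        /* Path assumed: drm minor 0, attributes merged into its
         * "power" group by i915_setup_sysfs(). */
        FILE *f = fopen("/sys/class/drm/card0/power/rc6_residency_ms", "r");
        unsigned int ms;

        if (!f)
            return 1;
        if (fscanf(f, "%u", &ms) == 1)
            printf("RC6 residency: %u ms\n", ms);
        fclose(f);
        return 0;
    }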
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
index ead876eb6ea0..f1df2bd4ecf4 100644
--- a/drivers/gpu/drm/i915/i915_trace_points.c
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -7,5 +7,7 @@
#include "i915_drv.h"
+#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "i915_trace.h"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index bae3edf956a4..f413899475e9 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -9,6 +9,7 @@
#include <acpi/acpi_drivers.h>
#include "drmP.h"
+#include "i915_drv.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
@@ -182,8 +183,6 @@ static void intel_dsm_platform_mux_info(void)
DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
intel_dsm_mux_type(info->buffer.pointer[3]));
}
- } else {
- DRM_ERROR("MUX INFO call failed\n");
}
out:
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b48fc2a8410c..353459362f6f 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -174,6 +174,28 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
}
+/* get lvds_fp_timing entry
+ * this function may return NULL if the corresponding entry is invalid
+ */
+static const struct lvds_fp_timing *
+get_lvds_fp_timing(const struct bdb_header *bdb,
+ const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
+ int index)
+{
+ size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
+ u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
+ size_t ofs;
+
+ if (index >= ARRAY_SIZE(ptrs->ptr))
+ return NULL;
+ ofs = ptrs->ptr[index].fp_timing_offset;
+ if (ofs < data_ofs ||
+ ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
+ return NULL;
+ return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
+}
+
/* Try to find integrated panel data */
static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
@@ -183,6 +205,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
const struct bdb_lvds_lfp_data *lvds_lfp_data;
const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
const struct lvds_dvo_timing *panel_dvo_timing;
+ const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
int i, downclock;
@@ -244,6 +267,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
"Normal Clock %dKHz, downclock %dKHz\n",
panel_fixed_mode->clock, 10*downclock);
}
+
+ fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
+ if (fp_timing) {
+ /* check the resolution, just to be sure */
+ if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+ fp_timing->y_res == panel_fixed_mode->vdisplay) {
+ dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
+ DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
+ dev_priv->bios_lvds_val);
+ }
+ }
}
/* Try to find sdvo panel data */
@@ -256,6 +292,11 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
int index;
index = i915_vbt_sdvo_panel_type;
+ if (index == -2) {
+ DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+ return;
+ }
+
if (index == -1) {
struct bdb_sdvo_lvds_options *sdvo_lvds_options;
@@ -332,11 +373,11 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
- if (bus_pin >= 1 && bus_pin <= 6)
+ if (intel_gmbus_is_port_valid(bus_pin))
dev_priv->crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
- block_size);
+ block_size);
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 90b9793fd5da..75a70c46ef1b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -55,18 +55,36 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
struct intel_crt, base);
}
-static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
+static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 temp, reg;
+ u32 temp;
- if (HAS_PCH_SPLIT(dev))
- reg = PCH_ADPA;
- else
- reg = ADPA;
+ temp = I915_READ(PCH_ADPA);
+ temp &= ~ADPA_DAC_ENABLE;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ temp |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* Just leave port enable cleared */
+ break;
+ }
+
+ I915_WRITE(PCH_ADPA, temp);
+}
- temp = I915_READ(reg);
+static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 temp;
+
+ temp = I915_READ(ADPA);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
@@ -85,7 +103,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
break;
}
- I915_WRITE(reg, temp);
+ I915_WRITE(ADPA, temp);
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -278,9 +296,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
struct edid *edid;
bool is_digital = false;
+ struct i2c_adapter *i2c;
- edid = drm_get_edid(connector,
- &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ edid = drm_get_edid(connector, i2c);
/*
* This may be a DVI-I connector with a shared DDC
* link between analog and digital outputs, so we
@@ -476,15 +495,16 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ struct i2c_adapter *i2c;
- ret = intel_ddc_get_modes(connector,
- &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ ret = intel_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- return intel_ddc_get_modes(connector,
- &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
+ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
+ return intel_ddc_get_modes(connector, i2c);
}
static int intel_crt_set_property(struct drm_connector *connector,
@@ -507,12 +527,20 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
-static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
- .dpms = intel_crt_dpms,
+static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
.prepare = intel_encoder_prepare,
.commit = intel_encoder_commit,
.mode_set = intel_crt_mode_set,
+ .dpms = pch_crt_dpms,
+};
+
+static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
+ .mode_fixup = intel_crt_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .commit = intel_encoder_commit,
+ .mode_set = intel_crt_mode_set,
+ .dpms = gmch_crt_dpms,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
@@ -536,7 +564,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident);
+ DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
return 1;
}
@@ -558,6 +586,7 @@ void intel_crt_init(struct drm_device *dev)
struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_encoder_helper_funcs *encoder_helper_funcs;
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
@@ -586,14 +615,23 @@ void intel_crt_init(struct drm_device *dev)
crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
1 << INTEL_ANALOG_CLONE_BIT |
1 << INTEL_SDVO_LVDS_CLONE_BIT);
- crt->base.crtc_mask = (1 << 0) | (1 << 1);
+ if (IS_HASWELL(dev))
+ crt->base.crtc_mask = (1 << 0);
+ else
+ crt->base.crtc_mask = (1 << 0) | (1 << 1);
+
if (IS_GEN2(dev))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
+ if (HAS_PCH_SPLIT(dev))
+ encoder_helper_funcs = &pch_encoder_funcs;
+ else
+ encoder_helper_funcs = &gmch_encoder_funcs;
+
+ drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
new file mode 100644
index 000000000000..46d1e886c692
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -0,0 +1,755 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/* HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well
+ */
+static const u32 hsw_ddi_translations_dp[] = {
+ 0x00FFFFFF, 0x0006000E, /* DP parameters */
+ 0x00D75FFF, 0x0005000A,
+ 0x00C30FFF, 0x00040006,
+ 0x80AAAFFF, 0x000B0000,
+ 0x00FFFFFF, 0x0005000A,
+ 0x00D75FFF, 0x000C0004,
+ 0x80C30FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006,
+ 0x80D75FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006 /* HDMI parameters */
+};
+
+static const u32 hsw_ddi_translations_fdi[] = {
+ 0x00FFFFFF, 0x0007000E, /* FDI parameters */
+ 0x00D75FFF, 0x000F000A,
+ 0x00C30FFF, 0x00060006,
+ 0x00AAAFFF, 0x001E0000,
+ 0x00FFFFFF, 0x000F000A,
+ 0x00D75FFF, 0x00160004,
+ 0x00C30FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00060006,
+ 0x00D75FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00040006 /* HDMI parameters */
+};
+
+/* On Haswell, DDI port buffers must be programmed with correct values
+ * in advance. The buffer values are different for FDI and DP modes,
+ * but the HDMI/DVI fields are shared between them. So we program the DDI
+ * in either FDI or DP mode only, as HDMI connections will work with both
+ * of them.
+ */
+void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+ int i;
+ const u32 *ddi_translations = use_fdi_mode ?
+ hsw_ddi_translations_fdi :
+ hsw_ddi_translations_dp;
+
+ DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
+ port_name(port),
+ use_fdi_mode ? "FDI" : "DP");
+
+ WARN((use_fdi_mode && (port != PORT_E)),
+ "Programming port %c in FDI mode, this probably will not work.\n",
+ port_name(port));
+
+ /* Both translation tables have the same number of entries */
+ for (i = 0, reg = DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ I915_WRITE(reg, ddi_translations[i]);
+ reg += 4;
+ }
+}
+
+/* Program DDI buffer translations for DP. By default, program ports A-D in DP
+ * mode and port E in FDI mode.
+ */
+void intel_prepare_ddi(struct drm_device *dev)
+{
+ int port;
+
+ if (IS_HASWELL(dev)) {
+ for (port = PORT_A; port < PORT_E; port++)
+ intel_prepare_ddi_buffers(dev, port, false);
+
+ /* DDI E is the suggested port to work in FDI mode, so program it as
+ * such by default. It will have to be re-programmed if a DP output
+ * is detected on it.
+ */
+ intel_prepare_ddi_buffers(dev, PORT_E, true);
+ }
+}
+
+static const long hsw_ddi_buf_ctl_values[] = {
+ DDI_BUF_EMP_400MV_0DB_HSW,
+ DDI_BUF_EMP_400MV_3_5DB_HSW,
+ DDI_BUF_EMP_400MV_6DB_HSW,
+ DDI_BUF_EMP_400MV_9_5DB_HSW,
+ DDI_BUF_EMP_600MV_0DB_HSW,
+ DDI_BUF_EMP_600MV_3_5DB_HSW,
+ DDI_BUF_EMP_600MV_6DB_HSW,
+ DDI_BUF_EMP_800MV_0DB_HSW,
+ DDI_BUF_EMP_800MV_3_5DB_HSW
+};
+
+
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which we use here. Also,
+ * please note that when FDI mode is active on DDI E, it shares 2 lines with
+ * DDI A (which is used for eDP)
+ */
+
+void hsw_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i;
+
+ /* Configure CPU PLL, wait for warmup */
+ I915_WRITE(SPLL_CTL,
+ SPLL_PLL_ENABLE |
+ SPLL_PLL_FREQ_1350MHz |
+ SPLL_PLL_SCC);
+
+ /* Use SPLL to drive the output when in FDI mode */
+ I915_WRITE(PORT_CLK_SEL(PORT_E),
+ PORT_CLK_SEL_SPLL);
+ I915_WRITE(PIPE_CLK_SEL(pipe),
+ PIPE_CLK_SEL_PORT(PORT_E));
+
+ udelay(20);
+
+ /* Start training, iterating through the available voltage and emphasis settings */
+ for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
+ /* Configure DP_TP_CTL with auto-training */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 |
+ DP_TP_CTL_ENABLE);
+
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp = (temp & ~DDI_BUF_EMP_MASK);
+ I915_WRITE(DDI_BUF_CTL(PORT_E),
+ temp |
+ DDI_BUF_CTL_ENABLE |
+ DDI_PORT_WIDTH_X2 |
+ hsw_ddi_buf_ctl_values[i]);
+
+ udelay(600);
+
+ /* Enable CPU FDI Receiver with auto-training */
+ reg = FDI_RX_CTL(pipe);
+ I915_WRITE(reg,
+ I915_READ(reg) |
+ FDI_LINK_TRAIN_AUTO |
+ FDI_RX_ENABLE |
+ FDI_LINK_TRAIN_PATTERN_1_CPT |
+ FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_PORT_WIDTH_2X_LPT |
+ FDI_RX_PLL_ENABLE);
+ POSTING_READ(reg);
+ udelay(100);
+
+ temp = I915_READ(DP_TP_STATUS(PORT_E));
+ if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+ DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
+
+ /* Enable normal pixel sending for FDI */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
+
+ /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
+ temp = I915_READ(DDI_FUNC_CTL(pipe));
+ temp &= ~PIPE_DDI_PORT_MASK;
+ temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
+ PIPE_DDI_MODE_SELECT_FDI |
+ PIPE_DDI_FUNC_ENABLE |
+ PIPE_DDI_PORT_WIDTH_X2;
+ I915_WRITE(DDI_FUNC_CTL(pipe),
+ temp);
+ break;
+ } else {
+ DRM_ERROR("Error training BUF_CTL %d\n", i);
+
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ I915_READ(DP_TP_CTL(PORT_E)) &
+ ~DP_TP_CTL_ENABLE);
+ I915_WRITE(FDI_RX_CTL(pipe),
+ I915_READ(FDI_RX_CTL(pipe)) &
+ ~FDI_RX_PLL_ENABLE);
+ continue;
+ }
+ }
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+/* For DDI connections, it is possible to support different outputs over the
+ * same DDI port, such as HDMI, DP or even VGA via FDI. So, at the time an
+ * output is detected, we don't know what exactly is on the other end of it.
+ * This function aims to provide support for this detection and the proper
+ * output configuration.
+ */
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+ /* For now, we don't do any proper output detection and assume that we
+ * handle HDMI only */
+
+ switch (port) {
+ case PORT_A:
+ /* We don't handle eDP and DP yet */
+ DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
+ break;
+ /* Assume that the ports B, C and D are working in HDMI mode for now */
+ case PORT_B:
+ case PORT_C:
+ case PORT_D:
+ intel_hdmi_init(dev, DDI_BUF_CTL(port));
+ break;
+ default:
+ DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
+ port);
+ break;
+ }
+}
+
+/* WRPLL clock dividers */
+struct wrpll_tmds_clock {
+ u32 clock;
+ u16 p; /* Post divider */
+ u16 n2; /* Feedback divider */
+ u16 r2; /* Reference divider */
+};
+
+/* Table of divider values for WRPLL clock programming, per target frequency */
+static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
+ {19750, 38, 25, 18},
+ {20000, 48, 32, 18},
+ {21000, 36, 21, 15},
+ {21912, 42, 29, 17},
+ {22000, 36, 22, 15},
+ {23000, 36, 23, 15},
+ {23500, 40, 40, 23},
+ {23750, 26, 16, 14},
+ {24000, 36, 24, 15},
+ {25000, 36, 25, 15},
+ {25175, 26, 40, 33},
+ {25200, 30, 21, 15},
+ {26000, 36, 26, 15},
+ {27000, 30, 21, 14},
+ {27027, 18, 100, 111},
+ {27500, 30, 29, 19},
+ {28000, 34, 30, 17},
+ {28320, 26, 30, 22},
+ {28322, 32, 42, 25},
+ {28750, 24, 23, 18},
+ {29000, 30, 29, 18},
+ {29750, 32, 30, 17},
+ {30000, 30, 25, 15},
+ {30750, 30, 41, 24},
+ {31000, 30, 31, 18},
+ {31500, 30, 28, 16},
+ {32000, 30, 32, 18},
+ {32500, 28, 32, 19},
+ {33000, 24, 22, 15},
+ {34000, 28, 30, 17},
+ {35000, 26, 32, 19},
+ {35500, 24, 30, 19},
+ {36000, 26, 26, 15},
+ {36750, 26, 46, 26},
+ {37000, 24, 23, 14},
+ {37762, 22, 40, 26},
+ {37800, 20, 21, 15},
+ {38000, 24, 27, 16},
+ {38250, 24, 34, 20},
+ {39000, 24, 26, 15},
+ {40000, 24, 32, 18},
+ {40500, 20, 21, 14},
+ {40541, 22, 147, 89},
+ {40750, 18, 19, 14},
+ {41000, 16, 17, 14},
+ {41500, 22, 44, 26},
+ {41540, 22, 44, 26},
+ {42000, 18, 21, 15},
+ {42500, 22, 45, 26},
+ {43000, 20, 43, 27},
+ {43163, 20, 24, 15},
+ {44000, 18, 22, 15},
+ {44900, 20, 108, 65},
+ {45000, 20, 25, 15},
+ {45250, 20, 52, 31},
+ {46000, 18, 23, 15},
+ {46750, 20, 45, 26},
+ {47000, 20, 40, 23},
+ {48000, 18, 24, 15},
+ {49000, 18, 49, 30},
+ {49500, 16, 22, 15},
+ {50000, 18, 25, 15},
+ {50500, 18, 32, 19},
+ {51000, 18, 34, 20},
+ {52000, 18, 26, 15},
+ {52406, 14, 34, 25},
+ {53000, 16, 22, 14},
+ {54000, 16, 24, 15},
+ {54054, 16, 173, 108},
+ {54500, 14, 24, 17},
+ {55000, 12, 22, 18},
+ {56000, 14, 45, 31},
+ {56250, 16, 25, 15},
+ {56750, 14, 25, 17},
+ {57000, 16, 27, 16},
+ {58000, 16, 43, 25},
+ {58250, 16, 38, 22},
+ {58750, 16, 40, 23},
+ {59000, 14, 26, 17},
+ {59341, 14, 40, 26},
+ {59400, 16, 44, 25},
+ {60000, 16, 32, 18},
+ {60500, 12, 39, 29},
+ {61000, 14, 49, 31},
+ {62000, 14, 37, 23},
+ {62250, 14, 42, 26},
+ {63000, 12, 21, 15},
+ {63500, 14, 28, 17},
+ {64000, 12, 27, 19},
+ {65000, 14, 32, 19},
+ {65250, 12, 29, 20},
+ {65500, 12, 32, 22},
+ {66000, 12, 22, 15},
+ {66667, 14, 38, 22},
+ {66750, 10, 21, 17},
+ {67000, 14, 33, 19},
+ {67750, 14, 58, 33},
+ {68000, 14, 30, 17},
+ {68179, 14, 46, 26},
+ {68250, 14, 46, 26},
+ {69000, 12, 23, 15},
+ {70000, 12, 28, 18},
+ {71000, 12, 30, 19},
+ {72000, 12, 24, 15},
+ {73000, 10, 23, 17},
+ {74000, 12, 23, 14},
+ {74176, 8, 100, 91},
+ {74250, 10, 22, 16},
+ {74481, 12, 43, 26},
+ {74500, 10, 29, 21},
+ {75000, 12, 25, 15},
+ {75250, 10, 39, 28},
+ {76000, 12, 27, 16},
+ {77000, 12, 53, 31},
+ {78000, 12, 26, 15},
+ {78750, 12, 28, 16},
+ {79000, 10, 38, 26},
+ {79500, 10, 28, 19},
+ {80000, 12, 32, 18},
+ {81000, 10, 21, 14},
+ {81081, 6, 100, 111},
+ {81624, 8, 29, 24},
+ {82000, 8, 17, 14},
+ {83000, 10, 40, 26},
+ {83950, 10, 28, 18},
+ {84000, 10, 28, 18},
+ {84750, 6, 16, 17},
+ {85000, 6, 17, 18},
+ {85250, 10, 30, 19},
+ {85750, 10, 27, 17},
+ {86000, 10, 43, 27},
+ {87000, 10, 29, 18},
+ {88000, 10, 44, 27},
+ {88500, 10, 41, 25},
+ {89000, 10, 28, 17},
+ {89012, 6, 90, 91},
+ {89100, 10, 33, 20},
+ {90000, 10, 25, 15},
+ {91000, 10, 32, 19},
+ {92000, 10, 46, 27},
+ {93000, 10, 31, 18},
+ {94000, 10, 40, 23},
+ {94500, 10, 28, 16},
+ {95000, 10, 44, 25},
+ {95654, 10, 39, 22},
+ {95750, 10, 39, 22},
+ {96000, 10, 32, 18},
+ {97000, 8, 23, 16},
+ {97750, 8, 42, 29},
+ {98000, 8, 45, 31},
+ {99000, 8, 22, 15},
+ {99750, 8, 34, 23},
+ {100000, 6, 20, 18},
+ {100500, 6, 19, 17},
+ {101000, 6, 37, 33},
+ {101250, 8, 21, 14},
+ {102000, 6, 17, 15},
+ {102250, 6, 25, 22},
+ {103000, 8, 29, 19},
+ {104000, 8, 37, 24},
+ {105000, 8, 28, 18},
+ {106000, 8, 22, 14},
+ {107000, 8, 46, 29},
+ {107214, 8, 27, 17},
+ {108000, 8, 24, 15},
+ {108108, 8, 173, 108},
+ {109000, 6, 23, 19},
+ {110000, 6, 22, 18},
+ {110013, 6, 22, 18},
+ {110250, 8, 49, 30},
+ {110500, 8, 36, 22},
+ {111000, 8, 23, 14},
+ {111264, 8, 150, 91},
+ {111375, 8, 33, 20},
+ {112000, 8, 63, 38},
+ {112500, 8, 25, 15},
+ {113100, 8, 57, 34},
+ {113309, 8, 42, 25},
+ {114000, 8, 27, 16},
+ {115000, 6, 23, 18},
+ {116000, 8, 43, 25},
+ {117000, 8, 26, 15},
+ {117500, 8, 40, 23},
+ {118000, 6, 38, 29},
+ {119000, 8, 30, 17},
+ {119500, 8, 46, 26},
+ {119651, 8, 39, 22},
+ {120000, 8, 32, 18},
+ {121000, 6, 39, 29},
+ {121250, 6, 31, 23},
+ {121750, 6, 23, 17},
+ {122000, 6, 42, 31},
+ {122614, 6, 30, 22},
+ {123000, 6, 41, 30},
+ {123379, 6, 37, 27},
+ {124000, 6, 51, 37},
+ {125000, 6, 25, 18},
+ {125250, 4, 13, 14},
+ {125750, 4, 27, 29},
+ {126000, 6, 21, 15},
+ {127000, 6, 24, 17},
+ {127250, 6, 41, 29},
+ {128000, 6, 27, 19},
+ {129000, 6, 43, 30},
+ {129859, 4, 25, 26},
+ {130000, 6, 26, 18},
+ {130250, 6, 42, 29},
+ {131000, 6, 32, 22},
+ {131500, 6, 38, 26},
+ {131850, 6, 41, 28},
+ {132000, 6, 22, 15},
+ {132750, 6, 28, 19},
+ {133000, 6, 34, 23},
+ {133330, 6, 37, 25},
+ {134000, 6, 61, 41},
+ {135000, 6, 21, 14},
+ {135250, 6, 167, 111},
+ {136000, 6, 62, 41},
+ {137000, 6, 35, 23},
+ {138000, 6, 23, 15},
+ {138500, 6, 40, 26},
+ {138750, 6, 37, 24},
+ {139000, 6, 34, 22},
+ {139050, 6, 34, 22},
+ {139054, 6, 34, 22},
+ {140000, 6, 28, 18},
+ {141000, 6, 36, 23},
+ {141500, 6, 22, 14},
+ {142000, 6, 30, 19},
+ {143000, 6, 27, 17},
+ {143472, 4, 17, 16},
+ {144000, 6, 24, 15},
+ {145000, 6, 29, 18},
+ {146000, 6, 47, 29},
+ {146250, 6, 26, 16},
+ {147000, 6, 49, 30},
+ {147891, 6, 23, 14},
+ {148000, 6, 23, 14},
+ {148250, 6, 28, 17},
+ {148352, 4, 100, 91},
+ {148500, 6, 33, 20},
+ {149000, 6, 48, 29},
+ {150000, 6, 25, 15},
+ {151000, 4, 19, 17},
+ {152000, 6, 27, 16},
+ {152280, 6, 44, 26},
+ {153000, 6, 34, 20},
+ {154000, 6, 53, 31},
+ {155000, 6, 31, 18},
+ {155250, 6, 50, 29},
+ {155750, 6, 45, 26},
+ {156000, 6, 26, 15},
+ {157000, 6, 61, 35},
+ {157500, 6, 28, 16},
+ {158000, 6, 65, 37},
+ {158250, 6, 44, 25},
+ {159000, 6, 53, 30},
+ {159500, 6, 39, 22},
+ {160000, 6, 32, 18},
+ {161000, 4, 31, 26},
+ {162000, 4, 18, 15},
+ {162162, 4, 131, 109},
+ {162500, 4, 53, 44},
+ {163000, 4, 29, 24},
+ {164000, 4, 17, 14},
+ {165000, 4, 22, 18},
+ {166000, 4, 32, 26},
+ {167000, 4, 26, 21},
+ {168000, 4, 46, 37},
+ {169000, 4, 104, 83},
+ {169128, 4, 64, 51},
+ {169500, 4, 39, 31},
+ {170000, 4, 34, 27},
+ {171000, 4, 19, 15},
+ {172000, 4, 51, 40},
+ {172750, 4, 32, 25},
+ {172800, 4, 32, 25},
+ {173000, 4, 41, 32},
+ {174000, 4, 49, 38},
+ {174787, 4, 22, 17},
+ {175000, 4, 35, 27},
+ {176000, 4, 30, 23},
+ {177000, 4, 38, 29},
+ {178000, 4, 29, 22},
+ {178500, 4, 37, 28},
+ {179000, 4, 53, 40},
+ {179500, 4, 73, 55},
+ {180000, 4, 20, 15},
+ {181000, 4, 55, 41},
+ {182000, 4, 31, 23},
+ {183000, 4, 42, 31},
+ {184000, 4, 30, 22},
+ {184750, 4, 26, 19},
+ {185000, 4, 37, 27},
+ {186000, 4, 51, 37},
+ {187000, 4, 36, 26},
+ {188000, 4, 32, 23},
+ {189000, 4, 21, 15},
+ {190000, 4, 38, 27},
+ {190960, 4, 41, 29},
+ {191000, 4, 41, 29},
+ {192000, 4, 27, 19},
+ {192250, 4, 37, 26},
+ {193000, 4, 20, 14},
+ {193250, 4, 53, 37},
+ {194000, 4, 23, 16},
+ {194208, 4, 23, 16},
+ {195000, 4, 26, 18},
+ {196000, 4, 45, 31},
+ {197000, 4, 35, 24},
+ {197750, 4, 41, 28},
+ {198000, 4, 22, 15},
+ {198500, 4, 25, 17},
+ {199000, 4, 28, 19},
+ {200000, 4, 37, 25},
+ {201000, 4, 61, 41},
+ {202000, 4, 112, 75},
+ {202500, 4, 21, 14},
+ {203000, 4, 146, 97},
+ {204000, 4, 62, 41},
+ {204750, 4, 44, 29},
+ {205000, 4, 38, 25},
+ {206000, 4, 29, 19},
+ {207000, 4, 23, 15},
+ {207500, 4, 40, 26},
+ {208000, 4, 37, 24},
+ {208900, 4, 48, 31},
+ {209000, 4, 48, 31},
+ {209250, 4, 31, 20},
+ {210000, 4, 28, 18},
+ {211000, 4, 25, 16},
+ {212000, 4, 22, 14},
+ {213000, 4, 30, 19},
+ {213750, 4, 38, 24},
+ {214000, 4, 46, 29},
+ {214750, 4, 35, 22},
+ {215000, 4, 43, 27},
+ {216000, 4, 24, 15},
+ {217000, 4, 37, 23},
+ {218000, 4, 42, 26},
+ {218250, 4, 42, 26},
+ {218750, 4, 34, 21},
+ {219000, 4, 47, 29},
+ {220000, 4, 44, 27},
+ {220640, 4, 49, 30},
+ {220750, 4, 36, 22},
+ {221000, 4, 36, 22},
+ {222000, 4, 23, 14},
+ {222525, 4, 28, 17},
+ {222750, 4, 33, 20},
+ {227000, 4, 37, 22},
+ {230250, 4, 29, 17},
+ {233500, 4, 38, 22},
+ {235000, 4, 40, 23},
+ {238000, 4, 30, 17},
+ {241500, 2, 17, 19},
+ {245250, 2, 20, 22},
+ {247750, 2, 22, 24},
+ {253250, 2, 15, 16},
+ {256250, 2, 18, 19},
+ {262500, 2, 31, 32},
+ {267250, 2, 66, 67},
+ {268500, 2, 94, 95},
+ {270000, 2, 14, 14},
+ {272500, 2, 77, 76},
+ {273750, 2, 57, 56},
+ {280750, 2, 24, 23},
+ {281250, 2, 23, 22},
+ {286000, 2, 17, 16},
+ {291750, 2, 26, 24},
+ {296703, 2, 56, 51},
+ {297000, 2, 22, 20},
+ {298000, 2, 21, 19},
+};
+
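/* Editorial sketch, not part of the patch: the divider triples above appear
 * to satisfy, for each target pixel clock in kHz,
 *
 *	clock ~ (2 * 2700000 * n2) / (r2 * p) / 10
 *
 * assuming a 2.7 GHz LCPLL reference, x2-encoded feedback (n2) and reference
 * (r2) dividers, and a TMDS bit clock of 10x the pixel clock. Many entries
 * match exactly (e.g. 148500 = 540000 * 33 / (20 * 6)); the rest are best-fit
 * approximations. A minimal helper under those assumptions:
 */
static unsigned int wrpll_entry_clock_khz(const struct wrpll_tmds_clock *e)
{
	/* 540000 = 2 * 2700000 / 10 */
	return (540000u * e->n2) / ((unsigned int)e->r2 * e->p);
}
/* Since the table is sorted by clock, the linear scan in the mode_set code
 * below could equally be a binary search.
 */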
+void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int port = intel_hdmi->ddi_port;
+ int pipe = intel_crtc->pipe;
+ int p, n2, r2, valid = 0;
+ u32 temp, i;
+
+ /* On Haswell, we need to enable the clocks and prepare DDI function to
+ * work in HDMI mode for this pipe.
+ */
+ DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
+
+ for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
+ if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
+ p = wrpll_tmds_clock_table[i].p;
+ n2 = wrpll_tmds_clock_table[i].n2;
+ r2 = wrpll_tmds_clock_table[i].r2;
+
+ DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
+ crtc->mode.clock,
+ p, n2, r2);
+
+ valid = 1;
+ break;
+ }
+ }
+
+ if (!valid) {
+ DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
+ crtc->mode.clock);
+ return;
+ }
+
+ /* Enable LCPLL if disabled */
+ temp = I915_READ(LCPLL_CTL);
+ if (temp & LCPLL_PLL_DISABLE)
+ I915_WRITE(LCPLL_CTL,
+ temp & ~LCPLL_PLL_DISABLE);
+
+ /* Configure WR PLL 1, program the correct divider values for
+ * the desired frequency and wait for warmup */
+ I915_WRITE(WRPLL_CTL1,
+ WRPLL_PLL_ENABLE |
+ WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) |
+ WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p));
+
+ udelay(20);
+
+ /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
+ * this port for connection.
+ */
+ I915_WRITE(PORT_CLK_SEL(port),
+ PORT_CLK_SEL_WRPLL1);
+ I915_WRITE(PIPE_CLK_SEL(pipe),
+ PIPE_CLK_SEL_PORT(port));
+
+ udelay(20);
+
+ if (intel_hdmi->has_audio) {
+ /* Proper support for digital audio needs new logic and a new set
+ * of registers, so we leave it for a future patch series.
+ */
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
+ pipe_name(intel_crtc->pipe));
+ }
+
+ /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = I915_READ(DDI_FUNC_CTL(pipe));
+ temp &= ~PIPE_DDI_PORT_MASK;
+ temp &= ~PIPE_DDI_BPC_12;
+ temp |= PIPE_DDI_SELECT_PORT(port) |
+ PIPE_DDI_MODE_SELECT_HDMI |
+ ((intel_crtc->bpp > 24) ?
+ PIPE_DDI_BPC_12 :
+ PIPE_DDI_BPC_8) |
+ PIPE_DDI_FUNC_ENABLE;
+
+ I915_WRITE(DDI_FUNC_CTL(pipe), temp);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int port = intel_hdmi->ddi_port;
+ u32 temp;
+
+ temp = I915_READ(DDI_BUF_CTL(port));
+
+ if (mode != DRM_MODE_DPMS_ON)
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ else
+ temp |= DDI_BUF_CTL_ENABLE;
+
+ /* Update DDI_BUF_CTL. In HDMI/DVI mode, the port width and
+ * swing/emphasis values are ignored, so nothing special needs
+ * to be done besides toggling the enable bit.
+ */
+ I915_WRITE(DDI_BUF_CTL(port), temp);
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b1cf3b3ff51..ee61ad1e642b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,7 +24,7 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/cpufreq.h>
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
@@ -44,7 +44,6 @@
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
-static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -360,6 +359,88 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.find_pll = intel_find_pll_ironlake_dp,
};
+u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
+{
+ unsigned long flags;
+ u32 val = 0;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+ DRM_ERROR("DPIO idle wait timed out\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(DPIO_REG, reg);
+ I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
+ DPIO_BYTE);
+ if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+ DRM_ERROR("DPIO read wait timed out\n");
+ goto out_unlock;
+ }
+ val = I915_READ(DPIO_DATA);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+ return val;
+}
+
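/* Editorial sketch, not part of the patch: a DPIO write helper would
 * presumably mirror the read sequence above -- wait for DPIO_BUSY to clear,
 * load data and register, issue a write opcode, then wait again. The
 * DPIO_OP_WRITE opcode and the exact packet bits are assumptions here.
 */
static void intel_dpio_write_sketch(struct drm_i915_private *dev_priv,
				    int reg, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");
		goto out_unlock;
	}

	I915_WRITE(DPIO_DATA, val);
	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
		   DPIO_BYTE);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
		DRM_ERROR("DPIO write wait timed out\n");

out_unlock:
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}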
+static void vlv_init_dpio(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Reset the DPIO config */
+ I915_WRITE(DPIO_CTL, 0);
+ POSTING_READ(DPIO_CTL);
+ I915_WRITE(DPIO_CTL, 1);
+ POSTING_READ(DPIO_CTL);
+}
+
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+ },
+ },
+ { } /* terminating entry */
+};
+
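/* Editorial sketch, not part of the patch: growing the quirk list means
 * appending another dmi_system_id entry before the terminator. The machine
 * below is purely hypothetical and only illustrates the shape of an entry:
 *
 *	{
 *		.callback = intel_dual_link_lvds_callback,
 *		.ident = "Hypothetical dual-link LVDS machine",
 *		.matches = {
 *			DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
 *			DMI_MATCH(DMI_PRODUCT_NAME, "Example1,1"),
 *		},
 *	},
 */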
+static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
+ unsigned int reg)
+{
+ unsigned int val;
+
+ /* use the module option value if specified */
+ if (i915_lvds_channel_mode > 0)
+ return i915_lvds_channel_mode == 2;
+
+ if (dmi_check_system(intel_dual_link_lvds))
+ return true;
+
+ if (dev_priv->lvds_val)
+ val = dev_priv->lvds_val;
+ else {
+ /* BIOS should set the proper LVDS register value at boot, but
+ * in reality, it doesn't set the value when the lid is closed;
+ * we need to check "the value to be set" in the VBT when the LVDS
+ * register is uninitialized.
+ */
+ val = I915_READ(reg);
+ if (!(val & ~LVDS_DETECTED))
+ val = dev_priv->bios_lvds_val;
+ dev_priv->lvds_val = val;
+ }
+ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
@@ -368,8 +449,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP) {
+ if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
/* LVDS dual channel */
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -397,8 +477,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (is_dual_link_lvds(dev_priv, LVDS))
/* LVDS with dual channel */
limit = &intel_limits_g4x_dual_channel_lvds;
else
@@ -536,8 +615,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
* reliably set up different single/dual channel state, if we
* even can.
*/
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (is_dual_link_lvds(dev_priv, LVDS))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
@@ -706,6 +784,17 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
+static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 frame, frame_reg = PIPEFRAME(pipe);
+
+ frame = I915_READ(frame_reg);
+
+ if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
+ DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
/**
* intel_wait_for_vblank - wait for vblank on a given pipe
* @dev: drm device
@@ -719,6 +808,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = PIPESTAT(pipe);
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ironlake_wait_for_vblank(dev, pipe);
+ return;
+ }
+
/* Clear existing vblank status. Note this will clear any other
* sticky status fields as well.
*
@@ -771,15 +865,20 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
100))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
} else {
- u32 last_line;
+ u32 last_line, line_mask;
int reg = PIPEDSL(pipe);
unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ if (IS_GEN2(dev))
+ line_mask = DSL_LINEMASK_GEN2;
+ else
+ line_mask = DSL_LINEMASK_GEN3;
+
/* Wait for the display line to settle */
do {
- last_line = I915_READ(reg) & DSL_LINEMASK;
+ last_line = I915_READ(reg) & line_mask;
mdelay(5);
- } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
+ } while (((I915_READ(reg) & line_mask) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
@@ -811,26 +910,33 @@ static void assert_pll(struct drm_i915_private *dev_priv,
/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+ struct intel_crtc *intel_crtc, bool state)
{
int reg;
u32 val;
bool cur_state;
+ if (HAS_PCH_LPT(dev_priv->dev)) {
+ DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
+ return;
+ }
+
+ if (!intel_crtc->pch_pll) {
+ WARN(1, "asserting PCH PLL enabled with no PLL\n");
+ return;
+ }
+
if (HAS_PCH_CPT(dev_priv->dev)) {
u32 pch_dpll;
pch_dpll = I915_READ(PCH_DPLL_SEL);
/* Make sure the selected PLL is enabled to the transcoder */
- WARN(!((pch_dpll >> (4 * pipe)) & 8),
- "transcoder %d PLL not enabled\n", pipe);
-
- /* Convert the transcoder pipe number to a pll pipe number */
- pipe = (pch_dpll >> (4 * pipe)) & 1;
+ WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
+ "transcoder %d PLL not enabled\n", intel_crtc->pipe);
}
- reg = PCH_DPLL(pipe);
+ reg = intel_crtc->pch_pll->pll_reg;
val = I915_READ(reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
WARN(cur_state != state,
@@ -847,9 +953,16 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- reg = FDI_TX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_TX_ENABLE);
+ if (IS_HASWELL(dev_priv->dev)) {
+ /* On Haswell, DDI is used instead of FDI_TX_CTL */
+ reg = DDI_FUNC_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
+ } else {
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_TX_ENABLE);
+ }
WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
@@ -864,9 +977,14 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- reg = FDI_RX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_RX_ENABLE);
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
+ return;
+ } else {
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ }
WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
@@ -884,6 +1002,10 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
if (dev_priv->info->gen == 5)
return;
+ /* On Haswell, DDI ports are responsible for the FDI PLL setup */
+ if (IS_HASWELL(dev_priv->dev))
+ return;
+
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
@@ -895,6 +1017,10 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
+ return;
+ }
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1000,6 +1126,11 @@ static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
u32 val;
bool enabled;
+ if (HAS_PCH_LPT(dev_priv->dev)) {
+ DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
+ return;
+ }
+
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
@@ -1198,6 +1329,69 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
POSTING_READ(reg);
}
+/* SBI access */
+static void
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(SBI_ADDR,
+ (reg << 16));
+ I915_WRITE(SBI_DATA,
+ value);
+ I915_WRITE(SBI_CTL_STAT,
+ SBI_BUSY |
+ SBI_CTL_OP_CRWR);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
+ goto out_unlock;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+}
+
+static u32
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(SBI_ADDR,
+ (reg << 16));
+ I915_WRITE(SBI_CTL_STAT,
+ SBI_BUSY |
+ SBI_CTL_OP_CRRD);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
+ goto out_unlock;
+ }
+
+ value = I915_READ(SBI_DATA);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+ return value;
+}
+
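/* Editorial sketch, not part of the patch: with the pair above, a
 * read-modify-write of a sideband register is just a composition of the two
 * helpers. The register and bits are placeholders; note also that the read
 * and write each take dpio_lock independently, so a concurrent writer could
 * interleave between them.
 */
static void intel_sbi_rmw_sketch(struct drm_i915_private *dev_priv,
				 u16 reg, u32 set_bits)
{
	u32 tmp = intel_sbi_read(dev_priv, reg);

	intel_sbi_write(dev_priv, reg, tmp | set_bits);
}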
/**
* intel_enable_pch_pll - enable PCH PLL
* @dev_priv: i915 private structure
@@ -1206,60 +1400,88 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
-static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll;
int reg;
u32 val;
- if (pipe > 1)
+ /* PCH PLLs only available on ILK, SNB and IVB */
+ BUG_ON(dev_priv->info->gen < 5);
+ pll = intel_crtc->pch_pll;
+ if (pll == NULL)
return;
- /* PCH only available on ILK+ */
- BUG_ON(dev_priv->info->gen < 5);
+ if (WARN_ON(pll->refcount == 0))
+ return;
+
+ DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
+ pll->pll_reg, pll->active, pll->on,
+ intel_crtc->base.base.id);
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
- reg = PCH_DPLL(pipe);
+ if (pll->active++ && pll->on) {
+ assert_pch_pll_enabled(dev_priv, intel_crtc);
+ return;
+ }
+
+ DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
+
+ reg = pll->pll_reg;
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
+
+ pll->on = true;
}
-static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll = intel_crtc->pch_pll;
int reg;
- u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
- pll_sel = TRANSC_DPLL_ENABLE;
-
- if (pipe > 1)
- return;
+ u32 val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
+ if (pll == NULL)
+ return;
- /* Make sure transcoder isn't still depending on us */
- assert_transcoder_disabled(dev_priv, pipe);
+ if (WARN_ON(pll->refcount == 0))
+ return;
- if (pipe == 0)
- pll_sel |= TRANSC_DPLLA_SEL;
- else if (pipe == 1)
- pll_sel |= TRANSC_DPLLB_SEL;
+ DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
+ pll->pll_reg, pll->active, pll->on,
+ intel_crtc->base.base.id);
+ if (WARN_ON(pll->active == 0)) {
+ assert_pch_pll_disabled(dev_priv, intel_crtc);
+ return;
+ }
- if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
+ if (--pll->active) {
+ assert_pch_pll_enabled(dev_priv, intel_crtc);
return;
+ }
+
+ DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
- reg = PCH_DPLL(pipe);
+ /* Make sure transcoder isn't still depending on us */
+ assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
+
+ reg = pll->pll_reg;
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
+
+ pll->on = false;
}
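/* Editorial sketch, not part of the patch: the enable/disable pair above
 * treats a shared PCH PLL as a refcounted resource. The contract, with the
 * callers elided from this hunk:
 *
 *	intel_enable_pch_pll(intel_crtc);   // pll->active++; VCO on at 0 -> 1
 *	...                                 // transcoder may use the clock
 *	intel_disable_pch_pll(intel_crtc);  // pll->active--; VCO off at 1 -> 0
 *
 * Two CRTCs sharing one PLL thus keep it running until the last user drops
 * its reference; pll->on tracks the hardware state, pll->active the software
 * references.
 */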
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
@@ -1273,12 +1495,16 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
BUG_ON(dev_priv->info->gen < 5);
/* Make sure PCH DPLL is enabled */
- assert_pch_pll_enabled(dev_priv, pipe);
+ assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
+ return;
+ }
reg = TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1415,7 +1641,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
* Plane regs are double buffered, going from enabled->disabled needs a
* trigger in order to latch. The display address reg provides this.
*/
-static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
@@ -1526,490 +1752,6 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
disable_pch_hdmi(dev_priv, pipe, HDMID);
}
-static void i8xx_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 fbc_ctl;
-
- /* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
- if ((fbc_ctl & FBC_CTL_EN) == 0)
- return;
-
- fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- /* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
- return;
- }
-
- DRM_DEBUG_KMS("disabled FBC\n");
-}
-
-static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int cfb_pitch;
- int plane, i;
- u32 fbc_ctl, fbc_ctl2;
-
- cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
- if (fb->pitches[0] < cfb_pitch)
- cfb_pitch = fb->pitches[0];
-
- /* FBC_CTL wants 64B units */
- cfb_pitch = (cfb_pitch / 64) - 1;
- plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
-
- /* Clear old tags */
- for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- I915_WRITE(FBC_TAG + (i * 4), 0);
-
- /* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= plane;
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, crtc->y);
-
- /* enable it... */
- fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
- if (IS_I945GM(dev))
- fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
- fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
- fbc_ctl |= obj->fence_reg;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
- cfb_pitch, crtc->y, intel_crtc->plane);
-}
-
-static bool i8xx_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
-}
-
-static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- unsigned long stall_watermark = 200;
- u32 dpfc_ctl;
-
- dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
- I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
-
- I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
- I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
-
- /* enable it... */
- I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
-
- DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
-}
-
-static void g4x_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool g4x_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-static void sandybridge_blit_fbc_update(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blt_ecoskpd;
-
- /* Make sure blitter notifies FBC of writes */
- gen6_gt_force_wake_get(dev_priv);
- blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT);
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- POSTING_READ(GEN6_BLITTER_ECOSKPD);
- gen6_gt_force_wake_put(dev_priv);
-}
-
-static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- unsigned long stall_watermark = 200;
- u32 dpfc_ctl;
-
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- dpfc_ctl &= DPFC_RESERVED;
- dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- /* Set persistent mode for front-buffer rendering, ala X. */
- dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
- dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
- I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
-
- I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
- I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
- /* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- if (IS_GEN6(dev)) {
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- sandybridge_blit_fbc_update(dev);
- }
-
- DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
-}
-
-static void ironlake_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool ironlake_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-bool intel_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->display.fbc_enabled)
- return false;
-
- return dev_priv->display.fbc_enabled(dev);
-}
-
-static void intel_fbc_work_fn(struct work_struct *__work)
-{
- struct intel_fbc_work *work =
- container_of(to_delayed_work(__work),
- struct intel_fbc_work, work);
- struct drm_device *dev = work->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- if (work == dev_priv->fbc_work) {
- /* Double check that we haven't switched fb without cancelling
- * the prior work.
- */
- if (work->crtc->fb == work->fb) {
- dev_priv->display.enable_fbc(work->crtc,
- work->interval);
-
- dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->cfb_fb = work->crtc->fb->base.id;
- dev_priv->cfb_y = work->crtc->y;
- }
-
- dev_priv->fbc_work = NULL;
- }
- mutex_unlock(&dev->struct_mutex);
-
- kfree(work);
-}
-
-static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->fbc_work == NULL)
- return;
-
- DRM_DEBUG_KMS("cancelling pending FBC enable\n");
-
- /* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc_work, so we can perform the cancellation
- * entirely asynchronously.
- */
- if (cancel_delayed_work(&dev_priv->fbc_work->work))
- /* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc_work);
-
- /* Mark the work as no longer wanted so that if it does
- * wake-up (because the work was already running and waiting
- * for our mutex), it will discover that is no longer
- * necessary to run.
- */
- dev_priv->fbc_work = NULL;
-}
-
-static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct intel_fbc_work *work;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->display.enable_fbc)
- return;
-
- intel_cancel_fbc_work(dev_priv);
-
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (work == NULL) {
- dev_priv->display.enable_fbc(crtc, interval);
- return;
- }
-
- work->crtc = crtc;
- work->fb = crtc->fb;
- work->interval = interval;
- INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
- dev_priv->fbc_work = work;
-
- DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
-
- /* Delay the actual enabling to let pageflipping cease and the
- * display to settle before starting the compression. Note that
- * this delay also serves a second purpose: it allows for a
- * vblank to pass after disabling the FBC before we attempt
- * to modify the control registers.
- *
- * A more complicated solution would involve tracking vblanks
- * following the termination of the page-flipping sequence
- * and indeed performing the enable as a co-routine and not
- * waiting synchronously upon the vblank.
- */
- schedule_delayed_work(&work->work, msecs_to_jiffies(50));
-}
-
-void intel_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- intel_cancel_fbc_work(dev_priv);
-
- if (!dev_priv->display.disable_fbc)
- return;
-
- dev_priv->display.disable_fbc(dev);
- dev_priv->cfb_plane = -1;
-}
-
-/**
- * intel_update_fbc - enable/disable FBC as needed
- * @dev: the drm_device
- *
- * Set up the framebuffer compression hardware at mode set time. We
- * enable it if possible:
- * - plane A only (on pre-965)
- * - no pixel mulitply/line duplication
- * - no alpha buffer discard
- * - no dual wide
- * - framebuffer <= 2048 in width, 1536 in height
- *
- * We can't assume that any compression will take place (worst case),
- * so the compressed buffer has to be the same size as the uncompressed
- * one. It also must reside (along with the line length buffer) in
- * stolen memory.
- *
- * We need to enable/disable FBC on a global basis.
- */
-static void intel_update_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL, *tmp_crtc;
- struct intel_crtc *intel_crtc;
- struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
- int enable_fbc;
-
- DRM_DEBUG_KMS("\n");
-
- if (!i915_powersave)
- return;
-
- if (!I915_HAS_FBC(dev))
- return;
-
- /*
- * If FBC is already on, we just have to verify that we can
- * keep it that way...
- * Need to disable if:
- * - more than one pipe is active
- * - changing FBC params (stride, fence, mode)
- * - new fb is too large to fit in compressed buffer
- * - going to an unsupported config (interlace, pixel multiply, etc.)
- */
- list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled && tmp_crtc->fb) {
- if (crtc) {
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
- goto out_disable;
- }
- crtc = tmp_crtc;
- }
- }
-
- if (!crtc || crtc->fb == NULL) {
- DRM_DEBUG_KMS("no output, disabling\n");
- dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
- goto out_disable;
- }
-
- intel_crtc = to_intel_crtc(crtc);
- fb = crtc->fb;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
- enable_fbc = i915_enable_fbc;
- if (enable_fbc < 0) {
- DRM_DEBUG_KMS("fbc set to per-chip default\n");
- enable_fbc = 1;
- if (INTEL_INFO(dev)->gen <= 6)
- enable_fbc = 0;
- }
- if (!enable_fbc) {
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
- goto out_disable;
- }
- if (intel_fb->obj->base.size > dev_priv->cfb_size) {
- DRM_DEBUG_KMS("framebuffer too large, disabling "
- "compression\n");
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- goto out_disable;
- }
- if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
- (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
- dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
- goto out_disable;
- }
- if ((crtc->mode.hdisplay > 2048) ||
- (crtc->mode.vdisplay > 1536)) {
- DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
- goto out_disable;
- }
- if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
- DRM_DEBUG_KMS("plane not 0, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_BAD_PLANE;
- goto out_disable;
- }
-
- /* The use of a CPU fence is mandatory in order to detect writes
- * by the CPU to the scanout and trigger updates to the FBC.
- */
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_NOT_TILED;
- goto out_disable;
- }
-
- /* If the kernel debugger is active, always disable compression */
- if (in_dbg_master())
- goto out_disable;
-
- /* If the scanout has not changed, don't modify the FBC settings.
- * Note that we make the fundamental assumption that the fb->obj
- * cannot be unpinned (and have its GTT offset and fence revoked)
- * without first being decoupled from the scanout and FBC disabled.
- */
- if (dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_fb == fb->base.id &&
- dev_priv->cfb_y == crtc->y)
- return;
-
- if (intel_fbc_enabled(dev)) {
- /* We update FBC along two paths, after changing fb/crtc
- * configuration (modeswitching) and after page-flipping
- * finishes. For the latter, we know that not only did
- * we disable the FBC at the start of the page-flip
- * sequence, but also more than one vblank has passed.
- *
- * For the former case of modeswitching, it is possible
- * to switch between two FBC valid configurations
- * instantaneously so we do need to disable the FBC
- * before we can modify its control registers. We also
- * have to wait for the next vblank for that to take
- * effect. However, since we delay enabling FBC we can
- * assume that a vblank has passed since disabling and
- * that we can safely alter the registers in the deferred
- * callback.
- *
- * In the scenario that we go from a valid to invalid
- * and then back to valid FBC configuration we have
- * no strict enforcement that a vblank occurred since
- * disabling the FBC. However, along all current pipe
- * disabling paths we do need to wait for a vblank at
- * some point. And we wait before enabling FBC anyway.
- */
- DRM_DEBUG_KMS("disabling active FBC for update\n");
- intel_disable_fbc(dev);
- }
-
- intel_enable_fbc(crtc, 500);
- return;
-
-out_disable:
- /* Multiple disables should be harmless */
- if (intel_fbc_enabled(dev)) {
- DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
- intel_disable_fbc(dev);
- }
-}
-
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -2050,13 +1792,11 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
- if (obj->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence(obj, pipelined);
- if (ret)
- goto err_unpin;
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto err_unpin;
- i915_gem_object_pin_fence(obj);
- }
+ i915_gem_object_pin_fence(obj);
dev_priv->mm.interruptible = true;
return 0;
@@ -2137,7 +1877,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
Start, Offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
} else
@@ -2217,7 +1957,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
Start, Offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- I915_WRITE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
POSTING_READ(reg);
@@ -2232,16 +1972,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = dev_priv->display.update_plane(crtc, fb, x, y);
- if (ret)
- return ret;
- intel_update_fbc(dev);
+ if (dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
intel_increase_pllclock(crtc);
- return 0;
+ return dev_priv->display.update_plane(crtc, fb, x, y);
}
static int
@@ -2276,6 +2012,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
@@ -2286,16 +2023,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
- switch (intel_crtc->plane) {
- case 0:
- case 1:
- break;
- case 2:
- if (IS_IVYBRIDGE(dev))
- break;
- /* fall through otherwise */
- default:
- DRM_ERROR("no plane for crtc\n");
+ if(intel_crtc->plane > dev_priv->num_pipe) {
+ DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
+ intel_crtc->plane,
+ dev_priv->num_pipe);
return -EINVAL;
}
@@ -2312,8 +2043,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb)
intel_finish_fb(old_fb);
- ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
- LEAVE_ATOMIC_MODE_SET);
+ ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
if (ret) {
intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
@@ -2326,6 +2056,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
}
+ intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
if (!dev->primary->master)
@@ -2547,7 +2278,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 reg, temp, i;
+ u32 reg, temp, i, retry;
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
@@ -2599,15 +2330,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(500);
- reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_BIT_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done.\n");
- break;
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ udelay(50);
}
+ if (retry < 5)
+ break;
}
if (i == 4)
DRM_ERROR("FDI train 1 fail!\n");
@@ -2648,15 +2383,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(500);
- reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
- break;
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ udelay(50);
}
+ if (retry < 5)
+ break;
}
if (i == 4)
DRM_ERROR("FDI train 2 fail!\n");
@@ -2808,14 +2547,18 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(200);
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+ /* On Haswell, the PLL configuration for ports and pipes is handled
+ * separately, as part of DDI setup */
+ if (!IS_HASWELL(dev)) {
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
- udelay(100);
+ POSTING_READ(reg);
+ udelay(100);
+ }
}
}
@@ -2888,38 +2631,16 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
}
-/*
- * When we disable a pipe, we need to clear any pending scanline wait events
- * to avoid hanging the ring, which we assume we are waiting on.
- */
-static void intel_clear_scanline_wait(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- u32 tmp;
-
- if (IS_GEN2(dev))
- /* Can't break the hang on i8xx */
- return;
-
- ring = LP_RING(dev_priv);
- tmp = I915_READ_CTL(ring);
- if (tmp & RING_WAIT)
- I915_WRITE_CTL(ring, tmp);
-}
-
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
- struct drm_i915_gem_object *obj;
- struct drm_i915_private *dev_priv;
+ struct drm_device *dev = crtc->dev;
if (crtc->fb == NULL)
return;
- obj = to_intel_framebuffer(crtc->fb)->obj;
- dev_priv = crtc->dev->dev_private;
- wait_event(dev_priv->pending_flip_queue,
- atomic_read(&obj->pending_flip) == 0);
+ mutex_lock(&dev->struct_mutex);
+ intel_finish_fb(crtc->fb);
+ mutex_unlock(&dev->struct_mutex);
}
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
@@ -2936,6 +2657,22 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
if (encoder->base.crtc != crtc)
continue;
+ /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
+ * CPU handles all others */
+ if (IS_HASWELL(dev)) {
+ /* It is still unclear how this will work on PPT, so throw up a warning */
+ WARN_ON(!HAS_PCH_LPT(dev));
+
+ if (encoder->type == DRM_MODE_ENCODER_DAC) {
+ DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
+ return true;
+ } else {
+ DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
+ encoder->type);
+ return false;
+ }
+ }
+
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&encoder->base))
@@ -2947,6 +2684,97 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
return true;
}
+/* Program iCLKIP clock to the desired frequency */
+static void lpt_program_iclkip(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 divsel, phaseinc, auxdiv, phasedir = 0;
+ u32 temp;
+
+ /* It is necessary to ungate the pixclk gate prior to programming
+ * the divisors, and gate it back when it is done.
+ */
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+ /* Disable SSCCTL */
+ intel_sbi_write(dev_priv, SBI_SSCCTL6,
+ intel_sbi_read(dev_priv, SBI_SSCCTL6) |
+ SBI_SSCCTL_DISABLE);
+
+ /* 20MHz is a corner case which is out of range for the 7-bit divisor */
+ if (crtc->mode.clock == 20000) {
+ auxdiv = 1;
+ divsel = 0x41;
+ phaseinc = 0x20;
+ } else {
+ /* The iCLK virtual clock root frequency is in MHz,
+ * but the crtc->mode.clock is in KHz. To get the divisors,
+ * it is necessary to divide one by another, so we
+ * convert the virtual clock precision to KHz here for higher
+ * precision.
+ */
+ u32 iclk_virtual_root_freq = 172800 * 1000;
+ u32 iclk_pi_range = 64;
+ u32 desired_divisor, msb_divisor_value, pi_value;
+
+ desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+ msb_divisor_value = desired_divisor / iclk_pi_range;
+ pi_value = desired_divisor % iclk_pi_range;
+
+ auxdiv = 0;
+ divsel = msb_divisor_value - 2;
+ phaseinc = pi_value;
+ }
+
+ /* This should not happen with any sane values */
+ WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+ DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+ crtc->mode.clock,
+ auxdiv,
+ divsel,
+ phasedir,
+ phaseinc);
+
+ /* Program SSCDIVINTPHASE6 */
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+ temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+ temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
+ temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+ temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+
+ intel_sbi_write(dev_priv,
+ SBI_SSCDIVINTPHASE6,
+ temp);
+
+ /* Program SSCAUXDIV */
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+ temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+ temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+ intel_sbi_write(dev_priv,
+ SBI_SSCAUXDIV6,
+ temp);
+
+ /* Enable modulator and associated divider */
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+ temp &= ~SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv,
+ SBI_SSCCTL6,
+ temp);
+
+ /* Wait for initialization time */
+ udelay(24);
+
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+}
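
To make the divisor derivation concrete, a worked example with an assumed 148.5 MHz pixel clock (crtc->mode.clock == 148500; the numbers follow directly from the code above, not from hardware documentation):

	desired_divisor   = 172800000 / 148500 = 1163
	msb_divisor_value = 1163 / 64          = 18
	pi_value          = 1163 % 64          = 11

so auxdiv = 0, divsel = 18 - 2 = 16 and phaseinc = 11, all comfortably inside the 7-bit divisor range guarded by the WARN_ONs.
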
+
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
@@ -2961,29 +2789,41 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 reg, temp, transc_sel;
+ u32 reg, temp;
+
+ assert_transcoder_disabled(dev_priv, pipe);
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
- intel_enable_pch_pll(dev_priv, pipe);
+ intel_enable_pch_pll(intel_crtc);
- if (HAS_PCH_CPT(dev)) {
- transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
- TRANSC_DPLLB_SEL;
+ if (HAS_PCH_LPT(dev)) {
+ DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
+ lpt_program_iclkip(crtc);
+ } else if (HAS_PCH_CPT(dev)) {
+ u32 sel;
- /* Be sure PCH DPLL SEL is set */
temp = I915_READ(PCH_DPLL_SEL);
- if (pipe == 0) {
- temp &= ~(TRANSA_DPLLB_SEL);
- temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
- } else if (pipe == 1) {
- temp &= ~(TRANSB_DPLLB_SEL);
- temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
- } else if (pipe == 2) {
- temp &= ~(TRANSC_DPLLB_SEL);
- temp |= (TRANSC_DPLL_ENABLE | transc_sel);
+ switch (pipe) {
+ default:
+ case 0:
+ temp |= TRANSA_DPLL_ENABLE;
+ sel = TRANSA_DPLLB_SEL;
+ break;
+ case 1:
+ temp |= TRANSB_DPLL_ENABLE;
+ sel = TRANSB_DPLLB_SEL;
+ break;
+ case 2:
+ temp |= TRANSC_DPLL_ENABLE;
+ sel = TRANSC_DPLLB_SEL;
+ break;
}
+ if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
+ temp |= sel;
+ else
+ temp &= ~sel;
I915_WRITE(PCH_DPLL_SEL, temp);
}
@@ -2998,7 +2838,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
- intel_fdi_normal_train(crtc);
+ if (!IS_HASWELL(dev))
+ intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
@@ -3041,6 +2882,93 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_enable_transcoder(dev_priv, pipe);
}
+static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
+{
+ struct intel_pch_pll *pll = intel_crtc->pch_pll;
+
+ if (pll == NULL)
+ return;
+
+ if (pll->refcount == 0) {
+ WARN(1, "bad PCH PLL refcount\n");
+ return;
+ }
+
+ --pll->refcount;
+ intel_crtc->pch_pll = NULL;
+}
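
intel_put_pch_pll() is the release half of the sharing scheme implemented just below: intel_get_pch_pll() stores the chosen PLL in intel_crtc->pch_pll and bumps its refcount, and ironlake_crtc_off() drops that reference, so each CRTC holds at most one PCH PLL reference at a time and the WARN above catches an unbalanced put.
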
+
+static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
+{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll;
+ int i;
+
+ pll = intel_crtc->pch_pll;
+ if (pll) {
+ DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+ goto prepare;
+ }
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+ i = intel_crtc->pipe;
+ pll = &dev_priv->pch_plls[i];
+
+ DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+
+ goto found;
+ }
+
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ pll = &dev_priv->pch_plls[i];
+
+ /* Only want to check enabled timings first */
+ if (pll->refcount == 0)
+ continue;
+
+ if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
+ fp == I915_READ(pll->fp0_reg)) {
+ DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
+ intel_crtc->base.base.id,
+ pll->pll_reg, pll->refcount, pll->active);
+
+ goto found;
+ }
+ }
+
+ /* Ok no matching timings, maybe there's a free one? */
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ pll = &dev_priv->pch_plls[i];
+ if (pll->refcount == 0) {
+ DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+ goto found;
+ }
+ }
+
+ return NULL;
+
+found:
+ intel_crtc->pch_pll = pll;
+ pll->refcount++;
+ DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
+prepare: /* separate function? */
+ DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
+
+ /* Wait for the clocks to stabilize before rewriting the regs */
+ I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(pll->pll_reg);
+ udelay(150);
+
+ I915_WRITE(pll->fp0_reg, fp);
+ I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+ pll->on = false;
+ return pll;
+}
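
The allocation order above — reuse the CRTC's current PLL, share one whose programmed timings match, else claim any free slot — reduces to a small refcounted-pool search. A condensed sketch (struct pll_slot and find_pll_slot() are illustrative, not driver API; the real code additionally masks DPLL_VCO_ENABLE out of the register comparison):

	#include <linux/types.h>

	struct pll_slot {
		int refcount;
		u32 dpll, fp;		/* timings currently programmed */
	};

	static struct pll_slot *find_pll_slot(struct pll_slot *pool, int n,
					      u32 dpll, u32 fp)
	{
		int i;

		/* Pass 1: share an in-use slot with identical timings. */
		for (i = 0; i < n; i++)
			if (pool[i].refcount &&
			    pool[i].dpll == dpll && pool[i].fp == fp)
				return &pool[i];

		/* Pass 2: otherwise claim any unused slot. */
		for (i = 0; i < n; i++)
			if (pool[i].refcount == 0)
				return &pool[i];

		return NULL;		/* all slots busy with other timings */
	}
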
+
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3185,8 +3113,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
}
/* disable PCH DPLL */
- if (!intel_crtc->no_pll)
- intel_disable_pch_pll(dev_priv, pipe);
+ intel_disable_pch_pll(intel_crtc);
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
@@ -3214,7 +3141,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_clear_scanline_wait(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -3242,6 +3168,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+static void ironlake_crtc_off(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ intel_put_pch_pll(intel_crtc);
+}
+
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
@@ -3313,7 +3245,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc->active = false;
intel_update_fbc(dev);
intel_update_watermarks(dev);
- intel_clear_scanline_wait(dev);
}
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -3333,6 +3264,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+static void i9xx_crtc_off(struct drm_crtc *crtc)
+{
+}
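
i9xx_crtc_off() is intentionally empty: intel_crtc_disable() below calls dev_priv->display.off(crtc) unconditionally, so platforms with no per-CRTC PCH PLL state to release still need a no-op hook alongside ironlake_crtc_off().
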
+
/**
* Sets the power management mode of the pipe and plane.
*/
@@ -3380,25 +3315,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_device *dev = crtc->dev;
-
- /* Flush any pending WAITs before we disable the pipe. Note that
- * we need to drop the struct_mutex in order to acquire it again
- * during the lowlevel dpms routines around a couple of the
- * operations. It does not look trivial nor desirable to move
- * that locking higher. So instead we leave a window for the
- * submission of further commands on the fb before we can actually
- * disable it. This race with userspace exists anyway, and we can
- * only rely on the pipe being disabled by userspace after it
- * receives the hotplug notification and has flushed any pending
- * batches.
- */
- if (crtc->fb) {
- mutex_lock(&dev->struct_mutex);
- intel_finish_fb(crtc->fb);
- mutex_unlock(&dev->struct_mutex);
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ dev_priv->display.off(crtc);
+
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
@@ -3448,8 +3369,7 @@ void intel_encoder_commit(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
struct drm_device *dev = encoder->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
/* lvds has its own version of commit see intel_lvds_commit */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3487,6 +3407,11 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
+static int valleyview_get_display_clock_speed(struct drm_device *dev)
+{
+ return 400000; /* FIXME */
+}
+
static int i945_get_display_clock_speed(struct drm_device *dev)
{
return 400000;
@@ -3584,1342 +3509,6 @@ ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
-
-struct intel_watermark_params {
- unsigned long fifo_size;
- unsigned long max_wm;
- unsigned long default_wm;
- unsigned long guard_size;
- unsigned long cacheline_size;
-};
-
-/* Pineview has different values for various configs */
-static const struct intel_watermark_params pineview_display_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params pineview_display_hplloff_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_HPLLOFF_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params pineview_cursor_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params g4x_wm_info = {
- G4X_FIFO_SIZE,
- G4X_MAX_WM,
- G4X_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params g4x_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params i965_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- I915_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params i945_wm_info = {
- I945_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i915_wm_info = {
- I915_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i855_wm_info = {
- I855GM_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i830_wm_info = {
- I830_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params ironlake_display_wm_info = {
- ILK_DISPLAY_FIFO,
- ILK_DISPLAY_MAXWM,
- ILK_DISPLAY_DFTWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_wm_info = {
- ILK_CURSOR_FIFO,
- ILK_CURSOR_MAXWM,
- ILK_CURSOR_DFTWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_display_srwm_info = {
- ILK_DISPLAY_SR_FIFO,
- ILK_DISPLAY_MAX_SRWM,
- ILK_DISPLAY_DFT_SRWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_srwm_info = {
- ILK_CURSOR_SR_FIFO,
- ILK_CURSOR_MAX_SRWM,
- ILK_CURSOR_DFT_SRWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params sandybridge_display_wm_info = {
- SNB_DISPLAY_FIFO,
- SNB_DISPLAY_MAXWM,
- SNB_DISPLAY_DFTWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_wm_info = {
- SNB_CURSOR_FIFO,
- SNB_CURSOR_MAXWM,
- SNB_CURSOR_DFTWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_display_srwm_info = {
- SNB_DISPLAY_SR_FIFO,
- SNB_DISPLAY_MAX_SRWM,
- SNB_DISPLAY_DFT_SRWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
- SNB_CURSOR_SR_FIFO,
- SNB_CURSOR_MAX_SRWM,
- SNB_CURSOR_DFT_SRWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-
-
-/**
- * intel_calculate_wm - calculate watermark level
- * @clock_in_khz: pixel clock
- * @wm: chip FIFO params
- * @pixel_size: display pixel size
- * @latency_ns: memory latency for the platform
- *
- * Calculate the watermark level (the level at which the display plane will
- * start fetching from memory again). Each chip has a different display
- * FIFO size and allocation, so the caller needs to figure that out and pass
- * in the correct intel_watermark_params structure.
- *
- * As the pixel clock runs, the FIFO will be drained at a rate that depends
- * on the pixel size. When it reaches the watermark level, it'll start
- * fetching FIFO line-sized chunks from memory until the FIFO fills
- * past the watermark point. If the FIFO drains completely, a FIFO underrun
- * will occur, and a display engine hang could result.
- */
-static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
- const struct intel_watermark_params *wm,
- int fifo_size,
- int pixel_size,
- unsigned long latency_ns)
-{
- long entries_required, wm_size;
-
- /*
- * Note: we need to make sure we don't overflow for various clock &
- * latency values.
- * clocks go from a few thousand to several hundred thousand.
- * latency is usually a few thousand
- */
- entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
-
- DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
-
- wm_size = fifo_size - (entries_required + wm->guard_size);
-
- DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
-
- /* Don't promote wm_size to unsigned... */
- if (wm_size > (long)wm->max_wm)
- wm_size = wm->max_wm;
- if (wm_size <= 0)
- wm_size = wm->default_wm;
- return wm_size;
-}
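
A worked pass through intel_calculate_wm() with assumed numbers (a 100 MHz dot clock, 32bpp, the 5 us default latency defined below, 64-byte FIFO lines, a 96-entry FIFO and guard_size = 2):

	entries_required = (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes
	                 = DIV_ROUND_UP(2000, 64)            = 32 cachelines
	wm_size          = 96 - (32 + 2)                     = 62

so the register would be programmed with a watermark level of 62 entries, subject to the max_wm and default_wm clamps.
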
-
-struct cxsr_latency {
- int is_desktop;
- int is_ddr3;
- unsigned long fsb_freq;
- unsigned long mem_freq;
- unsigned long display_sr;
- unsigned long display_hpll_disable;
- unsigned long cursor_sr;
- unsigned long cursor_hpll_disable;
-};
-
-static const struct cxsr_latency cxsr_latency_table[] = {
- {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
- {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
- {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
- {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
- {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
-
- {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
- {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
- {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
- {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
- {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
-
- {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
- {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
- {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
- {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
- {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
-
- {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
- {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
- {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
- {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
- {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
-
- {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
- {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
- {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
- {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
- {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
-
- {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
- {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
- {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
- {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
- {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
-};
-
-static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
- int is_ddr3,
- int fsb,
- int mem)
-{
- const struct cxsr_latency *latency;
- int i;
-
- if (fsb == 0 || mem == 0)
- return NULL;
-
- for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
- latency = &cxsr_latency_table[i];
- if (is_desktop == latency->is_desktop &&
- is_ddr3 == latency->is_ddr3 &&
- fsb == latency->fsb_freq && mem == latency->mem_freq)
- return latency;
- }
-
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-
- return NULL;
-}
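
For example (row copied from the table above): a desktop part with DDR3, an 800 MHz FSB and 667 MHz memory matches {1, 1, 800, 667, 6420, 36420, 6873, 36873}, giving display_sr = 6420 and cursor_sr = 6873, consumed as nanosecond latencies by the watermark code.
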
-
-static void pineview_disable_cxsr(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* deactivate cxsr */
- I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
-}
-
-/*
- * Latency for FIFO fetches is dependent on several factors:
- * - memory configuration (speed, channels)
- * - chipset
- * - current MCH state
- * It can be fairly high in some situations, so here we assume a fairly
- * pessimal value. It's a tradeoff between extra memory fetches (if we
- * set this value too high, the FIFO will fetch frequently to stay full)
- * and power consumption (set it too low to save power and we might see
- * FIFO underruns and display "flicker").
- *
- * A value of 5us seems to be a good balance; safe for very low end
- * platforms but not overly aggressive on lower latency configs.
- */
-static const int latency_ns = 5000;
-
-static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- if (plane)
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
-static int i85x_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x1ff;
- if (plane)
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
- size >>= 1; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
-static int i845_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 2; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A",
- size);
-
- return size;
-}
-
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 1; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
-static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
-{
- struct drm_crtc *crtc, *enabled = NULL;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled && crtc->fb) {
- if (enabled)
- return NULL;
- enabled = crtc;
- }
- }
-
- return enabled;
-}
-
-static void pineview_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- const struct cxsr_latency *latency;
- u32 reg;
- unsigned long wm;
-
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
- dev_priv->fsb_freq, dev_priv->mem_freq);
- if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
- return;
- }
-
- crtc = single_enabled_crtc(dev);
- if (crtc) {
- int clock = crtc->mode.clock;
- int pixel_size = crtc->fb->bits_per_pixel / 8;
-
- /* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm,
- pineview_display_wm.fifo_size,
- pixel_size, latency->display_sr);
- reg = I915_READ(DSPFW1);
- reg &= ~DSPFW_SR_MASK;
- reg |= wm << DSPFW_SR_SHIFT;
- I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm,
- pineview_display_wm.fifo_size,
- pixel_size, latency->cursor_sr);
- reg = I915_READ(DSPFW3);
- reg &= ~DSPFW_CURSOR_SR_MASK;
- reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
- I915_WRITE(DSPFW3, reg);
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
- pixel_size, latency->display_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~DSPFW_HPLL_SR_MASK;
- reg |= wm & DSPFW_HPLL_SR_MASK;
- I915_WRITE(DSPFW3, reg);
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
- pixel_size, latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~DSPFW_HPLL_CURSOR_MASK;
- reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
- I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
- /* activate cxsr */
- I915_WRITE(DSPFW3,
- I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
- DRM_DEBUG_KMS("Self-refresh is enabled\n");
- } else {
- pineview_disable_cxsr(dev);
- DRM_DEBUG_KMS("Self-refresh is disabled\n");
- }
-}
-
-static bool g4x_compute_wm0(struct drm_device *dev,
- int plane,
- const struct intel_watermark_params *display,
- int display_latency_ns,
- const struct intel_watermark_params *cursor,
- int cursor_latency_ns,
- int *plane_wm,
- int *cursor_wm)
-{
- struct drm_crtc *crtc;
- int htotal, hdisplay, clock, pixel_size;
- int line_time_us, line_count;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
- *cursor_wm = cursor->guard_size;
- *plane_wm = display->guard_size;
- return false;
- }
-
- htotal = crtc->mode.htotal;
- hdisplay = crtc->mode.hdisplay;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- /* Use the small buffer method to calculate plane watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *plane_wm = entries + display->guard_size;
- if (*plane_wm > (int)display->max_wm)
- *plane_wm = display->max_wm;
-
- /* Use the large buffer method to calculate cursor watermark */
- line_time_us = ((htotal * 1000) / clock);
- line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * 64 * pixel_size;
- tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
- if (*cursor_wm > (int)cursor->max_wm)
- *cursor_wm = (int)cursor->max_wm;
-
- return true;
-}
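
The small-buffer method above, worked through with assumed values (100 MHz dot clock, 32bpp, 5000 ns latency, 64-byte cachelines, and a FIFO small enough that the tlb_miss term does not apply):

	entries  = ((100000 * 4 / 1000) * 5000) / 1000 = 2000 bytes
	         -> DIV_ROUND_UP(2000, 64)             = 32 cachelines
	plane_wm = 32 + guard_size

The tlb_miss correction is only added when the FIFO (fifo_size * cacheline_size bytes) exceeds eight bytes per displayed pixel (hdisplay * 8).
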
-
-/*
- * Check the wm result.
- *
- * If any calculated watermark value is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool g4x_check_srwm(struct drm_device *dev,
- int display_wm, int cursor_wm,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor)
-{
- DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
- display_wm, cursor_wm);
-
- if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
- display_wm, display->max_wm);
- return false;
- }
-
- if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
- cursor_wm, cursor->max_wm);
- return false;
- }
-
- if (!(display_wm || cursor_wm)) {
- DRM_DEBUG_KMS("SR latency is 0, disabling\n");
- return false;
- }
-
- return true;
-}
-
-static bool g4x_compute_srwm(struct drm_device *dev,
- int plane,
- int latency_ns,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor,
- int *display_wm, int *cursor_wm)
-{
- struct drm_crtc *crtc;
- int hdisplay, htotal, pixel_size, clock;
- unsigned long line_time_us;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *display_wm = *cursor_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- hdisplay = crtc->mode.hdisplay;
- htotal = crtc->mode.htotal;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- line_time_us = (htotal * 1000) / clock;
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = hdisplay * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *display_wm = entries + display->guard_size;
-
- /* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * 64;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
-
- return g4x_check_srwm(dev,
- *display_wm, *cursor_wm,
- display, cursor);
-}
-
-#define single_plane_enabled(mask) is_power_of_2(mask)
-
-static void g4x_update_wm(struct drm_device *dev)
-{
- static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
- int plane_sr, cursor_sr;
- unsigned int enabled = 0;
-
- if (g4x_compute_wm0(dev, 0,
- &g4x_wm_info, latency_ns,
- &g4x_cursor_wm_info, latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1;
-
- if (g4x_compute_wm0(dev, 1,
- &g4x_wm_info, latency_ns,
- &g4x_cursor_wm_info, latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 2;
-
- plane_sr = cursor_sr = 0;
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- sr_latency_ns,
- &g4x_wm_info,
- &g4x_cursor_wm_info,
- &plane_sr, &cursor_sr))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- else
- I915_WRITE(FW_BLC_SELF,
- I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- plane_sr, cursor_sr);
-
- I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- planea_wm);
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- /* HPLL off in SR has some issues on G4x... disable it */
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-}
-
-static void i965_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- int srwm = 1;
- int cursor_sr = 16;
-
- /* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev);
- if (crtc) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 12000;
- int clock = crtc->mode.clock;
- int htotal = crtc->mode.htotal;
- int hdisplay = crtc->mode.hdisplay;
- int pixel_size = crtc->fb->bits_per_pixel / 8;
- unsigned long line_time_us;
- int entries;
-
- line_time_us = ((htotal * 1000) / clock);
-
- /* Use ns/us then divide to preserve precision */
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * hdisplay;
- entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
- srwm = I965_FIFO_SIZE - entries;
- if (srwm < 0)
- srwm = 1;
- srwm &= 0x1ff;
- DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
- entries, srwm);
-
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * 64;
- entries = DIV_ROUND_UP(entries,
- i965_cursor_wm_info.cacheline_size);
- cursor_sr = i965_cursor_wm_info.fifo_size -
- (entries + i965_cursor_wm_info.guard_size);
-
- if (cursor_sr > i965_cursor_wm_info.max_wm)
- cursor_sr = i965_cursor_wm_info.max_wm;
-
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", srwm, cursor_sr);
-
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- } else {
- /* Turn off self refresh if both pipes are enabled */
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
- }
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
- srwm);
-
- /* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
- (8 << 16) | (8 << 8) | (8 << 0));
- I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
- /* update cursor SR watermark */
- I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-}
-
-static void i9xx_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- const struct intel_watermark_params *wm_info;
- uint32_t fwater_lo;
- uint32_t fwater_hi;
- int cwm, srwm = 1;
- int fifo_size;
- int planea_wm, planeb_wm;
- struct drm_crtc *crtc, *enabled = NULL;
-
- if (IS_I945GM(dev))
- wm_info = &i945_wm_info;
- else if (!IS_GEN2(dev))
- wm_info = &i915_wm_info;
- else
- wm_info = &i855_wm_info;
-
- fifo_size = dev_priv->display.get_fifo_size(dev, 0);
- crtc = intel_get_crtc_for_plane(dev, 0);
- if (crtc->enabled && crtc->fb) {
- planea_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
- enabled = crtc;
- } else
- planea_wm = fifo_size - wm_info->guard_size;
-
- fifo_size = dev_priv->display.get_fifo_size(dev, 1);
- crtc = intel_get_crtc_for_plane(dev, 1);
- if (crtc->enabled && crtc->fb) {
- planeb_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
- if (enabled == NULL)
- enabled = crtc;
- else
- enabled = NULL;
- } else
- planeb_wm = fifo_size - wm_info->guard_size;
-
- DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
-
- /*
- * Overlay gets an aggressive default since video jitter is bad.
- */
- cwm = 2;
-
- /* Play safe and disable self-refresh before adjusting watermarks. */
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
-
- /* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev) && enabled) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 6000;
- int clock = enabled->mode.clock;
- int htotal = enabled->mode.htotal;
- int hdisplay = enabled->mode.hdisplay;
- int pixel_size = enabled->fb->bits_per_pixel / 8;
- unsigned long line_time_us;
- int entries;
-
- line_time_us = (htotal * 1000) / clock;
-
- /* Use ns/us then divide to preserve precision */
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * hdisplay;
- entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
- srwm = wm_info->fifo_size - entries;
- if (srwm < 0)
- srwm = 1;
-
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
- else if (IS_I915GM(dev))
- I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
- }
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
-
- fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
- fwater_hi = (cwm & 0x1f);
-
- /* Set request length to 8 cachelines per fetch */
- fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
- fwater_hi = fwater_hi | (1 << 8);
-
- I915_WRITE(FW_BLC, fwater_lo);
- I915_WRITE(FW_BLC2, fwater_hi);
-
- if (HAS_FW_BLC(dev)) {
- if (enabled) {
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF,
- FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
- DRM_DEBUG_KMS("memory self refresh enabled\n");
- } else
- DRM_DEBUG_KMS("memory self refresh disabled\n");
- }
-}
-
-static void i830_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- uint32_t fwater_lo;
- int planea_wm;
-
- crtc = single_enabled_crtc(dev);
- if (crtc == NULL)
- return;
-
- planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
- dev_priv->display.get_fifo_size(dev, 0),
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
- fwater_lo = I915_READ(FW_BLC) & ~0xfff;
- fwater_lo |= (3<<8) | planea_wm;
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
-
- I915_WRITE(FW_BLC, fwater_lo);
-}
-
-#define ILK_LP0_PLANE_LATENCY 700
-#define ILK_LP0_CURSOR_LATENCY 1300
-
-/*
- * Check the wm result.
- *
- * If any calculated watermark value is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool ironlake_check_srwm(struct drm_device *dev, int level,
- int fbc_wm, int display_wm, int cursor_wm,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
- " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
- if (fbc_wm > SNB_FBC_MAX_SRWM) {
- DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
- fbc_wm, SNB_FBC_MAX_SRWM, level);
-
- /* fbc has its own way to disable FBC WM */
- I915_WRITE(DISP_ARB_CTL,
- I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
- return false;
- }
-
- if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
- display_wm, SNB_DISPLAY_MAX_SRWM, level);
- return false;
- }
-
- if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
- cursor_wm, SNB_CURSOR_MAX_SRWM, level);
- return false;
- }
-
- if (!(fbc_wm || display_wm || cursor_wm)) {
- DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
- return false;
- }
-
- return true;
-}
-
-/*
- * Compute watermark values of WM[1-3],
- */
-static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
- int latency_ns,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor,
- int *fbc_wm, int *display_wm, int *cursor_wm)
-{
- struct drm_crtc *crtc;
- unsigned long line_time_us;
- int hdisplay, htotal, pixel_size, clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *fbc_wm = *display_wm = *cursor_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- hdisplay = crtc->mode.hdisplay;
- htotal = crtc->mode.htotal;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- line_time_us = (htotal * 1000) / clock;
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = hdisplay * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *display_wm = entries + display->guard_size;
-
- /*
- * Spec says:
- * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
- */
- *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
-
- /* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * 64;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
-
- return ironlake_check_srwm(dev, level,
- *fbc_wm, *display_wm, *cursor_wm,
- display, cursor);
-}
-
-static void ironlake_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int fbc_wm, plane_wm, cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, 0,
- &ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
- &ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
- &plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEA_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1;
- }
-
- if (g4x_compute_wm0(dev, 1,
- &ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
- &ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
- &plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEB_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 2;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled))
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- ILK_READ_WM1_LATENCY() * 500,
- &ironlake_display_srwm_info,
- &ironlake_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- ILK_READ_WM2_LATENCY() * 500,
- &ironlake_display_srwm_info,
- &ironlake_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /*
- * WM3 is unsupported on ILK, probably because we don't have latency
- * data for that power state
- */
-}
-
-void sandybridge_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
- u32 val;
- int fbc_wm, plane_wm, cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, 0,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEA_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEA_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1;
- }
-
- if (g4x_compute_wm0(dev, 1,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEB_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEB_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 2;
- }
-
- /* IVB has 3 pipes */
- if (IS_IVYBRIDGE(dev) &&
- g4x_compute_wm0(dev, 2,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEC_IVB);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEC_IVB, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 3;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- *
- * SNB supports 3 levels of watermarks.
- *
- * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
- * and disabled in descending order
- *
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled) ||
- dev_priv->sprite_scaling_enabled)
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- SNB_READ_WM1_LATENCY() * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- SNB_READ_WM2_LATENCY() * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM3 */
- if (!ironlake_compute_srwm(dev, 3, enabled,
- SNB_READ_WM3_LATENCY() * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM3_LP_ILK,
- WM3_LP_EN |
- (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-}
-
-static bool
-sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int display_latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- int clock;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
- *sprite_wm = display->guard_size;
- return false;
- }
-
- clock = crtc->mode.clock;
-
- /* Use the small buffer method to calculate the sprite watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size -
- sprite_width * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
- if (*sprite_wm > (int)display->max_wm)
- *sprite_wm = display->max_wm;
-
- return true;
-}
-
-static bool
-sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- unsigned long line_time_us;
- int clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *sprite_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- clock = crtc->mode.clock;
- if (!clock) {
- *sprite_wm = 0;
- return false;
- }
-
- line_time_us = (sprite_width * 1000) / clock;
- if (!line_time_us) {
- *sprite_wm = 0;
- return false;
- }
-
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = sprite_width * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
-
- return *sprite_wm > 0x3ff ? false : true;
-}
-
-static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
- uint32_t sprite_width, int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
- u32 val;
- int sprite_wm, reg;
- int ret;
-
- switch (pipe) {
- case 0:
- reg = WM0_PIPEA_ILK;
- break;
- case 1:
- reg = WM0_PIPEB_ILK;
- break;
- case 2:
- reg = WM0_PIPEC_IVB;
- break;
- default:
- return; /* bad pipe */
- }
-
- ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
- &sandybridge_display_wm_info,
- latency, &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
- pipe);
- return;
- }
-
- val = I915_READ(reg);
- val &= ~WM0_PIPE_SPRITE_MASK;
- I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
- DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
-
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM1_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM1S_LP_ILK, sprite_wm);
-
- /* Only IVB has two more LP watermarks for sprite */
- if (!IS_IVYBRIDGE(dev))
- return;
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM2_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM2S_LP_IVB, sprite_wm);
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM3_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM3S_LP_IVB, sprite_wm);
-}
-
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- * - normal (i.e. non-self-refresh)
- * - self-refresh (SR) mode
- * - lines are large relative to FIFO size (buffer can hold up to 2)
- * - lines are small relative to FIFO size (buffer can hold more than 2
- * lines), so need to account for TLB latency
- *
- * The normal calculation is:
- * watermark = dotclock * bytes per pixel * latency
- * where latency is platform & configuration dependent (we assume pessimal
- * values here).
- *
- * The SR calculation is:
- * watermark = (trunc(latency/line time)+1) * surface width *
- * bytes per pixel
- * where
- * line time = htotal / dotclock
- * surface width = hdisplay for normal plane and 64 for cursor
- * and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that. And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-static void intel_update_watermarks(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.update_wm)
- dev_priv->display.update_wm(dev);
-}
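
The SR formula in the comment above, worked through with assumed 1080p-style timings (htotal = 2200, hdisplay = 1920, 148.5 MHz dot clock, 32bpp, 12000 ns SR latency), matches the i965 arithmetic removed earlier in this diff:

	line_time_us = (2200 * 1000) / 148500       = 14
	line factor  = ((12000 / 14) + 1000) / 1000 = 1
	entries      = 1 * 4 * 1920                 = 7680 bytes
	             -> DIV_ROUND_UP(7680, 64)      = 120 cachelines

and srwm is then the FIFO size minus those 120 entries, clamped to at least 1.
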
-
-void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
- uint32_t sprite_width, int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.update_sprite_wm)
- dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
- pixel_size);
-}
-
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_panel_use_ssc >= 0)
@@ -5143,6 +3732,222 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
}
}
+static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ temp = I915_READ(LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (pipe == 1) {
+ temp |= LVDS_PIPEB_SELECT;
+ } else {
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock->p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+ /* set the dithering flag on LVDS as needed */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (dev_priv->lvds_dither)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+ I915_WRITE(LVDS, temp);
+}
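
A note on the clock->p2 == 7 test above: in the i9xx LVDS clock limits, p2 is 14 for single-channel and 7 for dual-channel panels, so p2 == 7 identifies the dual-channel case and powers up the second set of LVDS data pairs.
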
+
+static void i9xx_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll;
+ bool is_sdvo;
+
+ is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (pixel_multiplier > 1) {
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ }
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ dpll |= DPLL_DVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ if (IS_PINEVIEW(dev))
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ else {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev) && reduced_clock)
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (INTEL_INFO(dev)->gen >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+ if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
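+ /* Write the configuration with the VCO still disabled; it is
+ * enabled below, once the LVDS/DP state has been set up. */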
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ intel_update_lvds(crtc, clock, adjusted_mode);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
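+ /* On i965+ the SDVO/UDI pixel multiplier lives in the separate
+ * DPLL_MD register rather than in the DPLL itself. */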
+ if (INTEL_INFO(dev)->gen >= 4) {
+ u32 temp = 0;
+ if (is_sdvo) {
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+ }
+ I915_WRITE(DPLL_MD(pipe), temp);
+ } else {
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+ }
+}
+
+static void i8xx_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll;
+
+ dpll = DPLL_VGA_MODE_DIS;
+
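+ /* Gen2 P1/P2 encoding: LVDS takes a one-hot P1 bitmask, other
+ * outputs a binary value with divide-by-two/four special cases. */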
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock->p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock->p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ intel_update_lvds(crtc, clock, adjusted_mode);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+}
+
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -5156,15 +3961,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll, dspcntr, pipeconf, vsyncshift;
- bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
+ u32 dspcntr, pipeconf, vsyncshift;
+ bool ok, has_reduced_clock = false, is_sdvo = false;
+ bool is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
- u32 temp;
- u32 lvds_sync = 0;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
@@ -5180,15 +3983,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (encoder->needs_tv_clock)
is_tv = true;
break;
- case INTEL_OUTPUT_DVO:
- is_dvo = true;
- break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
@@ -5235,71 +4032,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
&reduced_clock : NULL);
- dpll = DPLL_VGA_MODE_DIS;
-
- if (!IS_GEN2(dev)) {
- if (is_lvds)
- dpll |= DPLLB_MODE_LVDS;
- else
- dpll |= DPLLB_MODE_DAC_SERIAL;
- if (is_sdvo) {
- int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
- if (pixel_multiplier > 1) {
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- }
- dpll |= DPLL_DVO_HIGH_SPEED;
- }
- if (is_dp)
- dpll |= DPLL_DVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev))
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
- else {
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev) && has_reduced_clock)
- dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- }
- switch (clock.p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
- if (INTEL_INFO(dev)->gen >= 4)
- dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- } else {
- if (is_lvds) {
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- } else {
- if (clock.p1 == 2)
- dpll |= PLL_P1_DIVIDE_BY_TWO;
- else
- dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (clock.p2 == 4)
- dpll |= PLL_P2_DIVIDE_BY_4;
- }
- }
-
- if (is_sdvo && is_tv)
- dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (is_tv)
- /* XXX: just matching BIOS for now */
- /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
- dpll |= 3;
- else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ if (IS_GEN2(dev))
+ i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
else
- dpll |= PLL_REF_INPUT_DREFCLK;
+ i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
/* setup pipeconf */
pipeconf = I915_READ(PIPECONF(pipe));
@@ -5336,97 +4074,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
}
- dpll |= DPLL_VCO_ENABLE;
-
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (is_lvds) {
- temp = I915_READ(LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (pipe == 1) {
- temp |= LVDS_PIPEB_SELECT;
- } else {
- temp &= ~LVDS_PIPEB_SELECT;
- }
- /* set the corresponsding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
- * set the DPLLs for dual-channel mode or not.
- */
- if (clock.p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
- * appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
- */
- /* set the dithering flag on LVDS as needed */
- if (INTEL_INFO(dev)->gen >= 4) {
- if (dev_priv->lvds_dither)
- temp |= LVDS_ENABLE_DITHER;
- else
- temp &= ~LVDS_ENABLE_DITHER;
- }
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- lvds_sync |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- lvds_sync |= LVDS_VSYNC_POLARITY;
- if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
- != lvds_sync) {
- char flags[2] = "-+";
- DRM_INFO("Changing LVDS panel from "
- "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
- flags[!(temp & LVDS_HSYNC_POLARITY)],
- flags[!(temp & LVDS_VSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- temp |= lvds_sync;
- }
- I915_WRITE(LVDS, temp);
- }
-
- if (is_dp) {
- intel_dp_set_m_n(crtc, mode, adjusted_mode);
- }
-
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- if (INTEL_INFO(dev)->gen >= 4) {
- temp = 0;
- if (is_sdvo) {
- temp = intel_mode_get_pixel_multiplier(adjusted_mode);
- if (temp > 1)
- temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- else
- temp = 0;
- }
- I915_WRITE(DPLL_MD(pipe), temp);
- } else {
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(DPLL(pipe), dpll);
- }
-
if (HAS_PIPE_CXSR(dev)) {
if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
@@ -5492,7 +4142,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -5668,17 +4317,16 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- struct intel_encoder *has_edp_encoder = NULL;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
+ struct intel_encoder *encoder, *edp_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
u32 temp;
- u32 lvds_sync = 0;
int target_clock, pixel_multiplier, lane, link_bw, factor;
unsigned int pipe_bpp;
bool dither;
+ bool is_cpu_edp = false, is_pch_edp = false;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
@@ -5704,7 +4352,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
- has_edp_encoder = encoder;
+ is_dp = true;
+ if (intel_encoder_is_pch_edp(&encoder->base))
+ is_pch_edp = true;
+ else
+ is_cpu_edp = true;
+ edp_encoder = encoder;
break;
}
@@ -5767,15 +4420,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
lane = 0;
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
- if (has_edp_encoder &&
- !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (is_cpu_edp) {
target_clock = mode->clock;
- intel_edp_link_config(has_edp_encoder,
- &lane, &link_bw);
+ intel_edp_link_config(edp_encoder, &lane, &link_bw);
} else {
/* [e]DP over FDI requires target mode clock
instead of link clock */
- if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ if (is_dp)
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
@@ -5866,7 +4517,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
- if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ if (is_dp && !is_cpu_edp)
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -5909,30 +4560,22 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- /* PCH eDP needs FDI, but CPU eDP does not */
- if (!intel_crtc->no_pll) {
- if (!has_edp_encoder ||
- intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- I915_WRITE(PCH_FP0(pipe), fp);
- I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-
- POSTING_READ(PCH_DPLL(pipe));
- udelay(150);
- }
- } else {
- if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(0))) {
- intel_crtc->use_pll_a = true;
- DRM_DEBUG_KMS("using pipe a dpll\n");
- } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(1))) {
- intel_crtc->use_pll_a = false;
- DRM_DEBUG_KMS("using pipe b dpll\n");
- } else {
- DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
+ * pre-Haswell/LPT generations */
+ if (HAS_PCH_LPT(dev)) {
+ DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
+ pipe);
+ } else if (!is_cpu_edp) {
+ struct intel_pch_pll *pll;
+
+ pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+ pipe);
return -EINVAL;
}
- }
+ } else
+ intel_put_pch_pll(intel_crtc);
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
@@ -5965,22 +4608,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- lvds_sync |= LVDS_HSYNC_POLARITY;
+ temp |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- lvds_sync |= LVDS_VSYNC_POLARITY;
- if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
- != lvds_sync) {
- char flags[2] = "-+";
- DRM_INFO("Changing LVDS panel from "
- "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
- flags[!(temp & LVDS_HSYNC_POLARITY)],
- flags[!(temp & LVDS_VSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- temp |= lvds_sync;
- }
+ temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(PCH_LVDS, temp);
}
@@ -5990,7 +4622,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
pipeconf |= PIPECONF_DITHER_EN;
pipeconf |= PIPECONF_DITHER_TYPE_SP;
}
- if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
/* For non-DP output, clear any trans DP clock recovery setting.*/
@@ -6000,13 +4632,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
- if (!intel_crtc->no_pll &&
- (!has_edp_encoder ||
- intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
- I915_WRITE(PCH_DPLL(pipe), dpll);
+ if (intel_crtc->pch_pll) {
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(pipe));
+ POSTING_READ(intel_crtc->pch_pll->pll_reg);
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -6014,20 +4644,20 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(pipe), dpll);
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
}
intel_crtc->lowfreq_avail = false;
- if (!intel_crtc->no_pll) {
+ if (intel_crtc->pch_pll) {
if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(PCH_FP1(pipe), fp2);
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
}
} else {
- I915_WRITE(PCH_FP1(pipe), fp);
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -6080,10 +4710,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
- if (has_edp_encoder &&
- !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- }
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
@@ -6097,6 +4725,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_update_watermarks(dev);
+ intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
return ret;
}
@@ -6451,7 +5081,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (!visible && !intel_crtc->cursor_visible)
return;
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
I915_WRITE(CURPOS_IVB(pipe), pos);
ivb_update_cursor(crtc, base);
} else {
@@ -6461,9 +5091,6 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
else
i9xx_update_cursor(crtc, base);
}
-
- if (visible)
- intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
@@ -6987,7 +5614,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
return mode;
}
@@ -7086,7 +5712,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
- u32 dpll;
+ int dpll;
DRM_DEBUG_DRIVER("downclocking LVDS\n");
@@ -7100,6 +5726,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
}
+
}
/**
@@ -7158,12 +5785,16 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy)
+ if (!dev_priv->busy) {
+ intel_sanitize_pm(dev);
dev_priv->busy = true;
- else
+ } else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ if (obj == NULL)
+ return;
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
@@ -7336,18 +5967,19 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long offset;
u32 flip_mask;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
- ret = BEGIN_LP_RING(6);
+ ret = intel_ring_begin(ring, 6);
if (ret)
- goto out;
+ goto err_unpin;
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
@@ -7356,15 +5988,19 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(0); /* aux display base address, unused */
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, 0); /* aux display base address, unused */
+ intel_ring_advance(ring);
+ return 0;
+
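+ /* On error, unwind the pin taken by intel_pin_and_fence_fb_obj() above */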
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7377,33 +6013,38 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long offset;
u32 flip_mask;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
- ret = BEGIN_LP_RING(6);
+ ret = intel_ring_begin(ring, 6);
if (ret)
- goto out;
+ goto err_unpin;
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_DISPLAY_FLIP_I915 |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
-
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7415,24 +6056,25 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
- ret = BEGIN_LP_RING(4);
+ ret = intel_ring_begin(ring, 4);
if (ret)
- goto out;
+ goto err_unpin;
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
- OUT_RING(obj->gtt_offset | obj->tiling_mode);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -7440,9 +6082,13 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7453,21 +6099,22 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
uint32_t pf, pipesrc;
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
- ret = BEGIN_LP_RING(4);
+ ret = intel_ring_begin(ring, 4);
if (ret)
- goto out;
+ goto err_unpin;
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0] | obj->tiling_mode);
- OUT_RING(obj->gtt_offset);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+ intel_ring_emit(ring, obj->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -7477,9 +6124,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7501,18 +6152,22 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
ret = intel_ring_begin(ring, 4);
if (ret)
- goto out;
+ goto err_unpin;
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, (obj->gtt_offset));
intel_ring_emit(ring, (MI_NOOP));
intel_ring_advance(ring);
-out:
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7589,6 +6244,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_pending;
intel_disable_fbc(dev);
+ intel_mark_busy(dev, obj);
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -7617,10 +6273,11 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg, val;
+ int i;
/* Clear any frame start delays used for debugging left by the BIOS */
- for_each_pipe(pipe) {
- reg = PIPECONF(pipe);
+ for_each_pipe(i) {
+ reg = PIPECONF(i);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
}
@@ -7690,6 +6347,23 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
+static void intel_pch_pll_init(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ if (dev_priv->num_pch_pll == 0) {
+ DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
+ return;
+ }
+
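+ /* Record each shared PLL's control and divisor register offsets
+ * so intel_get_pch_pll() can program them later. */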
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
+ dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
+ dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
+ }
+}
+
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7727,8 +6401,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->bpp = 24; /* default for pre-Ironlake */
if (HAS_PCH_SPLIT(dev)) {
- if (pipe == 2 && IS_IVYBRIDGE(dev))
- intel_crtc->no_pll = true;
intel_helper_funcs.prepare = ironlake_crtc_prepare;
intel_helper_funcs.commit = ironlake_crtc_commit;
} else {
@@ -7747,15 +6419,12 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -7828,12 +6497,31 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_crt_init(dev);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_HASWELL(dev)) {
+ int found;
+
+ /* Haswell uses DDI functions to detect digital outputs */
+ found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
+ /* DDI A only supports eDP */
+ if (found)
+ intel_ddi_init(dev, PORT_A);
+
+ /* DDI B, C and D detection is indicated by the SFUSE_STRAP
+ * register */
+ found = I915_READ(SFUSE_STRAP);
+
+ if (found & SFUSE_STRAP_DDIB_DETECTED)
+ intel_ddi_init(dev, PORT_B);
+ if (found & SFUSE_STRAP_DDIC_DETECTED)
+ intel_ddi_init(dev, PORT_C);
+ if (found & SFUSE_STRAP_DDID_DETECTED)
+ intel_ddi_init(dev, PORT_D);
+ } else if (HAS_PCH_SPLIT(dev)) {
int found;
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
- found = intel_sdvo_init(dev, PCH_SDVOB);
+ found = intel_sdvo_init(dev, PCH_SDVOB, true);
if (!found)
intel_hdmi_init(dev, HDMIB);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -7857,7 +6545,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
- found = intel_sdvo_init(dev, SDVOB);
+ found = intel_sdvo_init(dev, SDVOB, true);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, SDVOB);
@@ -7873,7 +6561,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
- found = intel_sdvo_init(dev, SDVOC);
+ found = intel_sdvo_init(dev, SDVOC, false);
}
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
@@ -8002,882 +6690,6 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.output_poll_changed = intel_fb_output_poll_changed,
};
-static struct drm_i915_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
-{
- struct drm_i915_gem_object *ctx;
- int ret;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ctx = i915_gem_alloc_object(dev, 4096);
- if (!ctx) {
- DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
- return NULL;
- }
-
- ret = i915_gem_object_pin(ctx, 4096, true);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n", ret);
- goto err_unref;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
- if (ret) {
- DRM_ERROR("failed to set-domain on power context: %d\n", ret);
- goto err_unpin;
- }
-
- return ctx;
-
-err_unpin:
- i915_gem_object_unpin(ctx);
-err_unref:
- drm_gem_object_unreference(&ctx->base);
- mutex_unlock(&dev->struct_mutex);
- return NULL;
-}
-
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 rgvswctl;
-
- rgvswctl = I915_READ16(MEMSWCTL);
- if (rgvswctl & MEMCTL_CMD_STS) {
- DRM_DEBUG("gpu busy, RCS change rejected\n");
- return false; /* still busy with another command */
- }
-
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- I915_WRITE16(MEMSWCTL, rgvswctl);
- POSTING_READ16(MEMSWCTL);
-
- rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE16(MEMSWCTL, rgvswctl);
-
- return true;
-}
-
-void ironlake_enable_drps(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rgvmodectl = I915_READ(MEMMODECTL);
- u8 fmax, fmin, fstart, vstart;
-
- /* Enable temp reporting */
- I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
- I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
-
- /* 100ms RC evaluation intervals */
- I915_WRITE(RCUPEI, 100000);
- I915_WRITE(RCDNEI, 100000);
-
- /* Set max/min thresholds to 90ms and 80ms respectively */
- I915_WRITE(RCBMAXAVG, 90000);
- I915_WRITE(RCBMINAVG, 80000);
-
- I915_WRITE(MEMIHYST, 1);
-
- /* Set up min, max, and cur for interrupt handling */
- fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
- fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
- fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
- MEMMODE_FSTART_SHIFT;
-
- vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
-
- dev_priv->fmax = fmax; /* IPS callback will increase this */
- dev_priv->fstart = fstart;
-
- dev_priv->max_delay = fstart;
- dev_priv->min_delay = fmin;
- dev_priv->cur_delay = fstart;
-
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
- fmax, fmin, fstart);
-
- I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
-
- /*
- * Interrupts will be enabled in ironlake_irq_postinstall
- */
-
- I915_WRITE(VIDSTART, vstart);
- POSTING_READ(VIDSTART);
-
- rgvmodectl |= MEMMODE_SWMODE_EN;
- I915_WRITE(MEMMODECTL, rgvmodectl);
-
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
- DRM_ERROR("stuck trying to change perf mode\n");
- msleep(1);
-
- ironlake_set_drps(dev, fstart);
-
- dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
- I915_READ(0x112e0);
- dev_priv->last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->last_count2 = I915_READ(0x112f4);
- getrawmonotonic(&dev_priv->last_time2);
-}
-
-void ironlake_disable_drps(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 rgvswctl = I915_READ16(MEMSWCTL);
-
- /* Ack interrupts, disable EFC interrupt */
- I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
- I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
- I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
- I915_WRITE(DEIIR, DE_PCU_EVENT);
- I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
-
- /* Go back to the starting frequency */
- ironlake_set_drps(dev, dev_priv->fstart);
- msleep(1);
- rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE(MEMSWCTL, rgvswctl);
- msleep(1);
-
-}
-
-void gen6_set_rps(struct drm_device *dev, u8 val)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 swreq;
-
- swreq = (val & 0x3ff) << 25;
- I915_WRITE(GEN6_RPNSWREQ, swreq);
-}
-
-void gen6_disable_rps(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
- I915_WRITE(GEN6_PMIER, 0);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (PMIMR) to mask PM interrupts. The only risk is in leaving
- * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
-
- spin_lock_irq(&dev_priv->rps_lock);
- dev_priv->pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps_lock);
-
- I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
-}
-
-static unsigned long intel_pxfreq(u32 vidfreq)
-{
- unsigned long freq;
- int div = (vidfreq & 0x3f0000) >> 16;
- int post = (vidfreq & 0x3000) >> 12;
- int pre = (vidfreq & 0x7);
-
- if (!pre)
- return 0;
-
- freq = ((div * 133333) / ((1<<post) * pre));
-
- return freq;
-}
-
-void intel_init_emon(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 lcfuse;
- u8 pxw[16];
- int i;
-
- /* Disable to program */
- I915_WRITE(ECR, 0);
- POSTING_READ(ECR);
-
- /* Program energy weights for various events */
- I915_WRITE(SDEW, 0x15040d00);
- I915_WRITE(CSIEW0, 0x007f0000);
- I915_WRITE(CSIEW1, 0x1e220004);
- I915_WRITE(CSIEW2, 0x04000004);
-
- for (i = 0; i < 5; i++)
- I915_WRITE(PEW + (i * 4), 0);
- for (i = 0; i < 3; i++)
- I915_WRITE(DEW + (i * 4), 0);
-
- /* Program P-state weights to account for frequency power adjustment */
- for (i = 0; i < 16; i++) {
- u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
- unsigned long freq = intel_pxfreq(pxvidfreq);
- unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
- unsigned long val;
-
- val = vid * vid;
- val *= (freq / 1000);
- val *= 255;
- val /= (127*127*900);
- if (val > 0xff)
- DRM_ERROR("bad pxval: %ld\n", val);
- pxw[i] = val;
- }
- /* Render standby states get 0 weight */
- pxw[14] = 0;
- pxw[15] = 0;
-
- for (i = 0; i < 4; i++) {
- u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
- (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
- I915_WRITE(PXW + (i * 4), val);
- }
-
- /* Adjust magic regs to magic values (more experimental results) */
- I915_WRITE(OGW0, 0);
- I915_WRITE(OGW1, 0);
- I915_WRITE(EG0, 0x00007f00);
- I915_WRITE(EG1, 0x0000000e);
- I915_WRITE(EG2, 0x000e0000);
- I915_WRITE(EG3, 0x68000300);
- I915_WRITE(EG4, 0x42000000);
- I915_WRITE(EG5, 0x00140031);
- I915_WRITE(EG6, 0);
- I915_WRITE(EG7, 0);
-
- for (i = 0; i < 8; i++)
- I915_WRITE(PXWL + (i * 4), 0);
-
- /* Enable PMON + select events */
- I915_WRITE(ECR, 0x80000019);
-
- lcfuse = I915_READ(LCFUSE02);
-
- dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
-}
-
-static int intel_enable_rc6(struct drm_device *dev)
-{
- /*
- * Respect the kernel parameter if it is set
- */
- if (i915_enable_rc6 >= 0)
- return i915_enable_rc6;
-
- /*
- * Disable RC6 on Ironlake
- */
- if (INTEL_INFO(dev)->gen == 5)
- return 0;
-
- /*
- * Disable rc6 on Sandybridge
- */
- if (INTEL_INFO(dev)->gen == 6) {
- DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
- return INTEL_RC6_ENABLE;
- }
- DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
- return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
-}
-
-void gen6_enable_rps(struct drm_i915_private *dev_priv)
-{
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
- u32 pcu_mbox, rc6_mask = 0;
- u32 gtfifodbg;
- int cur_freq, min_freq, max_freq;
- int rc6_mode;
- int i;
-
- /* Here begins a magic sequence of register writes to enable
- * auto-downclocking.
- *
- * Perhaps there might be some value in exposing these to
- * userspace...
- */
- I915_WRITE(GEN6_RC_STATE, 0);
- mutex_lock(&dev_priv->dev->struct_mutex);
-
- /* Clear the DBG now so we don't confuse earlier errors */
- if ((gtfifodbg = I915_READ(GTFIFODBG))) {
- DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- gen6_gt_force_wake_get(dev_priv);
-
- /* disable the counters and set deterministic thresholds */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
- I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
- for (i = 0; i < I915_NUM_RINGS; i++)
- I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
- I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
- I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
-
- rc6_mode = intel_enable_rc6(dev_priv->dev);
- if (rc6_mode & INTEL_RC6_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
-
- if (rc6_mode & INTEL_RC6p_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
-
- if (rc6_mode & INTEL_RC6pp_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
-
- DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
-
- I915_WRITE(GEN6_RC_CONTROL,
- rc6_mask |
- GEN6_RC_CTL_EI_MODE(1) |
- GEN6_RC_CTL_HW_ENABLE);
-
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(10) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN6_FREQUENCY(12));
-
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- 18 << 24 |
- 6 << 16);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
- I915_WRITE(GEN6_RP_UP_EI, 100000);
- I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
-
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-
- I915_WRITE(GEN6_PCODE_DATA, 0);
- I915_WRITE(GEN6_PCODE_MAILBOX,
- GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-
- min_freq = (rp_state_cap & 0xff0000) >> 16;
- max_freq = rp_state_cap & 0xff;
- cur_freq = (gt_perf_status & 0xff00) >> 8;
-
- /* Check for overclock support */
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
- pcu_mbox = I915_READ(GEN6_PCODE_DATA);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
- if (pcu_mbox & (1<<31)) { /* OC supported */
- max_freq = pcu_mbox & 0xff;
- DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
- }
-
- /* In units of 100MHz */
- dev_priv->max_delay = max_freq;
- dev_priv->min_delay = min_freq;
- dev_priv->cur_delay = cur_freq;
-
- /* requires MSI enabled */
- I915_WRITE(GEN6_PMIER,
- GEN6_PM_MBOX_EVENT |
- GEN6_PM_THERMAL_EVENT |
- GEN6_PM_RP_DOWN_TIMEOUT |
- GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_UP_EI_EXPIRED |
- GEN6_PM_RP_DOWN_EI_EXPIRED);
- spin_lock_irq(&dev_priv->rps_lock);
- WARN_ON(dev_priv->pm_iir != 0);
- I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps_lock);
- /* enable all PM interrupts */
- I915_WRITE(GEN6_PMINTRMSK, 0);
-
- gen6_gt_force_wake_put(dev_priv);
- mutex_unlock(&dev_priv->dev->struct_mutex);
-}
-
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
- int min_freq = 15;
- int gpu_freq, ia_freq, max_ia_freq;
- int scaling_factor = 180;
-
- max_ia_freq = cpufreq_quick_get_max(0);
- /*
- * Default to measured freq if none found, PCU will ensure we don't go
- * over
- */
- if (!max_ia_freq)
- max_ia_freq = tsc_khz;
-
- /* Convert from kHz to MHz */
- max_ia_freq /= 1000;
-
- mutex_lock(&dev_priv->dev->struct_mutex);
-
- /*
- * For each potential GPU frequency, load a ring frequency we'd like
- * to use for memory access. We do this by specifying the IA frequency
- * the PCU should use as a reference to determine the ring frequency.
- */
- for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
- gpu_freq--) {
- int diff = dev_priv->max_delay - gpu_freq;
-
- /*
- * For GPU frequencies less than 750MHz, just use the lowest
- * ring freq.
- */
- if (gpu_freq < min_freq)
- ia_freq = 800;
- else
- ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
- ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
-
- I915_WRITE(GEN6_PCODE_DATA,
- (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
- gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode write of freq table timed out\n");
- continue;
- }
- }
-
- mutex_unlock(&dev_priv->dev->struct_mutex);
-}
-
-static void ironlake_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- /* Required for FBC */
- dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
- DPFCRUNIT_CLOCK_GATE_DISABLE |
- DPFDUNIT_CLOCK_GATE_DISABLE;
- /* Required for CxSR */
- dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_3DCGDIS0,
- MARIUNIT_CLOCK_GATE_DISABLE |
- SVSMUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(PCH_3DCGDIS1,
- VFMUNIT_CLOCK_GATE_DISABLE);
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
- /*
- * According to the spec the following bits should be set in
- * order to enable memory self-refresh
- * The bit 22/21 of 0x42004
- * The bit 5 of 0x42020
- * The bit 15 of 0x45000
- */
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- (I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE | ILK_VSDPFD_FULL));
- I915_WRITE(ILK_DSPCLK_GATE,
- (I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE));
- I915_WRITE(DISP_ARB_CTL,
- (I915_READ(DISP_ARB_CTL) |
- DISP_FBC_WM_DIS));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- /*
- * Based on the document from hardware guys the following bits
- * should be set unconditionally in order to enable FBC.
- * The bit 22 of 0x42000
- * The bit 22 of 0x42004
- * The bit 7,8,9 of 0x42020.
- */
- if (IS_IRONLAKE_M(dev)) {
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPFC_DIS1 |
- ILK_DPFC_DIS2 |
- ILK_CLK_FBC);
- }
-
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_ELPIN_409_SELECT);
- I915_WRITE(_3D_CHICKEN2,
- _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
- _3D_CHICKEN2_WM_READ_PIPELINED);
-}
-
-static void gen6_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_ELPIN_409_SELECT);
-
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- I915_WRITE(GEN6_UCGCTL1,
- I915_READ(GEN6_UCGCTL1) |
- GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
-
- /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
- * gating disable must be set. Failure to set it results in
- * flickering pixels due to Z write ordering failures after
- * some amount of runtime in the Mesa "fire" demo, and Unigine
- * Sanctuary and Tropics, and apparently anything else with
- * alpha test or pixel discard.
- *
- * According to the spec, bit 11 (RCCUNIT) must also be set,
- * but we didn't debug actual testcases to find it out.
- */
- I915_WRITE(GEN6_UCGCTL2,
- GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
-
- /*
- * According to the spec the following bits should be
- * set in order to enable memory self-refresh and fbc:
- * The bit21 and bit22 of 0x42000
- * The bit21 and bit22 of 0x42004
- * The bit5 and bit7 of 0x42020
- * The bit14 of 0x70180
- * The bit14 of 0x71180
- */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE |
- ILK_DPFD_CLK_GATE);
-
- for_each_pipe(pipe) {
- I915_WRITE(DSPCNTR(pipe),
- I915_READ(DSPCNTR(pipe)) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_display_plane(dev_priv, pipe);
- }
-}
-
-static void ivybridge_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
- * This implements the WaDisableRCZUnitClockGating workaround.
- */
- I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
-
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
-
- I915_WRITE(IVB_CHICKEN3,
- CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
- CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
- /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
- I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
- GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
- /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
- I915_WRITE(GEN7_L3CNTLREG1,
- GEN7_WA_FOR_GEN7_L3_CONTROL);
- I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
- GEN7_WA_L3_CHICKEN_MODE);
-
- /* This is required by WaCatErrorRejectionIssue */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
- GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
- for_each_pipe(pipe) {
- I915_WRITE(DSPCNTR(pipe),
- I915_READ(DSPCNTR(pipe)) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_display_plane(dev_priv, pipe);
- }
-}
-
-static void g4x_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate;
-
- I915_WRITE(RENCLK_GATE_D1, 0);
- I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
- GS_UNIT_CLOCK_GATE_DISABLE |
- CL_UNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(RAMCLK_GATE_D, 0);
- dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
- OVRUNIT_CLOCK_GATE_DISABLE |
- OVCUNIT_CLOCK_GATE_DISABLE;
- if (IS_GM45(dev))
- dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
-}
-
-static void crestline_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
- I915_WRITE(DSPCLK_GATE_D, 0);
- I915_WRITE(RAMCLK_GATE_D, 0);
- I915_WRITE16(DEUC, 0);
-}
-
-static void broadwater_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
- I965_RCC_CLOCK_GATE_DISABLE |
- I965_RCPB_CLOCK_GATE_DISABLE |
- I965_ISC_CLOCK_GATE_DISABLE |
- I965_FBC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
-}
-
-static void gen3_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dstate = I915_READ(D_STATE);
-
- dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
- DSTATE_DOT_CLOCK_GATING;
- I915_WRITE(D_STATE, dstate);
-}
-
-static void i85x_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
-}
-
-static void i830_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
- DPLS_EDP_PPS_FIX_DIS);
- /* Without this, mode sets may fail silently on FDI */
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
-}
-
-static void ironlake_teardown_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->renderctx) {
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(&dev_priv->renderctx->base);
- dev_priv->renderctx = NULL;
- }
-
- if (dev_priv->pwrctx) {
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(&dev_priv->pwrctx->base);
- dev_priv->pwrctx = NULL;
- }
-}
-
-static void ironlake_disable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (I915_READ(PWRCTXA)) {
- /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
- wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
- 50);
-
- I915_WRITE(PWRCTXA, 0);
- POSTING_READ(PWRCTXA);
-
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- POSTING_READ(RSTDBYCTL);
- }
-
- ironlake_teardown_rc6(dev);
-}
-
-static int ironlake_setup_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->renderctx == NULL)
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->renderctx)
- return -ENOMEM;
-
- if (dev_priv->pwrctx == NULL)
- dev_priv->pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->pwrctx) {
- ironlake_teardown_rc6(dev);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void ironlake_enable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- /* rc6 disabled by default due to repeated reports of hanging during
- * boot and resume.
- */
- if (!intel_enable_rc6(dev))
- return;
-
- mutex_lock(&dev->struct_mutex);
- ret = ironlake_setup_rc6(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return;
- }
-
- /*
- * GPU can automatically power down the render unit if given a page
- * to save state.
- */
- ret = BEGIN_LP_RING(6);
- if (ret) {
- ironlake_teardown_rc6(dev);
- mutex_unlock(&dev->struct_mutex);
- return;
- }
-
- OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
- OUT_RING(MI_SET_CONTEXT);
- OUT_RING(dev_priv->renderctx->gtt_offset |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
- OUT_RING(MI_SUSPEND_FLUSH);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_FLUSH);
- ADVANCE_LP_RING();
-
- /*
- * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
- * does an implicit flush, combined with MI_FLUSH above, it should be
- * safe to assume that renderctx is valid
- */
- ret = intel_wait_ring_idle(LP_RING(dev_priv));
- if (ret) {
- DRM_ERROR("failed to enable ironlake power power savings\n");
- ironlake_teardown_rc6(dev);
- mutex_unlock(&dev->struct_mutex);
- return;
- }
-
- I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- mutex_unlock(&dev->struct_mutex);
-}
-
-void intel_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- dev_priv->display.init_clock_gating(dev);
-
- if (dev_priv->display.init_pch_clock_gating)
- dev_priv->display.init_pch_clock_gating(dev);
-}
-
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
@@ -8887,32 +6699,20 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else {
dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
}
- if (I915_HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = ironlake_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (IS_GM45(dev)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_enable_fbc;
- dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else if (IS_CRESTLINE(dev)) {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_enable_fbc;
- dev_priv->display.disable_fbc = i8xx_disable_fbc;
- }
- /* 855GM needs testing */
- }
-
/* Returns the core display clock speed */
- if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+ if (IS_VALLEYVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ valleyview_get_display_clock_speed;
+ else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
@@ -8934,124 +6734,27 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
- /* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
-
- /* IVB configs may use multi-threaded forcewake */
- if (IS_IVYBRIDGE(dev)) {
- u32 ecobus;
-
- /* A small trick here - if the bios hasn't configured MT forcewake,
- * and if the device is in RC6, then force_wake_mt_get will not wake
- * the device and the ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT forcewake being
- * disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- DRM_DEBUG_KMS("Using MT version of forcewake\n");
- dev_priv->display.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->display.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- }
- }
-
- if (HAS_PCH_IBX(dev))
- dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
- else if (HAS_PCH_CPT(dev))
- dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
-
if (IS_GEN5(dev)) {
- if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
- dev_priv->display.update_wm = ironlake_update_wm;
- else {
- DRM_DEBUG_KMS("Failed to get proper latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_GEN6(dev)) {
- if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- dev_priv->display.init_clock_gating = gen6_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
- dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
} else
dev_priv->display.update_wm = NULL;
- } else if (IS_PINEVIEW(dev)) {
- if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
- dev_priv->is_ddr3,
- dev_priv->fsb_freq,
- dev_priv->mem_freq)) {
- DRM_INFO("failed to find known CxSR latency "
- "(found ddr%s fsb freq %d, mem freq %d), "
- "disabling CxSR\n",
- (dev_priv->is_ddr3 == 1) ? "3" : "2",
- dev_priv->fsb_freq, dev_priv->mem_freq);
- /* Disable CxSR and never update its watermark again */
- pineview_disable_cxsr(dev);
- dev_priv->display.update_wm = NULL;
- } else
- dev_priv->display.update_wm = pineview_update_wm;
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.force_wake_get = vlv_force_wake_get;
+ dev_priv->display.force_wake_put = vlv_force_wake_put;
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
- dev_priv->display.update_wm = g4x_update_wm;
- dev_priv->display.init_clock_gating = g4x_init_clock_gating;
- } else if (IS_GEN4(dev)) {
- dev_priv->display.update_wm = i965_update_wm;
- if (IS_CRESTLINE(dev))
- dev_priv->display.init_clock_gating = crestline_init_clock_gating;
- else if (IS_BROADWATER(dev))
- dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
- } else if (IS_GEN3(dev)) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
- } else if (IS_I865G(dev)) {
- dev_priv->display.update_wm = i830_update_wm;
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
- } else if (IS_I85X(dev)) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i85x_get_fifo_size;
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- } else {
- dev_priv->display.update_wm = i830_update_wm;
- dev_priv->display.init_clock_gating = i830_init_clock_gating;
- if (IS_845G(dev))
- dev_priv->display.get_fifo_size = i845_get_fifo_size;
- else
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
/* Default just returns -ENODEV to indicate unsupported */
@@ -9090,7 +6793,7 @@ static void quirk_pipea_force(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_PIPEA_FORCE;
- DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
+ DRM_INFO("applying pipe a force quirk\n");
}
/*
@@ -9100,6 +6803,18 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ DRM_INFO("applying inverted panel brightness quirk\n");
}
struct intel_quirk {
@@ -9109,7 +6824,7 @@ struct intel_quirk {
void (*hook)(struct drm_device *dev);
};
-struct intel_quirk intel_quirks[] = {
+static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
@@ -9134,6 +6849,9 @@ struct intel_quirk intel_quirks[] = {
/* Sony Vaio Y cannot use SSC on LVDS */
{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
};
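/*
 * Illustrative sketch, not part of the patch: how a quirk entry is matched.
 * Each row above pairs a PCI device ID with a subsystem vendor/device ID,
 * so { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness } fires only on the
 * Acer Aspire 5734Z. The ID field names (device, subsystem_vendor,
 * subsystem_device) are assumed from the truncated struct definition, and
 * the loop is a minimal stand-in for intel_init_quirks(), whose real body
 * is elided from this hunk.
 */
static void sketch_apply_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    d->subsystem_vendor == q->subsystem_vendor &&
		    d->subsystem_device == q->subsystem_device)
			q->hook(dev);
	}
}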
static void intel_init_quirks(struct drm_device *dev)
@@ -9166,7 +6884,7 @@ static void i915_disable_vga(struct drm_device *dev)
vga_reg = VGACNTRL;
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
- outb(1, VGA_SR_INDEX);
+ outb(SR01, VGA_SR_INDEX);
sr1 = inb(VGA_SR_DATA);
outb(sr1 | 1<<5, VGA_SR_DATA);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
@@ -9176,6 +6894,40 @@ static void i915_disable_vga(struct drm_device *dev)
POSTING_READ(vga_reg);
}
+static void ivb_pch_pwm_override(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * IVB has CPU eDP backlight regs too, set things up to let the
+ * PCH regs control the backlight
+ */
+ I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
+ I915_WRITE(BLC_PWM_CPU_CTL, 0);
+ I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
+}
+
+void intel_modeset_init_hw(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_init_clock_gating(dev);
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_enable_drps(dev);
+ ironlake_enable_rc6(dev);
+ intel_init_emon(dev);
+ }
+
+ if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
+
+ if (IS_IVYBRIDGE(dev))
+ ivb_pch_pwm_override(dev);
+}
+
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9189,10 +6941,14 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- dev->mode_config.funcs = (void *)&intel_mode_funcs;
+ dev->mode_config.funcs = &intel_mode_funcs;
intel_init_quirks(dev);
+ intel_init_pm(dev);
+
+ intel_prepare_ddi(dev);
+
intel_init_display(dev);
if (IS_GEN2(dev)) {
@@ -9217,22 +6973,12 @@ void intel_modeset_init(struct drm_device *dev)
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
+ intel_pch_pll_init(dev);
+
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
- intel_init_clock_gating(dev);
-
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- intel_init_emon(dev);
- }
-
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
- gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
-
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
@@ -9240,8 +6986,7 @@ void intel_modeset_init(struct drm_device *dev)
void intel_modeset_gem_init(struct drm_device *dev)
{
- if (IS_IRONLAKE_M(dev))
- ironlake_enable_rc6(dev);
+ intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
}
@@ -9271,12 +7016,15 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
gen6_disable_rps(dev);
if (IS_IRONLAKE_M(dev))
ironlake_disable_rc6(dev);
+ if (IS_VALLEYVIEW(dev))
+ vlv_init_dpio(dev);
+
mutex_unlock(&dev->struct_mutex);
/* Disable the irq before mode object teardown, for the irq might
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4b637919f74f..71c7096e3869 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -688,7 +688,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
- int bpp;
+ int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -702,24 +702,30 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode->clock = intel_dp->panel_fixed_mode->clock;
}
+ DRM_DEBUG_KMS("DP link computation with max lane count %i "
+ "max bw %02x pixel clock %iKHz\n",
+ max_lane_count, bws[max_clock], mode->clock);
+
if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+ mode_rate = intel_dp_link_required(mode->clock, bpp);
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(mode->clock, bpp)
- <= link_avail) {
+ if (mode_rate <= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
- DRM_DEBUG_KMS("Display port link bw %02x lane "
- "count %d clock %d\n",
+ DRM_DEBUG_KMS("DP link bw %02x lane "
+ "count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
- adjusted_mode->clock);
+ adjusted_mode->clock, bpp);
+ DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+ mode_rate, link_avail);
return true;
}
}
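/*
 * Illustrative sketch, not part of the patch: the bandwidth arithmetic
 * behind mode_rate and link_avail above. intel_dp_link_required() and
 * intel_dp_max_data_rate() are not shown in this hunk; these bodies are a
 * period-plausible reconstruction (8b/10b channel coding spends 2 of every
 * 10 link bits), not an authoritative copy.
 */
static int sketch_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel clock in kHz times bits per pixel, net of 8b/10b overhead */
	return (pixel_clock * bpp + 9) / 10;
}

static int sketch_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* usable payload rate across all lanes, again net of 8b/10b */
	return (max_link_clock * max_lanes * 8) / 10;
}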
@@ -1149,6 +1155,7 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
+ ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
pp = ironlake_get_pp_control(dev_priv);
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
@@ -1954,6 +1961,23 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return false;
}
+static void
+intel_dp_probe_oui(struct intel_dp *intel_dp)
+{
+ u8 buf[3];
+
+ if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
+ DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
+ DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+}
+
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
@@ -2137,6 +2161,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
if (status != connector_status_connected)
return status;
+ intel_dp_probe_oui(intel_dp);
+
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
@@ -2438,6 +2464,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
}
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -2483,6 +2510,13 @@ intel_dp_init(struct drm_device *dev, int output_reg)
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
pp_div = I915_READ(PCH_PP_DIVISOR);
+ if (!pp_on || !pp_off || !pp_div) {
+ DRM_INFO("bad panel power sequencing delays, disabling panel\n");
+ intel_dp_encoder_destroy(&intel_dp->base.base);
+ intel_dp_destroy(&intel_connector->base);
+ return;
+ }
+
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
PANEL_POWER_UP_DELAY_SHIFT;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 715afa153025..3e0918834e7e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -45,6 +45,18 @@
ret__; \
})
+#define wait_for_atomic_us(COND, US) ({ \
+ int i, ret__ = -ETIMEDOUT; \
+ for (i = 0; i < (US); i++) { \
+ if ((COND)) { \
+ ret__ = 0; \
+ break; \
+ } \
+ udelay(1); \
+ } \
+ ret__; \
+})
+
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
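/*
 * Illustrative sketch, not part of the patch: wait_for_atomic_us() polls
 * COND once per microsecond up to US times and evaluates to 0 on success or
 * -ETIMEDOUT. Because it only udelay()s, it is usable in atomic context
 * where the sleeping wait_for() is not. Hypothetical usage (the register
 * offset and ready bit are made up):
 */
static int sketch_wait_for_ready(struct drm_i915_private *dev_priv)
{
	/* spin for at most 100us on a hypothetical ready bit */
	if (wait_for_atomic_us(I915_READ(0x6014) & (1 << 0), 100))
		return -ETIMEDOUT;
	return 0;
}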
@@ -171,8 +183,8 @@ struct intel_crtc {
bool cursor_visible;
unsigned int bpp;
- bool no_pll; /* tertiary pipe for IVB */
- bool use_pll_a;
+ /* We can share PLLs across outputs if the timings match */
+ struct intel_pch_pll *pch_pll;
};
struct intel_plane {
@@ -196,6 +208,25 @@ struct intel_plane {
struct drm_intel_sprite_colorkey *key);
};
+struct intel_watermark_params {
+ unsigned long fifo_size;
+ unsigned long max_wm;
+ unsigned long default_wm;
+ unsigned long guard_size;
+ unsigned long cacheline_size;
+};
+
+struct cxsr_latency {
+ int is_desktop;
+ int is_ddr3;
+ unsigned long fsb_freq;
+ unsigned long mem_freq;
+ unsigned long display_sr;
+ unsigned long display_hpll_disable;
+ unsigned long cursor_sr;
+ unsigned long cursor_hpll_disable;
+};
+
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
@@ -207,6 +238,8 @@ struct intel_plane {
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
+#define DIP_AVI_PR_1 0
+#define DIP_AVI_PR_2 1
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
@@ -240,23 +273,36 @@ struct dip_infoframe {
uint8_t ITC_EC_Q_SC;
/* PB4 - VIC 6:0 */
uint8_t VIC;
- /* PB5 - PR 3:0 */
- uint8_t PR;
+ /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
+ uint8_t YQ_CN_PR;
/* PB6 to PB13 */
uint16_t top_bar_end;
uint16_t bottom_bar_start;
uint16_t left_bar_end;
uint16_t right_bar_start;
- } avi;
+ } __attribute__ ((packed)) avi;
struct {
uint8_t vn[8];
uint8_t pd[16];
uint8_t sdi;
- } spd;
+ } __attribute__ ((packed)) spd;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
+struct intel_hdmi {
+ struct intel_encoder base;
+ u32 sdvox_reg;
+ int ddc_bus;
+ int ddi_port;
+ uint32_t color_range;
+ bool has_hdmi_sink;
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ struct dip_infoframe *frame);
+};
+
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@@ -296,8 +342,13 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
-void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
-extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
+extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode);
+extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
+extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
+extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
+ bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev,
@@ -311,6 +362,10 @@ extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
+extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane);
+
+void intel_sanitize_pm(struct drm_device *dev);
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -368,12 +423,9 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void ironlake_disable_rc6(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
-extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
-extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
-extern void gen6_disable_rps(struct drm_device *dev);
-extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -411,16 +463,43 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+extern void intel_prepare_ddi(struct drm_device *dev);
+extern void hsw_fdi_link_train(struct drm_crtc *crtc);
+extern void intel_ddi_init(struct drm_device *dev, enum port port);
/* For use by IVB LP watermark workaround in intel_sprite.c */
-extern void sandybridge_update_wm(struct drm_device *dev);
+extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width,
int pixel_size);
+extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode);
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
+
+/* Power-related functions, located in intel_pm.c */
+extern void intel_init_pm(struct drm_device *dev);
+/* FBC */
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern void intel_update_fbc(struct drm_device *dev);
+/* IPS */
+extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+extern void intel_gpu_ips_teardown(void);
+
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
+
+extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
+extern void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 020a7d7f744d..60ba50b956f2 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -243,7 +243,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* that's not the case.
*/
intel_ddc_get_modes(connector,
- &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
+ intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
if (!list_empty(&connector->probed_modes))
return 1;
@@ -375,7 +375,7 @@ void intel_dvo_init(struct drm_device *dev)
* special cases, but otherwise default to what's defined
* in the spec.
*/
- if (dvo->gpio != 0)
+ if (intel_gmbus_is_port_valid(dvo->gpio))
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
gpio = GMBUS_PORT_SSC;
@@ -386,7 +386,7 @@ void intel_dvo_init(struct drm_device *dev)
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- i2c = &dev_priv->gmbus[gpio].adapter;
+ i2c = intel_gmbus_get_adapter(dev_priv, gpio);
intel_dvo->dev = *dvo;
if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 6e9ee33fd412..bf8690720a0c 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -94,7 +94,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mutex_lock(&dev->struct_mutex);
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(dev, obj, false);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2d7f47b56b6a..2ead3bf7c21d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,19 +37,7 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intel_hdmi {
- struct intel_encoder base;
- u32 sdvox_reg;
- int ddc_bus;
- uint32_t color_range;
- bool has_hdmi_sink;
- bool has_audio;
- enum hdmi_force_audio force_audio;
- void (*write_infoframe)(struct drm_encoder *encoder,
- struct dip_infoframe *frame);
-};
-
-static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_hdmi, base.base);
}
@@ -75,108 +63,246 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame)
frame->checksum = 0x100 - sum;
}
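/*
 * Illustrative sketch, not part of the patch: the assignment above
 * implements the HDMI infoframe rule that the header, payload and checksum
 * bytes must sum to zero mod 256. A standalone restatement:
 */
static u8 sketch_infoframe_csum(const u8 *bytes, unsigned int len)
{
	u8 sum = 0;
	unsigned int i;

	for (i = 0; i < len; i++)	/* header + payload, checksum byte 0 */
		sum += bytes[i];

	return 0x100 - sum;		/* makes the total wrap to zero */
}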
-static u32 intel_infoframe_index(struct dip_infoframe *frame)
+static u32 g4x_infoframe_index(struct dip_infoframe *frame)
{
- u32 flags = 0;
-
switch (frame->type) {
case DIP_TYPE_AVI:
- flags |= VIDEO_DIP_SELECT_AVI;
- break;
+ return VIDEO_DIP_SELECT_AVI;
case DIP_TYPE_SPD:
- flags |= VIDEO_DIP_SELECT_SPD;
- break;
+ return VIDEO_DIP_SELECT_SPD;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
- break;
+ return 0;
}
-
- return flags;
}
-static u32 intel_infoframe_flags(struct dip_infoframe *frame)
+static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
{
- u32 flags = 0;
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return VIDEO_DIP_ENABLE_AVI;
+ case DIP_TYPE_SPD:
+ return VIDEO_DIP_ENABLE_SPD;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
+}
+static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
+{
switch (frame->type) {
case DIP_TYPE_AVI:
- flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
- break;
+ return VIDEO_DIP_ENABLE_AVI_HSW;
case DIP_TYPE_SPD:
- flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
- break;
+ return VIDEO_DIP_ENABLE_SPD_HSW;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
- break;
+ return 0;
}
+}
- return flags;
+static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
+{
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return HSW_TVIDEO_DIP_AVI_DATA(pipe);
+ case DIP_TYPE_SPD:
+ return HSW_TVIDEO_DIP_SPD_DATA(pipe);
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
}
-static void i9xx_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+static void g4x_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
+ u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
-
- /* XXX first guess at handling video port, is this correct? */
+ val &= ~VIDEO_DIP_PORT_MASK;
if (intel_hdmi->sdvox_reg == SDVOB)
- port = VIDEO_DIP_PORT_B;
+ val |= VIDEO_DIP_PORT_B;
else if (intel_hdmi->sdvox_reg == SDVOC)
- port = VIDEO_DIP_PORT_C;
+ val |= VIDEO_DIP_PORT_C;
else
return;
- flags = intel_infoframe_index(frame);
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
- val &= ~VIDEO_DIP_SELECT_MASK;
+ val &= ~g4x_infoframe_enable(frame);
+ val |= VIDEO_DIP_ENABLE;
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+ I915_WRITE(VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
- flags |= intel_infoframe_flags(frame);
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+ I915_WRITE(VIDEO_DIP_CTL, val);
}
-static void ironlake_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+static void ibx_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
- u32 flags, val = I915_READ(reg);
+ u32 val = I915_READ(reg);
+
+ val &= ~VIDEO_DIP_PORT_MASK;
+ switch (intel_hdmi->sdvox_reg) {
+ case HDMIB:
+ val |= VIDEO_DIP_PORT_B;
+ break;
+ case HDMIC:
+ val |= VIDEO_DIP_PORT_C;
+ break;
+ case HDMID:
+ val |= VIDEO_DIP_PORT_D;
+ break;
+ default:
+ return;
+ }
intel_wait_for_vblank(dev, intel_crtc->pipe);
- flags = intel_infoframe_index(frame);
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+ val |= VIDEO_DIP_ENABLE;
+
+ I915_WRITE(reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+}
+
+static void cpt_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ /* The DIP control register spec says that we need to update the AVI
+ * infoframe without clearing its enable bit */
+ if (frame->type == DIP_TYPE_AVI)
+ val |= VIDEO_DIP_ENABLE_AVI;
+ else
+ val &= ~g4x_infoframe_enable(frame);
+
+ val |= VIDEO_DIP_ENABLE;
- I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+ I915_WRITE(reg, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
- flags |= intel_infoframe_flags(frame);
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+ I915_WRITE(reg, val);
}
+
+static void vlv_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+ val |= VIDEO_DIP_ENABLE;
+
+ I915_WRITE(reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+}
+
+static void hsw_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
+ unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(ctl_reg);
+
+ if (data_reg == 0)
+ return;
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ val &= ~hsw_infoframe_enable(frame);
+ I915_WRITE(ctl_reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(data_reg + i, *data);
+ data++;
+ }
+
+ val |= hsw_infoframe_enable(frame);
+ I915_WRITE(ctl_reg, val);
+}
+
static void intel_set_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
@@ -189,7 +315,8 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
intel_hdmi->write_infoframe(encoder, frame);
}
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
@@ -197,10 +324,13 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
.len = DIP_LEN_AVI,
};
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+
intel_set_infoframe(encoder, &avi_if);
}
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe spd_if;
@@ -221,8 +351,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
@@ -259,7 +388,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
- intel_hdmi_set_avi_infoframe(encoder);
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
@@ -334,7 +463,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
edid = drm_get_edid(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -367,7 +497,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
*/
return intel_ddc_get_modes(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
}
static bool
@@ -379,7 +510,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
bool has_audio = false;
edid = drm_get_edid(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
@@ -393,8 +525,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
static int
intel_hdmi_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
+ struct drm_property *property,
+ uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
@@ -453,6 +585,14 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
+static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
+ .dpms = intel_ddi_dpms,
+ .mode_fixup = intel_hdmi_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_ddi_mode_set,
+ .commit = intel_encoder_commit,
+};
+
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.dpms = intel_hdmi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
@@ -542,20 +682,60 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
+ DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
+ intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ intel_hdmi->ddi_port = PORT_B;
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
+ DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
+ intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ intel_hdmi->ddi_port = PORT_C;
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
+ DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
+ intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ intel_hdmi->ddi_port = PORT_D;
+ dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ } else {
+ /* If we got an unknown sdvox_reg, things are pretty much broken
+ * in a way that we should let the kernel know about it */
+ BUG();
}
intel_hdmi->sdvox_reg = sdvox_reg;
if (!HAS_PCH_SPLIT(dev)) {
- intel_hdmi->write_infoframe = i9xx_write_infoframe;
+ intel_hdmi->write_infoframe = g4x_write_infoframe;
I915_WRITE(VIDEO_DIP_CTL, 0);
+ } else if (IS_VALLEYVIEW(dev)) {
+ intel_hdmi->write_infoframe = vlv_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
+ } else if (IS_HASWELL(dev)) {
+ /* FIXME: Haswell has a new set of DIP frame registers, but we are
+ * just doing the minimal required for HDMI to work at this stage.
+ */
+ intel_hdmi->write_infoframe = hsw_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
+ } else if (HAS_PCH_IBX(dev)) {
+ intel_hdmi->write_infoframe = ibx_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(TVIDEO_DIP_CTL(i), 0);
} else {
- intel_hdmi->write_infoframe = ironlake_write_infoframe;
+ intel_hdmi->write_infoframe = cpt_write_infoframe;
for_each_pipe(i)
I915_WRITE(TVIDEO_DIP_CTL(i), 0);
}
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+ if (IS_HASWELL(dev))
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
+ else
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_hdmi_add_properties(intel_hdmi, connector);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8fdc95700218..4a9707dd0f9c 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -35,6 +35,20 @@
#include "i915_drm.h"
#include "i915_drv.h"
+struct gmbus_port {
+ const char *name;
+ int reg;
+};
+
+static const struct gmbus_port gmbus_ports[] = {
+ { "ssc", GPIOB },
+ { "vga", GPIOA },
+ { "panel", GPIOC },
+ { "dpc", GPIOD },
+ { "dpb", GPIOE },
+ { "dpd", GPIOF },
+};
+
/* Intel GPIO access functions */
#define I2C_RISEFALL_TIME 10
@@ -49,10 +63,7 @@ void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_GMBUS0, 0);
- else
- I915_WRITE(GMBUS0, 0);
+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
}
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -140,63 +151,173 @@ static void set_data(void *data, int state_high)
POSTING_READ(bus->gpio_reg);
}
-static bool
+static int
+intel_gpio_pre_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ intel_i2c_reset(dev_priv->dev);
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ udelay(I2C_RISEFALL_TIME);
+ return 0;
+}
+
+static void
+intel_gpio_post_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+}
+
+static void
intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
- static const int map_pin_to_reg[] = {
- 0,
- GPIOB,
- GPIOA,
- GPIOC,
- GPIOD,
- GPIOE,
- 0,
- GPIOF,
- };
struct i2c_algo_bit_data *algo;
- if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
- return false;
-
algo = &bus->bit_algo;
- bus->gpio_reg = map_pin_to_reg[pin];
- if (HAS_PCH_SPLIT(dev_priv->dev))
- bus->gpio_reg += PCH_GPIOA - GPIOA;
+ /* -1 to map pin pair to gmbus index */
+ bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
bus->adapter.algo_data = algo;
algo->setsda = set_data;
algo->setscl = set_clock;
algo->getsda = get_data;
algo->getscl = get_clock;
+ algo->pre_xfer = intel_gpio_pre_xfer;
+ algo->post_xfer = intel_gpio_post_xfer;
algo->udelay = I2C_RISEFALL_TIME;
algo->timeout = usecs_to_jiffies(2200);
algo->data = bus;
+}
+
+static int
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ u32 gmbus1_index)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u16 len = msg->len;
+ u8 *buf = msg->buf;
+
+ I915_WRITE(GMBUS1 + reg_offset,
+ gmbus1_index |
+ GMBUS_CYCLE_WAIT |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+ u32 val, loop = 0;
+ u32 gmbus2;
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_RDY),
+ 50);
+ if (ret)
+ return -ETIMEDOUT;
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+
+ val = I915_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ }
- return true;
+ return 0;
}
static int
-intel_i2c_quirk_xfer(struct intel_gmbus *bus,
- struct i2c_msg *msgs,
- int num)
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
{
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u16 len = msg->len;
+ u8 *buf = msg->buf;
+ u32 val, loop;
+
+ val = loop = 0;
+ while (len && loop < 4) {
+ val |= *buf++ << (8 * loop++);
+ len -= 1;
+ }
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT |
+ (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+ u32 gmbus2;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_RDY),
+ 50);
+ if (ret)
+ return -ETIMEDOUT;
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+ }
+ return 0;
+}
+
+/*
+ * The gmbus controller can combine a 1 or 2 byte write with a read that
+ * immediately follows it by using an "INDEX" cycle.
+ */
+static bool
+gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
+{
+ return (i + 1 < num &&
+ !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+ (msgs[i + 1].flags & I2C_M_RD));
+}
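/*
 * Illustrative sketch, not part of the patch: a message pair that
 * gmbus_is_index_read() accepts. A 1- or 2-byte write immediately followed
 * by a read collapses into a single GMBUS INDEX cycle.
 */
static bool sketch_index_read_pair(void)
{
	u8 offset = 0x00, val;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};

	return gmbus_is_index_read(msgs, 0, ARRAY_SIZE(msgs)); /* true */
}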
+
+static int
+gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u32 gmbus1_index = 0;
+ u32 gmbus5 = 0;
int ret;
- intel_i2c_reset(dev_priv->dev);
+ if (msgs[0].len == 2)
+ gmbus5 = GMBUS_2BYTE_INDEX_EN |
+ msgs[0].buf[1] | (msgs[0].buf[0] << 8);
+ if (msgs[0].len == 1)
+ gmbus1_index = GMBUS_CYCLE_INDEX |
+ (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
- intel_i2c_quirk_set(dev_priv, true);
- set_data(bus, 1);
- set_clock(bus, 1);
- udelay(I2C_RISEFALL_TIME);
+ /* GMBUS5 holds 16-bit index */
+ if (gmbus5)
+ I915_WRITE(GMBUS5 + reg_offset, gmbus5);
- ret = i2c_bit_algo.master_xfer(&bus->adapter, msgs, num);
+ ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
- set_data(bus, 1);
- set_clock(bus, 1);
- intel_i2c_quirk_set(dev_priv, false);
+ /* Clear GMBUS5 after each index transfer */
+ if (gmbus5)
+ I915_WRITE(GMBUS5 + reg_offset, 0);
return ret;
}
@@ -210,117 +331,108 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- int i, reg_offset, ret;
+ int i, reg_offset;
+ int ret = 0;
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit) {
- ret = intel_i2c_quirk_xfer(bus, msgs, num);
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
goto out;
}
- reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
+ reg_offset = dev_priv->gpio_mmio_base;
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
for (i = 0; i < num; i++) {
- u16 len = msgs[i].len;
- u8 *buf = msgs[i].buf;
-
- if (msgs[i].flags & I2C_M_RD) {
- I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT |
- (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
- (len << GMBUS_BYTE_COUNT_SHIFT) |
- (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_READ | GMBUS_SW_RDY);
- POSTING_READ(GMBUS2+reg_offset);
- do {
- u32 val, loop = 0;
-
- if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
- goto timeout;
- if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
- goto clear_err;
-
- val = I915_READ(GMBUS3 + reg_offset);
- do {
- *buf++ = val & 0xff;
- val >>= 8;
- } while (--len && ++loop < 4);
- } while (len);
+ u32 gmbus2;
+
+ if (gmbus_is_index_read(msgs, i, num)) {
+ ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+ i += 1; /* set i to the index of the read xfer */
+ } else if (msgs[i].flags & I2C_M_RD) {
+ ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
- u32 val, loop;
-
- val = loop = 0;
- do {
- val |= *buf++ << (8 * loop);
- } while (--len && ++loop < 4);
-
- I915_WRITE(GMBUS3 + reg_offset, val);
- I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT |
- (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
- (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
- (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
- POSTING_READ(GMBUS2+reg_offset);
-
- while (len) {
- if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
- goto timeout;
- if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
- goto clear_err;
-
- val = loop = 0;
- do {
- val |= *buf++ << (8 * loop);
- } while (--len && ++loop < 4);
-
- I915_WRITE(GMBUS3 + reg_offset, val);
- POSTING_READ(GMBUS2+reg_offset);
- }
+ ret = gmbus_xfer_write(dev_priv, &msgs[i]);
}
- if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+ if (ret == -ETIMEDOUT)
+ goto timeout;
+ if (ret == -ENXIO)
+ goto clear_err;
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
+ 50);
+ if (ret)
goto timeout;
- if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ if (gmbus2 & GMBUS_SATOER)
goto clear_err;
}
- goto done;
+ /* Generate a STOP condition on the bus. Note that gmbus can't generate
+ * a STOP on the very first cycle. To simplify the code we
+ * unconditionally generate the STOP condition with an additional gmbus
+ * cycle. */
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+
+ /* Mark the GMBUS interface as disabled after waiting for idle.
+ * We will re-enable it at the start of the next xfer,
+ * till then let it sleep.
+ */
+ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+ 10)) {
+ DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
+ adapter->name);
+ ret = -ETIMEDOUT;
+ }
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+ ret = ret ?: i;
+ goto out;
clear_err:
+ /*
+ * Wait for bus to IDLE before clearing NAK.
+ * If we clear the NAK while bus is still active, then it will stay
+ * active and the next transaction may fail.
+ */
+ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+ 10))
+ DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
+ adapter->name);
+
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
I915_WRITE(GMBUS1 + reg_offset, 0);
+ I915_WRITE(GMBUS0 + reg_offset, 0);
-done:
- /* Mark the GMBUS interface as disabled after waiting for idle.
- * We will re-enable it at the start of the next xfer,
- * till then let it sleep.
+ DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+
+ /*
+ * If no ACK is received during the address phase of a transaction,
+ * the adapter must report -ENXIO.
+ * It is not clear what to return if no ACK is received at other times.
+ * So, we always return -ENXIO in all NAK cases, to ensure we send
+ * it at least during the one case that is specified.
*/
- if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10))
- DRM_INFO("GMBUS timed out waiting for idle\n");
- I915_WRITE(GMBUS0 + reg_offset, 0);
- ret = i;
+ ret = -ENXIO;
goto out;
timeout:
- DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
- bus->reg0 & 0xff, bus->adapter.name);
+ DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
- if (!bus->has_gpio) {
- ret = -EIO;
- } else {
- bus->force_bit = true;
- ret = intel_i2c_quirk_xfer(bus, msgs, num);
- }
+ bus->force_bit = true;
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+
out:
mutex_unlock(&dev_priv->gmbus_mutex);
return ret;
@@ -346,35 +458,26 @@ static const struct i2c_algorithm gmbus_algorithm = {
*/
int intel_setup_gmbus(struct drm_device *dev)
{
- static const char *names[GMBUS_NUM_PORTS] = {
- "disabled",
- "ssc",
- "vga",
- "panel",
- "dpc",
- "dpb",
- "reserved",
- "dpd",
- };
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
- dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
- GFP_KERNEL);
- if (dev_priv->gmbus == NULL)
- return -ENOMEM;
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+ else
+ dev_priv->gpio_mmio_base = 0;
mutex_init(&dev_priv->gmbus_mutex);
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ u32 port = i + 1; /* +1 to map gmbus index to pin pair */
bus->adapter.owner = THIS_MODULE;
bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"i915 gmbus %s",
- names[i]);
+ gmbus_ports[i].name);
bus->adapter.dev.parent = &dev->pdev->dev;
bus->dev_priv = dev_priv;
@@ -385,13 +488,13 @@ int intel_setup_gmbus(struct drm_device *dev)
goto err;
/* By default use a conservative clock rate */
- bus->reg0 = i | GMBUS_RATE_100KHZ;
+ bus->reg0 = port | GMBUS_RATE_100KHZ;
- bus->has_gpio = intel_gpio_setup(bus, i);
-
- /* XXX force bit banging until GMBUS is fully debugged */
- if (bus->has_gpio)
+ /* gmbus seems to be broken on i830 */
+ if (IS_I830(dev))
bus->force_bit = true;
+
+ intel_gpio_setup(bus, port);
}
intel_i2c_reset(dev_priv->dev);
@@ -403,11 +506,18 @@ err:
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
}
- kfree(dev_priv->gmbus);
- dev_priv->gmbus = NULL;
return ret;
}
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+ unsigned port)
+{
+ WARN_ON(!intel_gmbus_is_port_valid(port));
+ /* -1 to map pin pair to gmbus index */
+ return (intel_gmbus_is_port_valid(port)) ?
+ &dev_priv->gmbus[port - 1].adapter : NULL;
+}
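/*
 * Illustrative sketch, not part of the patch: callers now fetch adapters
 * through this helper with a 1-based GMBUS_PORT_* pin pair (hence the -1
 * above) instead of indexing dev_priv->gmbus directly. Typical use,
 * mirroring the drm_get_edid() call sites converted elsewhere in this
 * patch:
 */
static struct edid *sketch_fetch_edid(struct drm_connector *connector,
				      struct drm_i915_private *dev_priv)
{
	struct i2c_adapter *adapter =
		intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC);

	return adapter ? drm_get_edid(connector, adapter) : NULL;
}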
+
void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
@@ -419,8 +529,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- if (bus->has_gpio)
- bus->force_bit = force_bit;
+ bus->force_bit = force_bit;
}
void intel_teardown_gmbus(struct drm_device *dev)
@@ -435,7 +544,4 @@ void intel_teardown_gmbus(struct drm_device *dev)
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
}
-
- kfree(dev_priv->gmbus);
- dev_priv->gmbus = NULL;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 9c71183629c2..9dee82350def 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -480,7 +480,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
+ DRM_INFO("Skipping forced modeset for %s\n", id->ident);
return 1;
}
@@ -628,7 +628,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
+ DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
return 1;
}
@@ -851,8 +851,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
child->device_type != DEVICE_TYPE_LFP)
continue;
- if (child->i2c_pin)
- *i2c_pin = child->i2c_pin;
+ if (intel_gmbus_is_port_valid(child->i2c_pin))
+ *i2c_pin = child->i2c_pin;
/* However, we cannot trust the BIOS writers to populate
* the VBT correctly. Since LVDS requires additional
@@ -993,7 +993,8 @@ bool intel_lvds_init(struct drm_device *dev)
* preferred mode is the right one.
*/
intel_lvds->edid = drm_get_edid(connector,
- &dev_priv->gmbus[pin].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ pin));
if (intel_lvds->edid) {
if (drm_add_edid_modes(connector,
intel_lvds->edid)) {
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index d1928e79d9b6..d67ec3a51e42 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -56,7 +56,8 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
}
};
- return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
+ return i2c_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus),
+ msgs, 2) == 2;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 289140bc83cb..18bd0af855dc 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -25,6 +25,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/acpi.h>
#include <linux/acpi_io.h>
#include <acpi/video.h>
@@ -149,7 +151,7 @@ struct opregion_asle {
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 max;
if (!(bclp & ASLE_BCLP_VALID))
@@ -161,7 +163,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
max = intel_panel_get_max_backlight(dev);
intel_panel_set_backlight(dev, bclp * max / 255);
- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+ iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
return 0;
}
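/*
 * Illustrative sketch, not part of the patch: the __iomem annotations and
 * ioread32()/iowrite32() conversions throughout this file exist because the
 * opregion is ACPI-mapped MMIO and must not be dereferenced like ordinary
 * memory. Before/after, using the fields touched above (the write-back
 * value here is purely illustrative):
 */
static u32 sketch_opregion_access(struct opregion_asle __iomem *asle)
{
	/* wrong: plain dereference of an __iomem pointer (sparse warns) */
	/* u32 req = asle->aslc & ASLE_REQ_MSK; */

	/* right: go through the MMIO accessors, as this patch now does */
	u32 req = ioread32(&asle->aslc) & ASLE_REQ_MSK;

	iowrite32(req, &asle->aslc);
	return req;
}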
@@ -198,14 +200,14 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
void intel_opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
- asle_req = asle->aslc & ASLE_REQ_MSK;
+ asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
if (!asle_req) {
DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -213,31 +215,31 @@ void intel_opregion_asle_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_ALS_ILLUM)
- asle_stat |= asle_set_als_illum(dev, asle->alsi);
+ asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
if (asle_req & ASLE_SET_PFIT)
- asle_stat |= asle_set_pfit(dev, asle->pfit);
+ asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
if (asle_req & ASLE_SET_PWM_FREQ)
- asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+ asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
- asle->aslc = asle_stat;
+ iowrite32(asle_stat, &asle->aslc);
}
void intel_opregion_gse_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
- asle_req = asle->aslc & ASLE_REQ_MSK;
+ asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
if (!asle_req) {
DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -250,7 +252,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
if (asle_req & ASLE_SET_PFIT) {
DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -262,7 +264,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
asle_stat |= ASLE_PWM_FREQ_FAILED;
}
- asle->aslc = asle_stat;
+ iowrite32(asle_stat, &asle->aslc);
}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
@@ -272,15 +274,16 @@ void intel_opregion_gse_intr(struct drm_device *dev)
void intel_opregion_enable_asle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
if (asle) {
if (IS_MOBILE(dev))
intel_enable_asle(dev);
- asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
- ASLE_PFMB_EN;
- asle->ardy = 1;
+ iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
+ ASLE_PFMB_EN,
+ &asle->tche);
+ iowrite32(1, &asle->ardy);
}
}
@@ -298,7 +301,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
Linux, these are handled by the dock, button and video drivers.
*/
- struct opregion_acpi *acpi;
+ struct opregion_acpi __iomem *acpi;
struct acpi_bus_event *event = data;
int ret = NOTIFY_OK;
@@ -310,10 +313,11 @@ static int intel_opregion_video_event(struct notifier_block *nb,
acpi = system_opregion->acpi;
- if (event->type == 0x80 && !(acpi->cevt & 0x1))
+ if (event->type == 0x80 &&
+ (ioread32(&acpi->cevt) & 1) == 0)
ret = NOTIFY_BAD;
- acpi->csts = 0;
+ iowrite32(0, &acpi->csts);
return ret;
}
@@ -337,6 +341,7 @@ static void intel_didl_outputs(struct drm_device *dev)
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
unsigned long long device_id;
acpi_status status;
+ u32 temp;
int i = 0;
handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
@@ -355,7 +360,7 @@ static void intel_didl_outputs(struct drm_device *dev)
}
if (!acpi_video_bus) {
- printk(KERN_WARNING "No ACPI video bus found\n");
+ pr_warn("No ACPI video bus found\n");
return;
}
@@ -371,7 +376,8 @@ static void intel_didl_outputs(struct drm_device *dev)
if (ACPI_SUCCESS(status)) {
if (!device_id)
goto blind_set;
- opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+ iowrite32((u32)(device_id & 0x0f0f),
+ &opregion->acpi->didl[i]);
i++;
}
}
@@ -379,7 +385,7 @@ static void intel_didl_outputs(struct drm_device *dev)
end:
/* If fewer than 8 outputs, the list must be null terminated */
if (i < 8)
- opregion->acpi->didl[i] = 0;
+ iowrite32(0, &opregion->acpi->didl[i]);
return;
blind_set:
@@ -413,7 +419,9 @@ blind_set:
output_type = ACPI_LVDS_OUTPUT;
break;
}
- opregion->acpi->didl[i] |= (1<<31) | output_type | i;
+ temp = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(temp | (1<<31) | output_type | i,
+ &opregion->acpi->didl[i]);
i++;
}
goto end;
@@ -434,8 +442,8 @@ void intel_opregion_init(struct drm_device *dev)
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
* We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
+ iowrite32(0, &opregion->acpi->csts);
+ iowrite32(1, &opregion->acpi->drdy);
system_opregion = opregion;
register_acpi_notifier(&intel_opregion_notifier);
@@ -454,7 +462,7 @@ void intel_opregion_fini(struct drm_device *dev)
return;
if (opregion->acpi) {
- opregion->acpi->drdy = 0;
+ iowrite32(0, &opregion->acpi->drdy);
system_opregion = NULL;
unregister_acpi_notifier(&intel_opregion_notifier);
@@ -474,8 +482,9 @@ int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
- void *base;
+ void __iomem *base;
u32 asls, mboxes;
+ char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
@@ -489,7 +498,9 @@ int intel_opregion_setup(struct drm_device *dev)
if (!base)
return -ENOMEM;
- if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ memcpy_fromio(buf, base, sizeof(buf));
+
+ if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
@@ -499,7 +510,7 @@ int intel_opregion_setup(struct drm_device *dev)
opregion->lid_state = base + ACPI_CLID;
- mboxes = opregion->header->mboxes;
+ mboxes = ioread32(&opregion->header->mboxes);
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 80b331c322fb..458743da3774 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -187,14 +187,14 @@ struct intel_overlay {
void (*flip_tail)(struct intel_overlay *);
};
-static struct overlay_registers *
+static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
overlay->reg_bo->gtt_offset);
@@ -203,7 +203,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
}
static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
io_mapping_unmap(regs);
@@ -215,20 +215,21 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
BUG_ON(overlay->last_flip_req);
- ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+ ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
- true);
+ ret = i915_wait_request(ring, overlay->last_flip_req);
if (ret)
return ret;
+ i915_gem_retire_requests(dev);
overlay->last_flip_req = 0;
return 0;
@@ -262,7 +263,7 @@ i830_activate_pipe_a(struct drm_device *dev)
DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
mode = drm_mode_duplicate(dev, &vesa_640x480);
- drm_mode_set_crtcinfo(mode, 0);
+
if (!drm_crtc_helper_set_mode(&crtc->base, mode,
crtc->base.x, crtc->base.y,
crtc->base.fb))
@@ -287,6 +288,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
int pipe_a_quirk = 0;
int ret;
@@ -306,17 +308,17 @@ static int intel_overlay_on(struct intel_overlay *overlay)
goto out;
}
- ret = BEGIN_LP_RING(4);
+ ret = intel_ring_begin(ring, 4);
if (ret) {
kfree(request);
goto out;
}
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
- OUT_RING(overlay->flip_addr | OFC_UPDATE);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
@@ -332,6 +334,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@@ -351,16 +354,16 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- ret = BEGIN_LP_RING(2);
+ ret = intel_ring_begin(ring, 2);
if (ret) {
kfree(request);
return ret;
}
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- OUT_RING(flip_addr);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_advance(ring);
- ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+ ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -401,6 +404,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
struct drm_i915_gem_request *request;
int ret;
@@ -417,20 +421,20 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- ret = BEGIN_LP_RING(6);
+ ret = intel_ring_begin(ring, 6);
if (ret) {
kfree(request);
return ret;
}
/* wait for overlay to go idle */
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_advance(ring);
return intel_overlay_do_wait_request(overlay, request,
intel_overlay_off_tail);
@@ -442,15 +446,16 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
- true);
+ ret = i915_wait_request(ring, overlay->last_flip_req);
if (ret)
return ret;
+ i915_gem_retire_requests(dev);
if (overlay->flip_tail)
overlay->flip_tail(overlay);
@@ -467,6 +472,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
/* Only wait if there is actually an old frame to release to
@@ -483,15 +489,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (request == NULL)
return -ENOMEM;
- ret = BEGIN_LP_RING(2);
+ ret = intel_ring_begin(ring, 2);
if (ret) {
kfree(request);
return ret;
}
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
ret = intel_overlay_do_wait_request(overlay, request,
intel_overlay_release_old_vid_tail);
@@ -619,14 +625,15 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0x3000, 0x0800, 0x3000
};
-static void update_polyphase_filter(struct overlay_registers *regs)
+static void update_polyphase_filter(struct overlay_registers __iomem *regs)
{
- memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
- memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+ memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
+ sizeof(uv_static_hcoeffs));
}
static bool update_scaling_factors(struct intel_overlay *overlay,
- struct overlay_registers *regs,
+ struct overlay_registers __iomem *regs,
struct put_image_params *params)
{
/* fixed point with a 12 bit shift */
@@ -665,16 +672,19 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
overlay->old_xscale = xscale;
overlay->old_yscale = yscale;
- regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
- ((xscale >> FP_SHIFT) << 16) |
- ((xscale & FRACT_MASK) << 3));
+ iowrite32(((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3),
+ &regs->YRGBSCALE);
- regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
- ((xscale_UV >> FP_SHIFT) << 16) |
- ((xscale_UV & FRACT_MASK) << 3));
+ iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3),
+ &regs->UVSCALE);
- regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
- ((yscale_UV >> FP_SHIFT) << 0)));
+ iowrite32((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)),
+ &regs->UVSCALEV);
if (scale_changed)
update_polyphase_filter(regs);
@@ -683,30 +693,32 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
}
static void update_colorkey(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
u32 key = overlay->color_key;
switch (overlay->crtc->base.fb->bits_per_pixel) {
case 8:
- regs->DCLRKV = 0;
- regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ iowrite32(0, &regs->DCLRKV);
+ iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
break;
case 16:
if (overlay->crtc->base.fb->depth == 15) {
- regs->DCLRKV = RGB15_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
+ iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
+ &regs->DCLRKM);
} else {
- regs->DCLRKV = RGB16_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV);
+ iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE,
+ &regs->DCLRKM);
}
break;
case 24:
case 32:
- regs->DCLRKV = key;
- regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ iowrite32(key, &regs->DCLRKV);
+ iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
break;
}
}
@@ -761,9 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct put_image_params *params)
{
int ret, tmp_width;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
bool scale_changed = false;
struct drm_device *dev = overlay->dev;
+ u32 swidth, swidthsw, sheight, ostride;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
@@ -782,16 +795,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
if (!overlay->active) {
+ u32 oconfig;
regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
}
- regs->OCONFIG = OCONF_CC_OUT_8BIT;
+ oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(overlay->dev))
- regs->OCONFIG |= OCONF_CSC_MODE_BT709;
- regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+ oconfig |= OCONF_CSC_MODE_BT709;
+ oconfig |= overlay->crtc->pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
+ iowrite32(oconfig, &regs->OCONFIG);
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
@@ -805,42 +820,46 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
}
- regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
- regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+ iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
+ iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
if (params->format & I915_OVERLAY_YUV_PACKED)
tmp_width = packed_width_bytes(params->format, params->src_w);
else
tmp_width = params->src_w;
- regs->SWIDTH = params->src_w;
- regs->SWIDTHSW = calc_swidthsw(overlay->dev,
- params->offset_Y, tmp_width);
- regs->SHEIGHT = params->src_h;
- regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
- regs->OSTRIDE = params->stride_Y;
+ swidth = params->src_w;
+ swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+ sheight = params->src_h;
+ iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+ ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
int uv_hscale = uv_hsubsampling(params->format);
int uv_vscale = uv_vsubsampling(params->format);
u32 tmp_U, tmp_V;
- regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+ swidth |= (params->src_w/uv_hscale) << 16;
tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
params->src_w/uv_hscale);
tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
params->src_w/uv_hscale);
- regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
- regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
- regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
- regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
- regs->OSTRIDE |= params->stride_UV << 16;
+ swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
+ sheight |= (params->src_h/uv_vscale) << 16;
+ iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
+ iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+ ostride |= params->stride_UV << 16;
}
+ iowrite32(swidth, &regs->SWIDTH);
+ iowrite32(swidthsw, &regs->SWIDTHSW);
+ iowrite32(sheight, &regs->SHEIGHT);
+ iowrite32(ostride, &regs->OSTRIDE);
+
scale_changed = update_scaling_factors(overlay, regs, params);
update_colorkey(overlay, regs);
- regs->OCMD = overlay_cmd_reg(params);
+ iowrite32(overlay_cmd_reg(params), &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
@@ -860,7 +879,7 @@ out_unpin:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
struct drm_device *dev = overlay->dev;
int ret;
@@ -879,7 +898,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
return ret;
regs = intel_overlay_map_regs(overlay);
- regs->OCMD = 0;
+ iowrite32(0, &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_off(overlay);
@@ -1109,11 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct put_image_params *params;
int ret;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@@ -1250,10 +1265,11 @@ out_free:
}
static void update_reg_attrs(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
- regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
- regs->OCLRC1 = overlay->saturation;
+ iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+ &regs->OCLRC0);
+ iowrite32(overlay->saturation, &regs->OCLRC1);
}
static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
@@ -1306,14 +1322,10 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_intel_overlay_attrs *attrs = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
int ret;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@@ -1396,7 +1408,7 @@ void intel_setup_overlay(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
int ret;
if (!HAS_OVERLAY(dev))
@@ -1451,7 +1463,7 @@ void intel_setup_overlay(struct drm_device *dev)
if (!regs)
goto out_unpin_bo;
- memset(regs, 0, sizeof(struct overlay_registers));
+ memset_io(regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(regs);
update_reg_attrs(overlay, regs);
@@ -1499,14 +1511,17 @@ struct intel_overlay_error_state {
u32 isr;
};
-static struct overlay_registers *
+static struct overlay_registers __iomem *
intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
{
drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ /* Cast to make sparse happy, but it's wc memory anyway, so
+ * equivalent to the wc io mapping on X86. */
+ regs = (struct overlay_registers __iomem *)
+ overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
overlay->reg_bo->gtt_offset);
@@ -1515,7 +1530,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
}
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
io_mapping_unmap_atomic(regs);
@@ -1540,9 +1555,9 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+ error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
else
- error->base = (long) overlay->reg_bo->gtt_offset;
+ error->base = overlay->reg_bo->gtt_offset;
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 48177ec4720e..2a1625d84a69 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -28,6 +28,9 @@
* Chris Wilson <chris@chris-wilson.co.uk>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/moduleparam.h>
#include "intel_drv.h"
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
@@ -169,7 +172,7 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
/* XXX add code here to query mode clock or hardware clock
* and program max PWM appropriately.
*/
- printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+ pr_warn_once("fixme: max PWM is zero\n");
return 1;
}
@@ -189,6 +192,27 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
return max;
}
+static int i915_panel_invert_brightness;
+MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
+module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
+static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (i915_panel_invert_brightness < 0)
+ return val;
+
+ if (i915_panel_invert_brightness > 0 ||
+ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
+ return intel_panel_get_max_backlight(dev) - val;
+
+ return val;
+}
+
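+/* A worked illustration (numbers are hypothetical): with a maximum backlight
+ * of 100 and inversion in effect, a hardware PWM value of 30 is reported to
+ * userspace as 100 - 30 = 70, and a requested level of 70 is programmed as
+ * 30. The same helper is applied on both the get and set paths below, so
+ * the inversion stays transparent to userspace.
+ */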
u32 intel_panel_get_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -209,6 +233,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
}
}
+ val = intel_panel_compute_brightness(dev, val);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
}
@@ -226,6 +251,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
u32 tmp;
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+ level = intel_panel_compute_brightness(dev, level);
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
@@ -342,6 +368,7 @@ int intel_panel_setup_backlight(struct drm_device *dev)
else
return -ENODEV;
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = intel_panel_get_max_backlight(dev);
dev_priv->backlight =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
new file mode 100644
index 000000000000..8e79ff67ec98
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -0,0 +1,3796 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "../../../platform/x86/intel_ips.h"
+#include <linux/module.h>
+
+/* FBC, or Frame Buffer Compression, is a technique employed to compress the
+ * framebuffer contents in-memory, aiming at reducing the required bandwidth
+ * during in-memory transfers and, therefore, the power consumption.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns.
+ *
+ * FBC-related functionality can be enabled by means of the
+ * i915.i915_enable_fbc parameter.
+ */
+
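+/* Usage sketch (illustrative): the per-chip default can be overridden at
+ * module load time, e.g.
+ *
+ *	modprobe i915 i915_enable_fbc=1
+ *
+ * where -1 keeps the per-chip default, 0 forces FBC off and 1 forces it on,
+ * matching the handling of i915_enable_fbc in intel_update_fbc() below.
+ */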
+static void i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
+ int plane, i;
+ u32 fbc_ctl, fbc_ctl2;
+
+ cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
+
+ /* FBC_CTL wants 64B units */
+ cfb_pitch = (cfb_pitch / 64) - 1;
+ plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= plane;
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+
+ /* enable it... */
+ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= obj->fence_reg;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
+ cfb_pitch, crtc->y, intel_crtc->plane);
+}
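+/* Illustrative arithmetic, assuming the FBC_LL_SIZE of 1536 defined in
+ * i915_reg.h: a hypothetical 2 MiB compressed buffer gives cfb_pitch =
+ * 2097152 / 1536 = 1365 bytes, clamped to the framebuffer pitch if that is
+ * smaller, and programmed as (1365 / 64) - 1 = 20 in the 64-byte units
+ * FBC_CTL expects.
+ */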
+
+static bool i8xx_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+static void g4x_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool g4x_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
+ gen6_gt_force_wake_get(dev_priv);
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl &= DPFC_RESERVED;
+ dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+ /* Set persistent mode for front-buffer rendering, a la X. */
+ dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
+ dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+ I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ sandybridge_blit_fbc_update(dev);
+ }
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+static void ironlake_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool ironlake_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.fbc_enabled)
+ return false;
+
+ return dev_priv->display.fbc_enabled(dev);
+}
+
+static void intel_fbc_work_fn(struct work_struct *__work)
+{
+ struct intel_fbc_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_fbc_work, work);
+ struct drm_device *dev = work->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ if (work == dev_priv->fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc,
+ work->interval);
+
+ dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->cfb_fb = work->crtc->fb->base.id;
+ dev_priv->cfb_y = work->crtc->y;
+ }
+
+ dev_priv->fbc_work = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(work);
+}
+
+static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ if (cancel_delayed_work(&dev_priv->fbc_work->work))
+ /* tasklet was killed before being run, clean up */
+ kfree(dev_priv->fbc_work);
+
+ /* Mark the work as no longer wanted so that if it does
+ * wake up (because the work was already running and waiting
+ * for our mutex), it will discover that it is no longer
+ * necessary to run.
+ */
+ dev_priv->fbc_work = NULL;
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL) {
+ dev_priv->display.enable_fbc(crtc, interval);
+ return;
+ }
+
+ work->crtc = crtc;
+ work->fb = crtc->fb;
+ work->interval = interval;
+ INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
+
+ dev_priv->fbc_work = work;
+
+ DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display to settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ */
+ schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+ dev_priv->cfb_plane = -1;
+}
+
+/**
+ * intel_update_fbc - enable/disable FBC as needed
+ * @dev: the drm_device
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= 2048 in width, 1536 in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+void intel_update_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int enable_fbc;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!i915_powersave)
+ return;
+
+ if (!I915_HAS_FBC(dev))
+ return;
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - more than one pipe is active
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+ if (tmp_crtc->enabled && tmp_crtc->fb) {
+ if (crtc) {
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
+ }
+
+ if (!crtc || crtc->fb == NULL) {
+ DRM_DEBUG_KMS("no output, disabling\n");
+ dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ goto out_disable;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->fb;
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ enable_fbc = i915_enable_fbc;
+ if (enable_fbc < 0) {
+ DRM_DEBUG_KMS("fbc set to per-chip default\n");
+ enable_fbc = 1;
+ if (INTEL_INFO(dev)->gen <= 6)
+ enable_fbc = 0;
+ }
+ if (!enable_fbc) {
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+ goto out_disable;
+ }
+ if (intel_fb->obj->base.size > dev_priv->cfb_size) {
+ DRM_DEBUG_KMS("framebuffer too large, disabling "
+ "compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ goto out_disable;
+ }
+ if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+ (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
+ dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ goto out_disable;
+ }
+ if ((crtc->mode.hdisplay > 2048) ||
+ (crtc->mode.vdisplay > 1536)) {
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ goto out_disable;
+ }
+ if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ goto out_disable;
+ }
+
+ /* The use of a CPU fence is mandatory in order to detect writes
+ * by the CPU to the scanout and trigger updates to the FBC.
+ */
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_NOT_TILED;
+ goto out_disable;
+ }
+
+ /* If the kernel debugger is active, always disable compression */
+ if (in_dbg_master())
+ goto out_disable;
+
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_fb == fb->base.id &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two FBC valid configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_disable_fbc(dev);
+ }
+
+ intel_enable_fbc(crtc, 500);
+ return;
+
+out_disable:
+ /* Multiple disables should be harmless */
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ intel_disable_fbc(dev);
+ }
+}
+
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ tmp = I915_READ(CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+
+ /* detect pineview DDR3 setting */
+ tmp = I915_READ(CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 ddrpll, csipll;
+
+ ddrpll = I915_READ16(DDRMPLL1);
+ csipll = I915_READ16(CSIPLL0);
+
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ dev_priv->r_t = dev_priv->mem_freq;
+
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+
+ if (dev_priv->fsb_freq == 3200) {
+ dev_priv->c_m = 0;
+ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+ dev_priv->c_m = 1;
+ } else {
+ dev_priv->c_m = 2;
+ }
+}
+
+static const struct cxsr_latency cxsr_latency_table[] = {
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
+};
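+/* The first four columns of each row (is_desktop, is_ddr3, fsb_freq,
+ * mem_freq) form the lookup key used by intel_get_cxsr_latency() below;
+ * the remaining four carry the self-refresh and HPLL-off latencies, in ns,
+ * that pineview_update_wm() feeds into the watermark calculation.
+ */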
+
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+ int is_ddr3,
+ int fsb,
+ int mem)
+{
+ const struct cxsr_latency *latency;
+ int i;
+
+ if (fsb == 0 || mem == 0)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+ latency = &cxsr_latency_table[i];
+ if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
+ }
+
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
+}
+
+static void pineview_disable_cxsr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* deactivate cxsr */
+ I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+}
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int latency_ns = 5000;
+
+static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ if (plane)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
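+/* A worked example with a hypothetical DSPARB value: if the low field (the
+ * plane A end) reads 64 and the field at DSPARB_CSTART_SHIFT (where plane C
+ * would start) reads 96, plane A gets 64 FIFO entries and plane B gets
+ * 96 - 64 = 32; the register describes how one shared FIFO is split.
+ */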
+
+static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x1ff;
+ if (plane)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static int i845_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+/* Pineview has different values for various configs */
+static const struct intel_watermark_params pineview_display_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_HPLLOFF_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_cursor_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params g4x_wm_info = {
+ G4X_FIFO_SIZE,
+ G4X_MAX_WM,
+ G4X_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params g4x_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_wm_info = {
+ VALLEYVIEW_FIFO_SIZE,
+ VALLEYVIEW_MAX_WM,
+ VALLEYVIEW_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ VALLEYVIEW_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i965_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ I915_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i945_wm_info = {
+ I945_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i915_wm_info = {
+ I915_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i855_wm_info = {
+ I855GM_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i830_wm_info = {
+ I830_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params ironlake_display_wm_info = {
+ ILK_DISPLAY_FIFO,
+ ILK_DISPLAY_MAXWM,
+ ILK_DISPLAY_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_wm_info = {
+ ILK_CURSOR_FIFO,
+ ILK_CURSOR_MAXWM,
+ ILK_CURSOR_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_display_srwm_info = {
+ ILK_DISPLAY_SR_FIFO,
+ ILK_DISPLAY_MAX_SRWM,
+ ILK_DISPLAY_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_srwm_info = {
+ ILK_CURSOR_SR_FIFO,
+ ILK_CURSOR_MAX_SRWM,
+ ILK_CURSOR_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params sandybridge_display_wm_info = {
+ SNB_DISPLAY_FIFO,
+ SNB_DISPLAY_MAXWM,
+ SNB_DISPLAY_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_wm_info = {
+ SNB_CURSOR_FIFO,
+ SNB_CURSOR_MAXWM,
+ SNB_CURSOR_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_display_srwm_info = {
+ SNB_DISPLAY_SR_FIFO,
+ SNB_DISPLAY_MAX_SRWM,
+ SNB_DISPLAY_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
+ SNB_CURSOR_SR_FIFO,
+ SNB_CURSOR_MAX_SRWM,
+ SNB_CURSOR_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
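+/* Each positional initializer above fills struct intel_watermark_params in
+ * the order {fifo_size, max_wm, default_wm, guard_size, cacheline_size},
+ * which is the order intel_calculate_wm() below consumes them in.
+ */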
+
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @clock_in_khz: pixel clock
+ * @wm: chip FIFO params
+ * @fifo_size: size of the FIFO to allocate from
+ * @pixel_size: display pixel size
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again). Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size. When it reaches the watermark level, it'll start
+ * fetching FIFO-line-sized chunks from memory until the FIFO fills
+ * past the watermark point. If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
+ const struct intel_watermark_params *wm,
+ int fifo_size,
+ int pixel_size,
+ unsigned long latency_ns)
+{
+ long entries_required, wm_size;
+
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * clocks go from a few thousand to several hundred thousand.
+ * latency is usually a few thousand
+ */
+ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+ 1000;
+ entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
+
+ DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
+
+ wm_size = fifo_size - (entries_required + wm->guard_size);
+
+ DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
+
+ /* Don't promote wm_size to unsigned... */
+ if (wm_size > (long)wm->max_wm)
+ wm_size = wm->max_wm;
+ if (wm_size <= 0)
+ wm_size = wm->default_wm;
+ return wm_size;
+}
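+/* Worked example with illustrative numbers: a 100000 kHz pixel clock at
+ * 32bpp (4 bytes) with the default 5000 ns latency drains
+ * (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes during the latency window;
+ * with 64-byte cachelines that is DIV_ROUND_UP(2000, 64) = 32 entries, so a
+ * hypothetical 96-entry FIFO with a guard size of 2 yields a watermark of
+ * 96 - (32 + 2) = 62.
+ */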
+
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled && crtc->fb) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+static void pineview_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ const struct cxsr_latency *latency;
+ u32 reg;
+ unsigned long wm;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ int clock = crtc->mode.clock;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Display SR */
+ wm = intel_calculate_wm(clock, &pineview_display_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ I915_WRITE(DSPFW3,
+ I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
+static bool g4x_compute_wm0(struct drm_device *dev,
+ int plane,
+ const struct intel_watermark_params *display,
+ int display_latency_ns,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency_ns,
+ int *plane_wm,
+ int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size;
+ int line_time_us, line_count;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *cursor_wm = cursor->guard_size;
+ *plane_wm = display->guard_size;
+ return false;
+ }
+
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
+
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
+ line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
+
+ return true;
+}
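+/* The "small buffer" plane estimate above, with illustrative numbers: a
+ * 100000 kHz clock at 4 bytes/pixel and 5000 ns latency needs
+ * ((100000 * 4 / 1000) * 5000) / 1000 = 2000 bytes, plus any tlb_miss
+ * allowance, rounded up to cachelines and padded by the guard size. The
+ * cursor uses the "large buffer" method instead, since it always fetches
+ * whole 64-pixel-wide lines.
+ */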
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+ int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+ display_wm, cursor_wm);
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+ display_wm, display->max_wm);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+ cursor_wm, cursor->max_wm);
+ return false;
+ }
+
+ if (!(display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+ int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int hdisplay, htotal, pixel_size, clock;
+ unsigned long line_time_us;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return g4x_check_srwm(dev,
+ *display_wm, *cursor_wm,
+ display, cursor);
+}
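+/* Line-time arithmetic above, with illustrative numbers: htotal = 1344 and
+ * a 65000 kHz clock give line_time_us = (1344 * 1000) / 65000 = 20; a
+ * 12000 ns self-refresh latency is then 12000 / 20 = 600 thousandths of a
+ * line, and the "+ 1000) / 1000" step converts that into line_count = 1,
+ * i.e. a whole line of margin.
+ */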
+
+static bool vlv_compute_drain_latency(struct drm_device *dev,
+ int plane,
+ int *plane_prec_mult,
+ int *plane_dl,
+ int *cursor_prec_mult,
+ int *cursor_dl)
+{
+ struct drm_crtc *crtc;
+ int clock, pixel_size;
+ int entries;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled)
+ return false;
+
+ clock = crtc->mode.clock; /* VESA DOT Clock */
+ pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
+
+ entries = (clock / 1000) * pixel_size;
+ *plane_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
+ pixel_size);
+
+ entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
+ *cursor_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
+
+ return true;
+}
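+/* Drain-latency math above, with illustrative numbers: a 100000 kHz clock
+ * at 4 bytes/pixel drains (100000 / 1000) * 4 = 400 bytes/us, which exceeds
+ * 256 and so selects the 32x precision multiplier; the plane drain latency
+ * then comes out as (64 * 32 * 4) / 400 = 20.
+ */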
+
+/*
+ * Update drain latency registers of memory arbiter
+ *
+ * Valleyview SoC has a new memory arbiter and needs drain latency registers
+ * to be programmed. Each plane has a drain latency multiplier and a drain
+ * latency value.
+ */
+
+static void vlv_update_drain_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_prec, planea_dl, planeb_prec, planeb_dl;
+ int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
+ int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
+ either 16 or 32 */
+
+ /* For plane A, Cursor A */
+ if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
+ &cursor_prec_mult, &cursora_dl)) {
+ cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
+ planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
+
+ I915_WRITE(VLV_DDL1, cursora_prec |
+ (cursora_dl << DDL_CURSORA_SHIFT) |
+ planea_prec | planea_dl);
+ }
+
+ /* For plane B, Cursor B */
+ if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
+ &cursor_prec_mult, &cursorb_dl)) {
+ cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
+ planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
+
+ I915_WRITE(VLV_DDL2, cursorb_prec |
+ (cursorb_dl << DDL_CURSORB_SHIFT) |
+ planeb_prec | planeb_dl);
+ }
+}
+
+#define single_plane_enabled(mask) is_power_of_2(mask)
+
+static void valleyview_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ vlv_update_drain_latency(dev);
+
+ if (g4x_compute_wm0(dev, 0,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ plane_sr = cursor_sr = 0;
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &plane_sr, &cursor_sr))
+ I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+ else
+ I915_WRITE(FW_BLC_SELF_VLV,
+ I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
+}
+
+static void g4x_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ if (g4x_compute_wm0(dev, 0,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ plane_sr = cursor_sr = 0;
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &g4x_wm_info,
+ &g4x_cursor_wm_info,
+ &plane_sr, &cursor_sr))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ else
+ I915_WRITE(FW_BLC_SELF,
+ I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ /* HPLL off in SR has some issues on G4x... disable it */
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i965_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ int srwm = 1;
+ int cursor_sr = 16;
+
+ /* Calc sr entries for one plane configs */
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+ int clock = crtc->mode.clock;
+ int htotal = crtc->mode.htotal;
+ int hdisplay = crtc->mode.hdisplay;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
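+		/*
+		 * Illustration (made-up 1920x1080 mode, clock 148500 kHz,
+		 * 4 bpp): line_time_us = (2200 * 1000) / 148500 = 14, the
+		 * line count term is (12000 / 14 + 1000) / 1000 = 1, so
+		 * entries = 1 * 4 * 1920 = 7680 bytes, or 120 64-byte
+		 * cachelines after DIV_ROUND_UP.
+		 */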
+ line_time_us = ((htotal * 1000) / clock);
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x1ff;
+ DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
+
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * 64;
+ entries = DIV_ROUND_UP(entries,
+ i965_cursor_wm_info.cacheline_size);
+ cursor_sr = i965_cursor_wm_info.fifo_size -
+ (entries + i965_cursor_wm_info.guard_size);
+
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
+
+ /* 965 has limitations... */
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+ (8 << 16) | (8 << 8) | (8 << 0));
+ I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ /* update cursor SR watermark */
+ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i9xx_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_watermark_params *wm_info;
+ uint32_t fwater_lo;
+ uint32_t fwater_hi;
+ int cwm, srwm = 1;
+ int fifo_size;
+ int planea_wm, planeb_wm;
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ if (IS_I945GM(dev))
+ wm_info = &i945_wm_info;
+ else if (!IS_GEN2(dev))
+ wm_info = &i915_wm_info;
+ else
+ wm_info = &i855_wm_info;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = intel_get_crtc_for_plane(dev, 0);
+ if (crtc->enabled && crtc->fb) {
+ planea_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ enabled = crtc;
+ } else
+ planea_wm = fifo_size - wm_info->guard_size;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ crtc = intel_get_crtc_for_plane(dev, 1);
+ if (crtc->enabled && crtc->fb) {
+ planeb_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ if (enabled == NULL)
+ enabled = crtc;
+ else
+ enabled = NULL;
+ } else
+ planeb_wm = fifo_size - wm_info->guard_size;
+
+ DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+ cwm = 2;
+
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
+ /* Calc sr entries for one plane configs */
+ if (HAS_FW_BLC(dev) && enabled) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 6000;
+ int clock = enabled->mode.clock;
+ int htotal = enabled->mode.htotal;
+ int hdisplay = enabled->mode.hdisplay;
+ int pixel_size = enabled->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = (htotal * 1000) / clock;
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
+ if (srwm < 0)
+ srwm = 1;
+
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev))
+ I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
+
+ fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ fwater_hi = (cwm & 0x1f);
+
+ /* Set request length to 8 cachelines per fetch */
+ fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+ fwater_hi = fwater_hi | (1 << 8);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+ I915_WRITE(FW_BLC2, fwater_hi);
+
+ if (HAS_FW_BLC(dev)) {
+ if (enabled) {
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ DRM_DEBUG_KMS("memory self refresh enabled\n");
+ } else
+ DRM_DEBUG_KMS("memory self refresh disabled\n");
+ }
+}
+
+static void i830_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ uint32_t fwater_lo;
+ int planea_wm;
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc == NULL)
+ return;
+
+ planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+ dev_priv->display.get_fifo_size(dev, 0),
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ fwater_lo |= (3<<8) | planea_wm;
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+}
+
+#define ILK_LP0_PLANE_LATENCY 700
+#define ILK_LP0_CURSOR_LATENCY 1300
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark values is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool ironlake_check_srwm(struct drm_device *dev, int level,
+ int fbc_wm, int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+ " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+ if (fbc_wm > SNB_FBC_MAX_SRWM) {
+ DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+ fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+		/* fbc has its own way to disable FBC WM */
+ I915_WRITE(DISP_ARB_CTL,
+ I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+ return false;
+ }
+
+ if (display_wm > display->max_wm) {
+		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+			      display_wm, display->max_wm, level);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+			      cursor_wm, cursor->max_wm, level);
+ return false;
+ }
+
+ if (!(fbc_wm || display_wm || cursor_wm)) {
+		DRM_DEBUG_KMS("all watermarks are 0, disabling wm%d+\n", level);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Compute watermark values of WM[1-3],
+ */
+static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int hdisplay, htotal, pixel_size, clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *fbc_wm = *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /*
+ * Spec says:
+ * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+ */
+ *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return ironlake_check_srwm(dev, level,
+ *fbc_wm, *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+static void ironlake_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks for pipe A - plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks for pipe B - plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled))
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ ILK_READ_WM1_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ ILK_READ_WM2_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /*
+ * WM3 is unsupported on ILK, probably because we don't have latency
+ * data for that power state
+ */
+}
+
+static void sandybridge_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe A - plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe B - plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ if ((dev_priv->num_pipe == 3) &&
+ g4x_compute_wm0(dev, 2,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEC_IVB);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEC_IVB, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe C - plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled |= 1 << 2;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+	 * SNB supports 3 levels of watermarks.
+	 *
+	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
+	 * and disabled in descending order.
+ *
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+static void
+haswell_update_linetime_wm(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 temp;
+
+ temp = I915_READ(PIPE_WM_LINETIME(pipe));
+ temp &= ~PIPE_WM_LINETIME_MASK;
+
+	/* The linetime watermark is computed from how long it takes to fill
+	 * a single row at the given clock rate, multiplied by 8.
+	 */
+ temp |= PIPE_WM_LINETIME_TIME(
+ ((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
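+	/*
+	 * E.g. a made-up 1920-wide mode at a 148500 kHz dot clock:
+	 * (1920 * 1000) / 148500 = 12, times 8 = 96 line-time units.
+	 */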
+
+ /* IPS watermarks are only used by pipe A, and are ignored by
+ * pipes B and C. They are calculated similarly to the common
+ * linetime values, except that we are using CD clock frequency
+ * in MHz instead of pixel rate for the division.
+ *
+ * This is a placeholder for the IPS watermark calculation code.
+ */
+
+ I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
+}
+
+static bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int display_latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ int clock;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *sprite_wm = display->guard_size;
+ return false;
+ }
+
+ clock = crtc->mode.clock;
+
+ /* Use the small buffer method to calculate the sprite watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size -
+ sprite_width * 8;
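+	/*
+	 * Hypothetical numbers: fifo_size = 128, 64-byte cachelines and a
+	 * 256-pixel-wide sprite give tlb_miss = 128 * 64 - 256 * 8 = 6144
+	 * extra bytes to cover a worst-case TLB miss during the fetch.
+	 */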
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+ if (*sprite_wm > (int)display->max_wm)
+ *sprite_wm = display->max_wm;
+
+ return true;
+}
+
+static bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ clock = crtc->mode.clock;
+ if (!clock) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_time_us = (sprite_width * 1000) / clock;
+ if (!line_time_us) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = sprite_width * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+
+	return *sprite_wm <= 0x3ff;
+}
+
+static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int sprite_wm, reg;
+ int ret;
+
+ switch (pipe) {
+ case 0:
+ reg = WM0_PIPEA_ILK;
+ break;
+ case 1:
+ reg = WM0_PIPEB_ILK;
+ break;
+ case 2:
+ reg = WM0_PIPEC_IVB;
+ break;
+ default:
+ return; /* bad pipe */
+ }
+
+ ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+ &sandybridge_display_wm_info,
+ latency, &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+ pipe);
+ return;
+ }
+
+ val = I915_READ(reg);
+ val &= ~WM0_PIPE_SPRITE_MASK;
+ I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+ DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+ /* Only IVB has two more LP watermarks for sprite */
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
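+ * A worked example of the normal method (hypothetical mode): clock =
+ * 148500 kHz at 4 bytes per pixel with 12000 ns latency gives
+ * ((148500 * 4 / 1000) * 12000) / 1000 = 7128 bytes, roughly 112
+ * 64-byte cachelines before rounding and guard entries.
+ *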
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_wm)
+ dev_priv->display.update_wm(dev);
+}
+
+void intel_update_linetime_watermarks(struct drm_device *dev,
+ int pipe, struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_linetime_wm)
+ dev_priv->display.update_linetime_wm(dev, pipe, mode);
+}
+
+void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_sprite_wm)
+ dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+ pixel_size);
+}
+
+static struct drm_i915_gem_object *
+intel_alloc_context_page(struct drm_device *dev)
+{
+ struct drm_i915_gem_object *ctx;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ ctx = i915_gem_alloc_object(dev, 4096);
+ if (!ctx) {
+ DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+ return NULL;
+ }
+
+ ret = i915_gem_object_pin(ctx, 4096, true);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+ if (ret) {
+ DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+ goto err_unpin;
+ }
+
+ return ctx;
+
+err_unpin:
+ i915_gem_object_unpin(ctx);
+err_unref:
+ drm_gem_object_unreference(&ctx->base);
+ return NULL;
+}
+
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+ POSTING_READ16(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+void ironlake_enable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u8 fmax, fmin, fstart, vstart;
+
+ /* Enable temp reporting */
+ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ I915_WRITE(RCUPEI, 100000);
+ I915_WRITE(RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ I915_WRITE(RCBMAXAVG, 90000);
+ I915_WRITE(RCBMINAVG, 80000);
+
+ I915_WRITE(MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+
+ dev_priv->fmax = fmax; /* IPS callback will increase this */
+ dev_priv->fstart = fstart;
+
+ dev_priv->max_delay = fstart;
+ dev_priv->min_delay = fmin;
+ dev_priv->cur_delay = fstart;
+
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
+ I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ /*
+ * Interrupts will be enabled in ironlake_irq_postinstall
+ */
+
+ I915_WRITE(VIDSTART, vstart);
+ POSTING_READ(VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ I915_WRITE(MEMMODECTL, rgvmodectl);
+
+ if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ DRM_ERROR("stuck trying to change perf mode\n");
+ msleep(1);
+
+ ironlake_set_drps(dev, fstart);
+
+ dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ I915_READ(0x112e0);
+ dev_priv->last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->last_count2 = I915_READ(0x112f4);
+ getrawmonotonic(&dev_priv->last_time2);
+}
+
+void ironlake_disable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+ I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+ I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ ironlake_set_drps(dev, dev_priv->fstart);
+ msleep(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ msleep(1);
+}
+
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 swreq;
+
+ swreq = (val & 0x3ff) << 25;
+ I915_WRITE(GEN6_RPNSWREQ, swreq);
+}
+
+void gen6_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0);
+ /* Complete PM interrupt masking here doesn't race with the rps work
+ * item again unmasking PM interrupts because that is using a different
+ * register (PMIMR) to mask PM interrupts. The only risk is in leaving
+ * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+
+ spin_lock_irq(&dev_priv->rps_lock);
+ dev_priv->pm_iir = 0;
+ spin_unlock_irq(&dev_priv->rps_lock);
+
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
+int intel_enable_rc6(const struct drm_device *dev)
+{
+ /*
+ * Respect the kernel parameter if it is set
+ */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /*
+ * Disable RC6 on Ironlake
+ */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ /* Sorry Haswell, no RC6 for you for now. */
+ if (IS_HASWELL(dev))
+ return 0;
+
+	/*
+	 * On Sandybridge, enable plain RC6 only; deep RC6 is left disabled
+	 */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+ return INTEL_RC6_ENABLE;
+ }
+ DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+ return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+}
+
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+ struct intel_ring_buffer *ring;
+ u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 pcu_mbox, rc6_mask = 0;
+ u32 gtfifodbg;
+ int cur_freq, min_freq, max_freq;
+ int rc6_mode;
+ int i;
+
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ I915_WRITE(GEN6_RC_STATE, 0);
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ /* Clear the DBG now so we don't confuse earlier errors */
+ if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ /* disable the counters and set deterministic thresholds */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ rc6_mode = intel_enable_rc6(dev_priv->dev);
+ if (rc6_mode & INTEL_RC6_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+
+ DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(10) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN6_FREQUENCY(12));
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ 18 << 24 |
+ 6 << 16);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
+ I915_WRITE(GEN6_RP_UP_EI, 100000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+ I915_WRITE(GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+ min_freq = (rp_state_cap & 0xff0000) >> 16;
+ max_freq = rp_state_cap & 0xff;
+ cur_freq = (gt_perf_status & 0xff00) >> 8;
+
+ /* Check for overclock support */
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
+ pcu_mbox = I915_READ(GEN6_PCODE_DATA);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+ if (pcu_mbox & (1<<31)) { /* OC supported */
+ max_freq = pcu_mbox & 0xff;
+		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
+ }
+
+ /* In units of 100MHz */
+ dev_priv->max_delay = max_freq;
+ dev_priv->min_delay = min_freq;
+ dev_priv->cur_delay = cur_freq;
+
+ /* requires MSI enabled */
+ I915_WRITE(GEN6_PMIER,
+ GEN6_PM_MBOX_EVENT |
+ GEN6_PM_THERMAL_EVENT |
+ GEN6_PM_RP_DOWN_TIMEOUT |
+ GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_EI_EXPIRED);
+ spin_lock_irq(&dev_priv->rps_lock);
+ WARN_ON(dev_priv->pm_iir != 0);
+ I915_WRITE(GEN6_PMIMR, 0);
+ spin_unlock_irq(&dev_priv->rps_lock);
+ /* enable all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+
+ gen6_gt_force_wake_put(dev_priv);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+{
+ int min_freq = 15;
+ int gpu_freq, ia_freq, max_ia_freq;
+ int scaling_factor = 180;
+
+ max_ia_freq = cpufreq_quick_get_max(0);
+ /*
+ * Default to measured freq if none found, PCU will ensure we don't go
+ * over
+ */
+ if (!max_ia_freq)
+ max_ia_freq = tsc_khz;
+
+ /* Convert from kHz to MHz */
+ max_ia_freq /= 1000;
+
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ gpu_freq--) {
+ int diff = dev_priv->max_delay - gpu_freq;
+
+ /*
+ * For GPU frequencies less than 750MHz, just use the lowest
+ * ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
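+		/*
+		 * Example with assumed numbers: max_ia_freq = 3400 MHz and a
+		 * GPU frequency 5 steps below max gives ia_freq =
+		 * 3400 - (5 * 180) / 2 = 2950, programmed as
+		 * DIV_ROUND_CLOSEST(2950, 100) = 30 in the ratio field below.
+		 */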
+
+ I915_WRITE(GEN6_PCODE_DATA,
+ (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
+ gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+ GEN6_PCODE_READY) == 0, 10)) {
+ DRM_ERROR("pcode write of freq table timed out\n");
+ continue;
+ }
+ }
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
+static void ironlake_teardown_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx) {
+ i915_gem_object_unpin(dev_priv->renderctx);
+ drm_gem_object_unreference(&dev_priv->renderctx->base);
+ dev_priv->renderctx = NULL;
+ }
+
+ if (dev_priv->pwrctx) {
+ i915_gem_object_unpin(dev_priv->pwrctx);
+ drm_gem_object_unreference(&dev_priv->pwrctx->base);
+ dev_priv->pwrctx = NULL;
+ }
+}
+
+void ironlake_disable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_READ(PWRCTXA)) {
+ /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+ wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+ 50);
+
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ POSTING_READ(RSTDBYCTL);
+ }
+
+ ironlake_teardown_rc6(dev);
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx == NULL)
+ dev_priv->renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->renderctx)
+ return -ENOMEM;
+
+ if (dev_priv->pwrctx == NULL)
+ dev_priv->pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->pwrctx) {
+ ironlake_teardown_rc6(dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ironlake_enable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
+
+ /* rc6 disabled by default due to repeated reports of hanging during
+ * boot and resume.
+ */
+ if (!intel_enable_rc6(dev))
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = ironlake_setup_rc6(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+
+ /*
+ * GPU can automatically power down the render unit if given a page
+ * to save state.
+ */
+ ret = intel_ring_begin(ring, 6);
+ if (ret) {
+ ironlake_teardown_rc6(dev);
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_advance(ring);
+
+	/*
+	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+	 * does an implicit flush; combined with the MI_FLUSH above, it should
+	 * be safe to assume that renderctx is valid.
+	 */
+ ret = intel_wait_ring_idle(ring);
+ if (ret) {
+		DRM_ERROR("failed to enable ironlake power savings\n");
+ ironlake_teardown_rc6(dev);
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+
+ I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ unsigned long freq;
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
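+	/*
+	 * Illustration with hypothetical fuse fields: div = 20, post = 1,
+	 * pre = 1 decodes to (20 * 133333) / ((1 << 1) * 1) = 1333330,
+	 * i.e. the 133333 reference scaled by the encoded fields.
+	 */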
+ freq = ((div * 133333) / ((1<<post) * pre));
+
+ return freq;
+}
+
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ u64 total_count, diff, ret;
+ u32 count1, count2, count3, m = 0, c = 0;
+ unsigned long now = jiffies_to_msecs(jiffies), diff1;
+ int i;
+
+ diff1 = now - dev_priv->last_time1;
+
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->chipset_power;
+
+ count1 = I915_READ(DMIEC);
+ count2 = I915_READ(DDREC);
+ count3 = I915_READ(CSIEC);
+
+ total_count = count1 + count2 + count3;
+
+ /* FIXME: handle per-counter overflow */
+ if (total_count < dev_priv->last_count1) {
+ diff = ~0UL - dev_priv->last_count1;
+ diff += total_count;
+ } else {
+ diff = total_count - dev_priv->last_count1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == dev_priv->c_m &&
+ cparams[i].t == dev_priv->r_t) {
+ m = cparams[i].m;
+ c = cparams[i].c;
+ break;
+ }
+ }
+
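+	/*
+	 * Rough illustration (made-up counters): m = 301, c = 28664 and a
+	 * delta of 100 counts/ms yield (301 * 100 + 28664) / 10 = 5876.
+	 */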
+ diff = div_u64(diff, diff1);
+ ret = ((m * diff) + c);
+ ret = div_u64(ret, 10);
+
+ dev_priv->last_count1 = total_count;
+ dev_priv->last_time1 = now;
+
+ dev_priv->chipset_power = ret;
+
+ return ret;
+}
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long m, x, b;
+ u32 tsfs;
+
+ tsfs = I915_READ(TSFS);
+
+ m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+ x = I915_READ8(TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+
+ return ((m * x) / 127) - b;
+}
+
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+ static const struct v_table {
+ u16 vd; /* in .1 mil */
+ u16 vm; /* in .1 mil */
+ } v_table[] = {
+ { 0, 0, },
+ { 375, 0, },
+ { 500, 0, },
+ { 625, 0, },
+ { 750, 0, },
+ { 875, 0, },
+ { 1000, 0, },
+ { 1125, 0, },
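+		/* PXVIDs 8..31 all clamp to the same plateau */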
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4250, 3125, },
+ { 4375, 3250, },
+ { 4500, 3375, },
+ { 4625, 3500, },
+ { 4750, 3625, },
+ { 4875, 3750, },
+ { 5000, 3875, },
+ { 5125, 4000, },
+ { 5250, 4125, },
+ { 5375, 4250, },
+ { 5500, 4375, },
+ { 5625, 4500, },
+ { 5750, 4625, },
+ { 5875, 4750, },
+ { 6000, 4875, },
+ { 6125, 5000, },
+ { 6250, 5125, },
+ { 6375, 5250, },
+ { 6500, 5375, },
+ { 6625, 5500, },
+ { 6750, 5625, },
+ { 6875, 5750, },
+ { 7000, 5875, },
+ { 7125, 6000, },
+ { 7250, 6125, },
+ { 7375, 6250, },
+ { 7500, 6375, },
+ { 7625, 6500, },
+ { 7750, 6625, },
+ { 7875, 6750, },
+ { 8000, 6875, },
+ { 8125, 7000, },
+ { 8250, 7125, },
+ { 8375, 7250, },
+ { 8500, 7375, },
+ { 8625, 7500, },
+ { 8750, 7625, },
+ { 8875, 7750, },
+ { 9000, 7875, },
+ { 9125, 8000, },
+ { 9250, 8125, },
+ { 9375, 8250, },
+ { 9500, 8375, },
+ { 9625, 8500, },
+ { 9750, 8625, },
+ { 9875, 8750, },
+ { 10000, 8875, },
+ { 10125, 9000, },
+ { 10250, 9125, },
+ { 10375, 9250, },
+ { 10500, 9375, },
+ { 10625, 9500, },
+ { 10750, 9625, },
+ { 10875, 9750, },
+ { 11000, 9875, },
+ { 11125, 10000, },
+ { 11250, 10125, },
+ { 11375, 10250, },
+ { 11500, 10375, },
+ { 11625, 10500, },
+ { 11750, 10625, },
+ { 11875, 10750, },
+ { 12000, 10875, },
+ { 12125, 11000, },
+ { 12250, 11125, },
+ { 12375, 11250, },
+ { 12500, 11375, },
+ { 12625, 11500, },
+ { 12750, 11625, },
+ { 12875, 11750, },
+ { 13000, 11875, },
+ { 13125, 12000, },
+ { 13250, 12125, },
+ { 13375, 12250, },
+ { 13500, 12375, },
+ { 13625, 12500, },
+ { 13750, 12625, },
+ { 13875, 12750, },
+ { 14000, 12875, },
+ { 14125, 13000, },
+ { 14250, 13125, },
+ { 14375, 13250, },
+ { 14500, 13375, },
+ { 14625, 13500, },
+ { 14750, 13625, },
+ { 14875, 13750, },
+ { 15000, 13875, },
+ { 15125, 14000, },
+ { 15250, 14125, },
+ { 15375, 14250, },
+ { 15500, 14375, },
+ { 15625, 14500, },
+ { 15750, 14625, },
+ { 15875, 14750, },
+ { 16000, 14875, },
+ { 16125, 15000, },
+ };
+ if (dev_priv->info->is_mobile)
+ return v_table[pxvid].vm;
+ else
+ return v_table[pxvid].vd;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ struct timespec now, diff1;
+ u64 diff;
+ unsigned long diffms;
+ u32 count;
+
+ if (dev_priv->info->gen != 5)
+ return;
+
+ getrawmonotonic(&now);
+ diff1 = timespec_sub(now, dev_priv->last_time2);
+
+ /* Don't divide by 0 */
+ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+ if (!diffms)
+ return;
+
+ count = I915_READ(GFXEC);
+
+ if (count < dev_priv->last_count2) {
+ diff = ~0UL - dev_priv->last_count2;
+ diff += count;
+ } else {
+ diff = count - dev_priv->last_count2;
+ }
+
+ dev_priv->last_count2 = count;
+ dev_priv->last_time2 = now;
+
+ /* More magic constants... */
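+	/*
+	 * Rough illustration: a counter delta of 50000 over 100 ms gives
+	 * 50000 * 1181 / (100 * 10) = 59050 in the same internal scale
+	 * that i915_gfx_val() folds into its estimate.
+	 */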
+ diff = diff * 1181;
+ diff = div_u64(diff, diffms * 10);
+ dev_priv->gfx_power = diff;
+}
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+ state1 = ext_v;
+
+ t = i915_mch_val(dev_priv);
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ if (t > 80)
+ corr = ((t * 2349) + 135940);
+ else if (t >= 50)
+ corr = ((t * 964) + 29317);
+ else /* < 50 */
+ corr = ((t * 301) + 1004);
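+	/*
+	 * E.g. t = 60 falls in the middle band: corr = 60 * 964 + 29317 =
+	 * 87157, i.e. a factor of ~0.87 in these 1/100000 units.
+	 */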
+
+ corr = corr * ((150142 * state1) / 10000 - 78642);
+ corr /= 100000;
+ corr2 = (corr * dev_priv->corr);
+
+ state2 = (corr2 * state1) / 10000;
+ state2 /= 100; /* convert to mW */
+
+ i915_update_gfx_val(dev_priv);
+
+ return dev_priv->gfx_power + state2;
+}
+
+/* Global for IPS driver to get at the current i915 device */
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ * - i915_mch_dev
+ * - dev_priv->max_delay
+ * - dev_priv->min_delay
+ * - dev_priv->fmax
+ * - dev_priv->gpu_busy
+ */
+static DEFINE_SPINLOCK(mchdev_lock);
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *dev_priv;
+ unsigned long chipset_val, graphics_val, ret = 0;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ chipset_val = i915_chipset_val(dev_priv);
+ graphics_val = i915_gfx_val(dev_priv);
+
+ ret = chipset_val + graphics_val;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay > dev_priv->fmax)
+ dev_priv->max_delay--;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay < dev_priv->min_delay)
+ dev_priv->max_delay++;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = false;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ ret = dev_priv->busy;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ dev_priv->max_delay = dev_priv->fstart;
+
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+ ret = false;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+/**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
+{
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = dev_priv;
+ dev_priv->mchdev_lock = &mchdev_lock;
+ spin_unlock(&mchdev_lock);
+
+ ips_ping_for_i915_load();
+}
+
+void intel_gpu_ips_teardown(void)
+{
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = NULL;
+ spin_unlock(&mchdev_lock);
+}
+
+void intel_init_emon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lcfuse;
+ u8 pxw[16];
+ int i;
+
+ /* Disable to program */
+ I915_WRITE(ECR, 0);
+ POSTING_READ(ECR);
+
+ /* Program energy weights for various events */
+ I915_WRITE(SDEW, 0x15040d00);
+ I915_WRITE(CSIEW0, 0x007f0000);
+ I915_WRITE(CSIEW1, 0x1e220004);
+ I915_WRITE(CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ I915_WRITE(PEW + (i * 4), 0);
+ for (i = 0; i < 3; i++)
+ I915_WRITE(DEW + (i * 4), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+ unsigned long freq = intel_pxfreq(pxvidfreq);
+ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+ unsigned long val;
+
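+		/*
+		 * Illustration with assumed fuse values: vid = 100 and
+		 * freq = 400000 give 100 * 100 * 400 * 255 / (127*127*900)
+		 * ~= 70, comfortably inside the one-byte weight.
+		 */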
+ val = vid * vid;
+ val *= (freq / 1000);
+ val *= 255;
+ val /= (127*127*900);
+ if (val > 0xff)
+ DRM_ERROR("bad pxval: %ld\n", val);
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+ I915_WRITE(PXW + (i * 4), val);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ I915_WRITE(OGW0, 0);
+ I915_WRITE(OGW1, 0);
+ I915_WRITE(EG0, 0x00007f00);
+ I915_WRITE(EG1, 0x0000000e);
+ I915_WRITE(EG2, 0x000e0000);
+ I915_WRITE(EG3, 0x68000300);
+ I915_WRITE(EG4, 0x42000000);
+ I915_WRITE(EG5, 0x00140031);
+ I915_WRITE(EG6, 0);
+ I915_WRITE(EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ I915_WRITE(PXWL + (i * 4), 0);
+
+ /* Enable PMON + select events */
+ I915_WRITE(ECR, 0x80000019);
+
+ lcfuse = I915_READ(LCFUSE02);
+
+ dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
+static void ironlake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ /* Required for FBC */
+ dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+ DPFCRUNIT_CLOCK_GATE_DISABLE |
+ DPFDUNIT_CLOCK_GATE_DISABLE;
+ /* Required for CxSR */
+ dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_3DCGDIS0,
+ MARIUNIT_CLOCK_GATE_DISABLE |
+ SVSMUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(PCH_3DCGDIS1,
+ VFMUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ /*
+ * According to the spec the following bits should be set in
+ * order to enable memory self-refresh
+ * The bit 22/21 of 0x42004
+ * The bit 5 of 0x42020
+ * The bit 15 of 0x45000
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ I915_WRITE(ILK_DSPCLK_GATE,
+ (I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE));
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /*
+ * Based on the document from hardware guys the following bits
+ * should be set unconditionally in order to enable FBC.
+ * The bit 22 of 0x42000
+ * The bit 22 of 0x42004
+ * The bit 7,8,9 of 0x42020.
+ */
+ if (IS_IRONLAKE_M(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPFC_DIS1 |
+ ILK_DPFC_DIS2 |
+ ILK_CLK_FBC);
+ }
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+ I915_WRITE(_3D_CHICKEN2,
+ _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ _3D_CHICKEN2_WM_READ_PIPELINED);
+}
+
+static void gen6_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ I915_WRITE(GEN6_UCGCTL1,
+ I915_READ(GEN6_UCGCTL1) |
+ GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* Bspec says we need to always set all mask bits. */
+ I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
+ _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
+
+ /*
+ * According to the spec the following bits should be
+ * set in order to enable memory self-refresh and fbc:
+ * The bit21 and bit22 of 0x42000
+ * The bit21 and bit22 of 0x42004
+ * The bit5 and bit7 of 0x42020
+ * The bit14 of 0x70180
+ * The bit14 of 0x71180
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE |
+ ILK_DPFD_CLK_GATE);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+}
+
+static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+{
+ uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+
+ reg &= ~GEN7_FF_SCHED_MASK;
+ reg |= GEN7_FF_TS_SCHED_HW;
+ reg |= GEN7_FF_VS_SCHED_HW;
+ reg |= GEN7_FF_DS_SCHED_HW;
+
+ I915_WRITE(GEN7_FF_THREAD_MODE, reg);
+}
+
+static void ivybridge_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /* WaDisable4x2SubspanOptimization */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+}
+
+static void valleyview_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB; VLV
+	 * inherits this. This implements the WaDisableRCZUnitClockGating
+	 * workaround.
+	 */
+ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+}
+
+static void g4x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate;
+
+ I915_WRITE(RENCLK_GATE_D1, 0);
+ I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ GS_UNIT_CLOCK_GATE_DISABLE |
+ CL_UNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+ OVRUNIT_CLOCK_GATE_DISABLE |
+ OVCUNIT_CLOCK_GATE_DISABLE;
+ if (IS_GM45(dev))
+ dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+}
+
+static void crestline_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ I915_WRITE(DSPCLK_GATE_D, 0);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ I915_WRITE16(DEUC, 0);
+}
+
+static void broadwater_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ I965_RCC_CLOCK_GATE_DISABLE |
+ I965_RCPB_CLOCK_GATE_DISABLE |
+ I965_ISC_CLOCK_GATE_DISABLE |
+ I965_FBC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+}
+
+static void gen3_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dstate = I915_READ(D_STATE);
+
+ dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+ DSTATE_DOT_CLOCK_GATING;
+ I915_WRITE(D_STATE, dstate);
+
+ if (IS_PINEVIEW(dev))
+ I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+}
+
+static void i85x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+}
+
+static void i830_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+ /* Without this, mode sets may fail silently on FDI */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
+}
+
+void intel_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->display.init_clock_gating(dev);
+
+ if (dev_priv->display.init_pch_clock_gating)
+ dev_priv->display.init_pch_clock_gating(dev);
+}
+
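intel_init_clock_gating() dispatches through per-platform function pointers that intel_init_pm() below installs. A sketch of the relevant vtable fields, with the shape inferred from usage here (the authoritative definition lives in i915_drv.h):

    struct drm_i915_display_funcs {
            void (*init_clock_gating)(struct drm_device *dev);
            void (*init_pch_clock_gating)(struct drm_device *dev);
            void (*update_wm)(struct drm_device *dev);
            void (*sanitize_pm)(struct drm_device *dev);
            /* ... plus force_wake get/put, fbc hooks, fifo sizes ... */
    };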
+static void gen6_sanitize_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 limits, delay, old;
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ limits &= ~(0x3f << 16 | 0x3f << 24);
+ delay = dev_priv->cur_delay;
+ if (delay < dev_priv->max_delay)
+ limits |= (dev_priv->max_delay & 0x3f) << 24;
+ if (delay > dev_priv->min_delay)
+ limits |= (dev_priv->min_delay & 0x3f) << 16;
+
+ if (old != limits) {
+ DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
+ limits, old);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+ }
+
+ gen6_gt_force_wake_put(dev_priv);
+}
+
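The shifts above imply that GEN6_RP_INTERRUPT_LIMITS packs two 6-bit frequency codes: bits 29:24 stop up-interrupts once the maximum is reached, bits 21:16 stop down-interrupts at the minimum. A decode sketch, with the field placement inferred from gen6_sanitize_pm() rather than from documentation:

    static void decode_rp_limits(u32 limits)
    {
            u32 up_stop   = (limits >> 24) & 0x3f;  /* no up-IRQs above this code */
            u32 down_stop = (limits >> 16) & 0x3f;  /* no down-IRQs below this code */

            DRM_DEBUG_DRIVER("RP limits: up-stop %u, down-stop %u\n",
                             up_stop, down_stop);
    }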
+void intel_sanitize_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.sanitize_pm)
+ dev_priv->display.sanitize_pm(dev);
+}
+
+/* Starting with Haswell, we have different power wells for
+ * different parts of the GPU. This attempts to enable them all.
+ */
+void intel_init_power_wells(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long power_wells[] = {
+ HSW_PWR_WELL_CTL1,
+ HSW_PWR_WELL_CTL2,
+ HSW_PWR_WELL_CTL4
+ };
+ int i;
+
+ if (!IS_HASWELL(dev))
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
+ int well = I915_READ(power_wells[i]);
+
+ if ((well & HSW_PWR_WELL_STATE) == 0) {
+			I915_WRITE(power_wells[i], well | HSW_PWR_WELL_ENABLE);
+			if (wait_for(I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE, 20))
+ DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
+ }
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
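intel_init_power_wells() leans on the driver's wait_for() poll-with-timeout helper: the condition is re-evaluated until it holds or the timeout (in ms) expires, returning 0 on success and -ETIMEDOUT otherwise. A simplified sketch of its shape (the real macro in intel_drv.h also takes a configurable sleep interval and has a busy-wait variant):

    #define wait_for(COND, MS) ({                                           \
            unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
            int ret__ = 0;                                                  \
            while (!(COND)) {                                               \
                    if (time_after(jiffies, timeout__)) {                   \
                            ret__ = -ETIMEDOUT;                             \
                            break;                                          \
                    }                                                       \
                    msleep(1);                                              \
            }                                                               \
            ret__;                                                          \
    })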
+/* Set up chip specific power management-related functions */
+void intel_init_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
+ } else if (IS_CRESTLINE(dev)) {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
+ }
+ /* 855GM needs testing */
+ }
+
+ /* For cxsr */
+ if (IS_PINEVIEW(dev))
+ i915_pineview_get_mem_freq(dev);
+ else if (IS_GEN5(dev))
+ i915_ironlake_get_mem_freq(dev);
+
+ /* For FIFO watermark updates */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ u32 ecobus;
+
+		/* A small trick here: if the BIOS hasn't configured MT
+		 * forcewake and the device is in RC6, then force_wake_mt_get
+		 * will not wake the device and the ECOBUS read will return
+		 * zero, which the test below (correctly) interprets as MT
+		 * forcewake being disabled.
+		 */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->display.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->display.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+
+ if (HAS_PCH_IBX(dev))
+ dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
+ else if (HAS_PCH_CPT(dev))
+ dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
+
+ if (IS_GEN5(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+ DRM_DEBUG_KMS("Failed to get proper latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ } else if (IS_GEN6(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ } else if (IS_HASWELL(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.update_wm = valleyview_update_wm;
+ dev_priv->display.init_clock_gating =
+ valleyview_init_clock_gating;
+ dev_priv->display.force_wake_get = vlv_force_wake_get;
+ dev_priv->display.force_wake_put = vlv_force_wake_put;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found ddr%s fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3" : "2",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_G4X(dev)) {
+ dev_priv->display.update_wm = g4x_update_wm;
+ dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+ } else if (IS_GEN4(dev)) {
+ dev_priv->display.update_wm = i965_update_wm;
+ if (IS_CRESTLINE(dev))
+ dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+ else if (IS_BROADWATER(dev))
+ dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+ } else if (IS_GEN3(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_I865G(dev)) {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ } else if (IS_I85X(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ } else {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ if (IS_845G(dev))
+ dev_priv->display.get_fifo_size = i845_get_fifo_size;
+ else
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ }
+
+ /* We attempt to init the necessary power wells early in the initialization
+ * time, so the subsystems that expect power to be enabled can work.
+ */
+ intel_init_power_wells(dev);
+}
+
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 62892a826ede..b59b6d5b7583 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,9 +53,35 @@ static inline int ring_space(struct intel_ring_buffer *ring)
}
static int
-render_ring_flush(struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+gen2_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ u32 cmd;
+ int ret;
+
+ cmd = MI_FLUSH;
+ if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+ cmd |= MI_NO_WRITE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
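gen2_render_ring_flush() shows the emission contract every function in this file follows. A restatement of that contract as a sketch:

    /*
     * intel_ring_begin(ring, n)  -- reserve n dwords; may wait for the
     *                               GPU to consume space or wrap the ring.
     * intel_ring_emit(ring, dw)  -- write one dword at the software tail.
     * intel_ring_advance(ring)   -- publish the tail so the GPU executes.
     *
     * n is kept even (hence the trailing MI_NOOP) so the tail stays
     * qword-aligned.
     */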
+static int
+gen4_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
{
struct drm_device *dev = ring->dev;
u32 cmd;
@@ -90,17 +116,8 @@ render_ring_flush(struct intel_ring_buffer *ring,
*/
cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) &
- I915_GEM_DOMAIN_RENDER)
+ if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
cmd &= ~MI_NO_WRITE_FLUSH;
- if (INTEL_INFO(dev)->gen < 4) {
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- }
if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
cmd |= MI_EXE_FLUSH;
@@ -290,9 +307,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
- if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
- I915_READ_START(ring) != obj->gtt_offset ||
- (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
+ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+ I915_READ_START(ring) == obj->gtt_offset &&
+ (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -384,12 +401,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
int ret = init_ring_common(ring);
if (INTEL_INFO(dev)->gen > 3) {
- int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
- I915_WRITE(MI_MODE, mode);
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
if (IS_GEN7(dev))
I915_WRITE(GFX_MODE_GEN7,
- GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
- GFX_MODE_ENABLE(GFX_REPLAY_MODE));
+ _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
}
if (INTEL_INFO(dev)->gen >= 5) {
@@ -398,7 +414,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
return ret;
}
-
if (IS_GEN6(dev)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -406,13 +421,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
* policy is not supported."
*/
I915_WRITE(CACHE_MODE_0,
- CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (INTEL_INFO(dev)->gen >= 6) {
- I915_WRITE(INSTPM,
- INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
- }
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return ret;
}
@@ -483,21 +496,30 @@ gen6_add_request(struct intel_ring_buffer *ring,
* @seqno - seqno which the waiter will block on
*/
static int
-intel_ring_sync(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- int ring,
- u32 seqno)
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
{
int ret;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
+ /* Throughout all of the GEM code, seqno passed implies our current
+ * seqno is >= the last seqno executed. However for hardware the
+ * comparison is strictly greater than.
+ */
+ seqno -= 1;
+
+ WARN_ON(signaller->semaphore_register[waiter->id] ==
+ MI_SEMAPHORE_SYNC_INVALID);
+
ret = intel_ring_begin(waiter, 4);
if (ret)
return ret;
- intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+ intel_ring_emit(waiter,
+ dw1 | signaller->semaphore_register[waiter->id]);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
intel_ring_emit(waiter, MI_NOOP);
@@ -506,47 +528,6 @@ intel_ring_sync(struct intel_ring_buffer *waiter,
return 0;
}
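A worked example of the off-by-one in gen6_ring_sync() above: if the waiter needs seqno 42 to have completed, the operand programmed into the semaphore is 41. The hardware releases the waiter when the signalled value is strictly greater than the operand, so the wait ends exactly when 42 lands, recovering the >= semantics GEM expects:

    u32 wanted  = 42;           /* seqno the waiter depends on */
    u32 operand = wanted - 1;   /* 41: hw 'value > 41' == sw 'value >= 42' */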
-/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
-int
-render_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- RCS,
- seqno);
-}
-
-/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
-int
-gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- VCS,
- seqno);
-}
-
-/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
-int
-gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- BCS,
- seqno);
-}
-
-
-
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
@@ -608,27 +589,6 @@ pc_render_add_request(struct intel_ring_buffer *ring,
return 0;
}
-static int
-render_ring_add_request(struct intel_ring_buffer *ring,
- u32 *result)
-{
- u32 seqno = i915_gem_next_request_seqno(ring);
- int ret;
-
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, seqno);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_advance(ring);
-
- *result = seqno;
- return 0;
-}
-
static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
@@ -655,76 +615,115 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
return pc->cpu_page[0];
}
-static void
-ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
- dev_priv->gt_irq_mask &= ~mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
}
static void
-ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
- dev_priv->gt_irq_mask |= mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
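The get/put pair above refcounts interrupt enabling under dev_priv->irq_lock, so the mask registers are only touched on the 0 -> 1 and 1 -> 0 transitions. A sketch of the typical caller pattern (the real waiters live in the GEM request code; the polling fallback is an assumption about callers, not code from this patch):

    if (ring->irq_get(ring)) {
            /* sleep on ring->irq_queue until the seqno shows up */
            ring->irq_put(ring);
    } else {
            /* irqs unavailable: poll the status page instead */
    }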
-static void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
- dev_priv->irq_mask &= ~mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
}
static void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
- dev_priv->irq_mask |= mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static bool
-render_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
if (!dev->irq_enabled)
return false;
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_enable_irq(dev_priv,
- GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
- else
- i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
static void
-render_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_disable_irq(dev_priv,
- GT_USER_INTERRUPT |
- GT_PIPE_NOTIFY);
- else
- i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -776,7 +775,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
}
static int
-ring_add_request(struct intel_ring_buffer *ring,
+i9xx_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
u32 seqno;
@@ -799,10 +798,11 @@ ring_add_request(struct intel_ring_buffer *ring,
}
static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
if (!dev->irq_enabled)
return false;
@@ -812,120 +812,87 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
* blt/bsd rings on ivb. */
gen6_gt_force_wake_get(dev_priv);
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- ring->irq_mask &= ~rflag;
- I915_WRITE_IMR(ring, ring->irq_mask);
- ironlake_enable_irq(dev_priv, gflag);
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- ring->irq_mask |= rflag;
- I915_WRITE_IMR(ring, ring->irq_mask);
- ironlake_disable_irq(dev_priv, gflag);
+ I915_WRITE_IMR(ring, ~0);
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
gen6_gt_force_wake_put(dev_priv);
}
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
+static int
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
- struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!dev->irq_enabled)
- return false;
+ int ret;
- spin_lock(&ring->irq_lock);
- if (ring->irq_refcount++ == 0) {
- if (IS_G4X(dev))
- i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
- else
- ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
- }
- spin_unlock(&ring->irq_lock);
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- return true;
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
- struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ MI_BATCH_GTT |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
- spin_lock(&ring->irq_lock);
- if (--ring->irq_refcount == 0) {
- if (IS_G4X(dev))
- i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
- else
- ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
- }
- spin_unlock(&ring->irq_lock);
+ return 0;
}
static int
-ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len)
{
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, 0);
intel_ring_advance(ring);
return 0;
}
static int
-render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
{
- struct drm_device *dev = ring->dev;
int ret;
- if (IS_I830(dev) || IS_845G(dev)) {
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, 0);
- } else {
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- if (INTEL_INFO(dev)->gen >= 4) {
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(ring, offset);
- } else {
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | (2 << 6));
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
- }
- }
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
intel_ring_advance(ring);
return 0;
@@ -933,7 +900,6 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
static void cleanup_status_page(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
obj = ring->status_page.obj;
@@ -944,14 +910,11 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
-
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}
static int init_status_page(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret;
@@ -972,7 +935,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
goto err_unpin;
}
ring->status_page.obj = obj;
@@ -992,8 +954,8 @@ err:
return ret;
}
-int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
int ret;
@@ -1002,10 +964,9 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->gpu_write_list);
+ ring->size = 32 * PAGE_SIZE;
init_waitqueue_head(&ring->irq_queue);
- spin_lock_init(&ring->irq_lock);
- ring->irq_mask = ~0;
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(ring);
@@ -1026,20 +987,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto err_unref;
- ring->map.size = ring->size;
- ring->map.offset = dev->agp->base + obj->gtt_offset;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
-
- drm_core_ioremap_wc(&ring->map, dev);
- if (ring->map.handle == NULL) {
+ ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
+ ring->size);
+ if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
ret = -EINVAL;
goto err_unpin;
}
- ring->virtual_start = ring->map.handle;
ret = ring->init(ring);
if (ret)
goto err_unmap;
@@ -1055,7 +1010,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
return 0;
err_unmap:
- drm_core_ioremapfree(&ring->map, dev);
+ iounmap(ring->virtual_start);
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
@@ -1083,7 +1038,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
I915_WRITE_CTL(ring, 0);
- drm_core_ioremapfree(&ring->map, ring->dev);
+ iounmap(ring->virtual_start);
i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
@@ -1097,7 +1052,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
- unsigned int *virt;
+ uint32_t __iomem *virt;
int rem = ring->size - ring->tail;
if (ring->space < rem) {
@@ -1106,12 +1061,10 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
return ret;
}
- virt = (unsigned int *)(ring->virtual_start + ring->tail);
- rem /= 8;
- while (rem--) {
- *virt++ = MI_NOOP;
- *virt++ = MI_NOOP;
- }
+ virt = ring->virtual_start + ring->tail;
+ rem /= 4;
+ while (rem--)
+ iowrite32(MI_NOOP, virt++);
ring->tail = 0;
ring->space = ring_space(ring);
@@ -1132,9 +1085,11 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- ret = i915_wait_request(ring, seqno, true);
+ ret = i915_wait_request(ring, seqno);
dev_priv->mm.interruptible = was_interruptible;
+ if (!ret)
+ i915_gem_retire_requests_ring(ring);
return ret;
}
@@ -1208,15 +1163,12 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return ret;
trace_i915_ring_wait_begin(ring);
- if (drm_core_check_feature(dev, DRIVER_GEM))
- /* With GEM the hangcheck timer should kick us out of the loop,
- * leaving it early runs the risk of corrupting GEM state (due
- * to running on almost untested codepaths). But on resume
- * timers don't work yet, so prevent a complete hang in that
- * case by choosing an insanely large timeout. */
- end = jiffies + 60 * HZ;
- else
- end = jiffies + 3 * HZ;
+ /* With GEM the hangcheck timer should kick us out of the loop,
+ * leaving it early runs the risk of corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */
+ end = jiffies + 60 * HZ;
do {
ring->head = I915_READ_HEAD(ring);
@@ -1268,48 +1220,14 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
void intel_ring_advance(struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
ring->tail &= ring->size - 1;
+ if (dev_priv->stop_rings & intel_ring_flag(ring))
+ return;
ring->write_tail(ring, ring->tail);
}
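The new stop_rings check in intel_ring_advance() is a fault-injection hook: judging by the usage here, setting a ring's bit causes the computed tail never to be written, so the hardware never sees the queued commands and the hang-detection paths can be exercised deterministically. For example (a sketch; the knob is presumably driven from debugfs):

    dev_priv->stop_rings |= intel_ring_flag(&dev_priv->ring[RCS]); /* simulate a render-ring hang */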
-static const struct intel_ring_buffer render_ring = {
- .name = "render ring",
- .id = RCS,
- .mmio_base = RENDER_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_render_ring,
- .write_tail = ring_write_tail,
- .flush = render_ring_flush,
- .add_request = render_ring_add_request,
- .get_seqno = ring_get_seqno,
- .irq_get = render_ring_get_irq,
- .irq_put = render_ring_put_irq,
- .dispatch_execbuffer = render_ring_dispatch_execbuffer,
- .cleanup = render_ring_cleanup,
- .sync_to = render_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
- MI_SEMAPHORE_SYNC_RV,
- MI_SEMAPHORE_SYNC_RB},
- .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
-};
-
-/* ring buffer for bit-stream decoder */
-
-static const struct intel_ring_buffer bsd_ring = {
- .name = "bsd ring",
- .id = VCS,
- .mmio_base = BSD_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = ring_write_tail,
- .flush = bsd_ring_flush,
- .add_request = ring_add_request,
- .get_seqno = ring_get_seqno,
- .irq_get = bsd_ring_get_irq,
- .irq_put = bsd_ring_put_irq,
- .dispatch_execbuffer = ring_dispatch_execbuffer,
-};
-
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
@@ -1372,77 +1290,8 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
return 0;
}
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_get_irq(ring,
- GT_USER_INTERRUPT,
- GEN6_RENDER_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_put_irq(ring,
- GT_USER_INTERRUPT,
- GEN6_RENDER_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_get_irq(ring,
- GT_GEN6_BSD_USER_INTERRUPT,
- GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_put_irq(ring,
- GT_GEN6_BSD_USER_INTERRUPT,
- GEN6_BSD_USER_INTERRUPT);
-}
-
-/* ring buffer for Video Codec for Gen6+ */
-static const struct intel_ring_buffer gen6_bsd_ring = {
- .name = "gen6 bsd ring",
- .id = VCS,
- .mmio_base = GEN6_BSD_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = gen6_bsd_ring_write_tail,
- .flush = gen6_ring_flush,
- .add_request = gen6_add_request,
- .get_seqno = gen6_ring_get_seqno,
- .irq_get = gen6_bsd_ring_get_irq,
- .irq_put = gen6_bsd_ring_put_irq,
- .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .sync_to = gen6_bsd_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
- MI_SEMAPHORE_SYNC_INVALID,
- MI_SEMAPHORE_SYNC_VB},
- .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
-};
-
/* Blitter support (SandyBridge+) */
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_get_irq(ring,
- GT_BLT_USER_INTERRUPT,
- GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
- gen6_ring_put_irq(ring,
- GT_BLT_USER_INTERRUPT,
- GEN6_BLITTER_USER_INTERRUPT);
-}
-
static int blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
@@ -1464,42 +1313,63 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
-static const struct intel_ring_buffer gen6_blt_ring = {
- .name = "blt ring",
- .id = BCS,
- .mmio_base = BLT_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = ring_write_tail,
- .flush = blt_ring_flush,
- .add_request = gen6_add_request,
- .get_seqno = gen6_ring_get_seqno,
- .irq_get = blt_ring_get_irq,
- .irq_put = blt_ring_put_irq,
- .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .sync_to = gen6_blt_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
- MI_SEMAPHORE_SYNC_BV,
- MI_SEMAPHORE_SYNC_INVALID},
- .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
-};
-
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
- *ring = render_ring;
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
ring->flush = gen6_render_ring_flush;
- ring->irq_get = gen6_render_ring_get_irq;
- ring->irq_put = gen6_render_ring_put_irq;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->irq_enable_mask = GT_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
+ ring->signal_mbox[0] = GEN6_VRSYNC;
+ ring->signal_mbox[1] = GEN6_BRSYNC;
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
+ ring->flush = gen4_render_ring_flush;
ring->get_seqno = pc_render_get_seqno;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+ } else {
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
}
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 6)
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
if (!I915_NEED_GFX_HWS(dev)) {
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1514,15 +1384,41 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
- *ring = render_ring;
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
if (INTEL_INFO(dev)->gen >= 6) {
- ring->add_request = gen6_add_request;
- ring->irq_get = gen6_render_ring_get_irq;
- ring->irq_put = gen6_render_ring_put_irq;
- } else if (IS_GEN5(dev)) {
- ring->add_request = pc_render_add_request;
- ring->get_seqno = pc_render_get_seqno;
+ /* non-kms not supported on gen6+ */
+ return -ENODEV;
+ }
+
+ /* Note: gem is not supported on gen5/ilk without kms (the corresponding
+ * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+ * the special gen5 functions. */
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
}
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
if (!I915_NEED_GFX_HWS(dev))
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1537,20 +1433,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
if (IS_I830(ring->dev))
ring->effective_size -= 128;
- ring->map.offset = start;
- ring->map.size = size;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
-
- drm_core_ioremap_wc(&ring->map, dev);
- if (ring->map.handle == NULL) {
+ ring->virtual_start = ioremap_wc(start, size);
+ if (ring->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
- ring->virtual_start = (void __force __iomem *)ring->map.handle;
return 0;
}
@@ -1559,10 +1448,46 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
- if (IS_GEN6(dev) || IS_GEN7(dev))
- *ring = gen6_bsd_ring;
- else
- *ring = bsd_ring;
+ ring->name = "bsd ring";
+ ring->id = VCS;
+
+ ring->write_tail = ring_write_tail;
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ ring->mmio_base = GEN6_BSD_RING_BASE;
+ /* gen6 bsd needs a special wa for tail updates */
+ if (IS_GEN6(dev))
+ ring->write_tail = gen6_bsd_ring_write_tail;
+ ring->flush = gen6_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
+ ring->signal_mbox[0] = GEN6_RVSYNC;
+ ring->signal_mbox[1] = GEN6_BVSYNC;
+ } else {
+ ring->mmio_base = BSD_RING_BASE;
+ ring->flush = bsd_ring_flush;
+ ring->add_request = i9xx_add_request;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN5(dev)) {
+ ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ } else {
+ ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ }
+ ring->init = init_ring_common;
+
return intel_init_ring_buffer(dev, ring);
}
@@ -1572,7 +1497,25 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
- *ring = gen6_blt_ring;
+ ring->name = "blitter ring";
+ ring->id = BCS;
+
+ ring->mmio_base = BLT_RING_BASE;
+ ring->write_tail = ring_write_tail;
+ ring->flush = blt_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->signal_mbox[0] = GEN6_RBSYNC;
+ ring->signal_mbox[1] = GEN6_VBSYNC;
+ ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index bc0365b8fa4d..55d3da26bae7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,7 +2,7 @@
#define _INTEL_RINGBUFFER_H_
struct intel_hw_status_page {
- u32 __iomem *page_addr;
+ u32 *page_addr;
unsigned int gfx_addr;
struct drm_i915_gem_object *obj;
};
@@ -56,12 +56,9 @@ struct intel_ring_buffer {
*/
u32 last_retired_head;
- spinlock_t irq_lock;
- u32 irq_refcount;
- u32 irq_mask;
- u32 irq_seqno; /* last seq seem at irq time */
+ u32 irq_refcount; /* protected by dev_priv->irq_lock */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
- u32 waiting_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
void (*irq_put)(struct intel_ring_buffer *ring);
@@ -118,11 +115,16 @@ struct intel_ring_buffer {
u32 outstanding_lazy_request;
wait_queue_head_t irq_queue;
- drm_local_map_t map;
void *private;
};
+static inline bool
+intel_ring_initialized(struct intel_ring_buffer *ring)
+{
+ return ring->obj != NULL;
+}
+
static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
@@ -152,7 +154,9 @@ static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
int reg)
{
- return ioread32(ring->status_page.page_addr + reg);
+ /* Ensure that the compiler doesn't optimize away the load. */
+ barrier();
+ return ring->status_page.page_addr[reg];
}
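The barrier() above matters because the status page is now plain cacheable memory (kmap'd) rather than an ioremap'd mapping read through ioread32(): without it the compiler may legally hoist the load out of a polling loop. A sketch of the hazard (real waiters compare via i915_seqno_passed() to handle wraparound):

    /* With no barrier, this busy-wait could be compiled into a single
     * load of the status page followed by an infinite loop: */
    while (intel_read_status_page(ring, I915_GEM_HWS_INDEX) < seqno)
            cpu_relax();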
/**
@@ -170,10 +174,7 @@ intel_read_status_page(struct intel_ring_buffer *ring,
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
-#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
-#define I915_BREADCRUMB_INDEX 0x21
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index ae5e748f39bb..a949b73880c8 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -41,7 +41,7 @@
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
-#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
SDVO_TV_MASK)
@@ -74,7 +74,7 @@ struct intel_sdvo {
struct i2c_adapter ddc;
/* Register for the SDVO device: SDVOB or SDVOC */
- int sdvo_reg;
+ uint32_t sdvo_reg;
/* Active outputs controlled by this SDVO output */
uint16_t controlled_output;
@@ -114,6 +114,9 @@ struct intel_sdvo {
*/
bool is_tv;
+ /* On different gens SDVOB is at different places. */
+ bool is_sdvob;
+
/* This is for current tv format name */
int tv_format_index;
@@ -403,8 +406,7 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
-#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
-#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC")
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
@@ -441,9 +443,17 @@ static const char *cmd_status_names[] = {
static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
- u8 buf[args_len*2 + 2], status;
- struct i2c_msg msgs[args_len + 3];
- int i, ret;
+ u8 *buf, status;
+ struct i2c_msg *msgs;
+ int i, ret = true;
+
+	buf = kzalloc(args_len * 2 + 2, GFP_KERNEL);
+	if (!buf)
+		return false;
+
+	msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
+	if (!msgs) {
+		kfree(buf);	/* don't leak buf on the error path */
+		return false;
+	}
intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
@@ -477,15 +487,19 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
if (ret < 0) {
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
- return false;
+ ret = false;
+ goto out;
}
if (ret != i+3) {
/* failure in I2C transfer */
DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
- return false;
+ ret = false;
}
- return true;
+out:
+ kfree(msgs);
+ kfree(buf);
+ return ret;
}
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
@@ -733,18 +747,18 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
uint16_t h_sync_offset, v_sync_offset;
int mode_clock;
- width = mode->crtc_hdisplay;
- height = mode->crtc_vdisplay;
+ width = mode->hdisplay;
+ height = mode->vdisplay;
/* do some mode translations */
- h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
- h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ h_blank_len = mode->htotal - mode->hdisplay;
+ h_sync_len = mode->hsync_end - mode->hsync_start;
- v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
- v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ v_blank_len = mode->vtotal - mode->vdisplay;
+ v_sync_len = mode->vsync_end - mode->vsync_start;
- h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
- v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+ h_sync_offset = mode->hsync_start - mode->hdisplay;
+ v_sync_offset = mode->vsync_start - mode->vdisplay;
mode_clock = mode->clock;
mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
@@ -873,17 +887,24 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
};
uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
uint8_t set_buf_index[2] = { 1, 0 };
- uint64_t *data = (uint64_t *)&avi_if;
+ uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+ uint64_t *data = (uint64_t *)sdvo_data;
unsigned i;
intel_dip_infoframe_csum(&avi_if);
+ /* sdvo spec says that the ecc is handled by the hw, and it looks like
+ * we must not send the ecc field, either. */
+ memcpy(sdvo_data, &avi_if, 3);
+ sdvo_data[3] = avi_if.checksum;
+ memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+
if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_INDEX,
set_buf_index, 2))
return false;
- for (i = 0; i < sizeof(avi_if); i += 8) {
+ for (i = 0; i < sizeof(sdvo_data); i += 8) {
if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_DATA,
data, 8))
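The resulting buffer layout, as described by the comment in this hunk (the SDVO encoder computes the ECC itself, so that byte is never sent):

    /*
     * sdvo_data[0..2]  infoframe header: type, version, length
     * sdvo_data[3]     checksum (ECC byte skipped)
     * sdvo_data[4..]   AVI payload, pushed to the hw buffer in
     *                  8-byte chunks via SDVO_CMD_SET_HBUF_DATA
     */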
@@ -1260,10 +1281,11 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
struct drm_i915_private *dev_priv = connector->dev->dev_private;
return drm_get_edid(connector,
- &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ dev_priv->crt_ddc_pin));
}
-enum drm_connector_status
+static enum drm_connector_status
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
@@ -1349,8 +1371,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
return connector_status_unknown;
/* add 30ms delay when the output type might be TV */
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+ if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
mdelay(30);
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
@@ -1570,9 +1591,6 @@ end:
intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
- drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
- 0);
-
intel_sdvo->is_lvds = true;
break;
}
@@ -1901,7 +1919,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
{
struct sdvo_device_mapping *mapping;
- if (IS_SDVOB(reg))
+ if (sdvo->is_sdvob)
mapping = &(dev_priv->sdvo_mappings[0]);
else
mapping = &(dev_priv->sdvo_mappings[1]);
@@ -1919,7 +1937,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *mapping;
u8 pin;
- if (IS_SDVOB(reg))
+ if (sdvo->is_sdvob)
mapping = &dev_priv->sdvo_mappings[0];
else
mapping = &dev_priv->sdvo_mappings[1];
@@ -1928,12 +1946,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
if (mapping->initialized)
pin = mapping->i2c_pin;
- if (pin < GMBUS_NUM_PORTS) {
- sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ if (intel_gmbus_is_port_valid(pin)) {
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
intel_gmbus_force_bit(sdvo->i2c, true);
} else {
- sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
}
}
@@ -1944,12 +1962,12 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
}
static u8
-intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
- if (IS_SDVOB(sdvo_reg)) {
+ if (sdvo->is_sdvob) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
@@ -1974,7 +1992,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
- if (IS_SDVOB(sdvo_reg))
+ if (sdvo->is_sdvob)
return 0x70;
else
return 0x72;
@@ -2199,6 +2217,10 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
return false;
+ if (flags & SDVO_OUTPUT_YPRPB0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
+ return false;
+
if (flags & SDVO_OUTPUT_RGB0)
if (!intel_sdvo_analog_init(intel_sdvo, 0))
return false;
@@ -2490,7 +2512,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
return i2c_add_adapter(&sdvo->ddc) == 0;
}
-bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
@@ -2502,7 +2524,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
return false;
intel_sdvo->sdvo_reg = sdvo_reg;
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ intel_sdvo->is_sdvob = is_sdvob;
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
kfree(intel_sdvo);
@@ -2519,13 +2542,13 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
u8 byte;
if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
- DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ DRM_DEBUG_KMS("No SDVO device found on %s\n",
+ SDVO_NAME(intel_sdvo));
goto err;
}
}
- if (IS_SDVOB(sdvo_reg))
+ if (intel_sdvo->is_sdvob)
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
else
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
@@ -2546,8 +2569,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
- DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
+ SDVO_NAME(intel_sdvo));
goto err;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index e90dfb625c42..2a20fb0781d7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -110,14 +110,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
* when scaling is disabled.
*/
if (crtc_w != src_w || crtc_h != src_h) {
- dev_priv->sprite_scaling_enabled = true;
- sandybridge_update_wm(dev);
- intel_wait_for_vblank(dev, pipe);
+ if (!dev_priv->sprite_scaling_enabled) {
+ dev_priv->sprite_scaling_enabled = true;
+ intel_update_watermarks(dev);
+ intel_wait_for_vblank(dev, pipe);
+ }
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
} else {
- dev_priv->sprite_scaling_enabled = false;
- /* potentially re-enable LP watermarks */
- sandybridge_update_wm(dev);
+ if (dev_priv->sprite_scaling_enabled) {
+ dev_priv->sprite_scaling_enabled = false;
+ /* potentially re-enable LP watermarks */
+ intel_update_watermarks(dev);
+ }
}
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
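The rewrite above turns an unconditional, expensive watermark update plus vblank wait into a state-transition guard. Roughly, assuming a hypothetical local want flag for whether scaling is needed:

    bool want = (crtc_w != src_w || crtc_h != src_h);   /* hypothetical local */

    if (want != dev_priv->sprite_scaling_enabled) {
            dev_priv->sprite_scaling_enabled = want;
            intel_update_watermarks(dev);
            if (want)   /* LP watermarks must be off before scaling starts */
                    intel_wait_for_vblank(dev, pipe);
    }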
@@ -133,7 +137,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
POSTING_READ(SPRSURF(pipe));
}
@@ -149,8 +153,11 @@ ivb_disable_plane(struct drm_plane *plane)
/* Can't leave the scaler enabled... */
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
- I915_WRITE(SPRSURF(pipe), 0);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
+
+ dev_priv->sprite_scaling_enabled = false;
+ intel_update_watermarks(dev);
}
static int
@@ -208,7 +215,7 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
}
static void
-snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -218,7 +225,7 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe, pixel_size;
- u32 dvscntr, dvsscale = 0;
+ u32 dvscntr, dvsscale;
dvscntr = I915_READ(DVSCNTR(pipe));
@@ -262,8 +269,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (obj->tiling_mode != I915_TILING_NONE)
dvscntr |= DVS_TILED;
- /* must disable */
- dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+ if (IS_GEN6(dev))
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
/* Sizes are 0 based */
@@ -274,7 +281,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
- if (crtc_w != src_w || crtc_h != src_h)
+ dvsscale = 0;
+ if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -290,12 +298,12 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
POSTING_READ(DVSSURF(pipe));
}
static void
-snb_disable_plane(struct drm_plane *plane)
+ilk_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -306,7 +314,7 @@ snb_disable_plane(struct drm_plane *plane)
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
- I915_WRITE(DVSSURF(pipe), 0);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
}
@@ -333,7 +341,7 @@ intel_disable_primary(struct drm_crtc *crtc)
}
static int
-snb_update_colorkey(struct drm_plane *plane,
+ilk_update_colorkey(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
@@ -362,7 +370,7 @@ snb_update_colorkey(struct drm_plane *plane,
}
static void
-snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -550,14 +558,13 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
- if (!dev_priv)
- return -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
@@ -584,14 +591,13 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
- if (!dev_priv)
- return -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
mutex_lock(&dev->mode_config.mutex);
@@ -616,6 +622,14 @@ static const struct drm_plane_funcs intel_plane_funcs = {
.destroy = intel_destroy_plane,
};
+static uint32_t ilk_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
static uint32_t snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
@@ -630,34 +644,56 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
{
struct intel_plane *intel_plane;
unsigned long possible_crtcs;
+ const uint32_t *plane_formats;
+ int num_plane_formats;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
if (!intel_plane)
return -ENOMEM;
- if (IS_GEN6(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 5:
+ case 6:
intel_plane->max_downscale = 16;
- intel_plane->update_plane = snb_update_plane;
- intel_plane->disable_plane = snb_disable_plane;
- intel_plane->update_colorkey = snb_update_colorkey;
- intel_plane->get_colorkey = snb_get_colorkey;
- } else if (IS_GEN7(dev)) {
+ intel_plane->update_plane = ilk_update_plane;
+ intel_plane->disable_plane = ilk_disable_plane;
+ intel_plane->update_colorkey = ilk_update_colorkey;
+ intel_plane->get_colorkey = ilk_get_colorkey;
+
+ if (IS_GEN6(dev)) {
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ plane_formats = ilk_plane_formats;
+ num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
+ }
+ break;
+
+ case 7:
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->update_colorkey = ivb_update_colorkey;
intel_plane->get_colorkey = ivb_get_colorkey;
+
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ break;
+
+ default:
+ return -ENODEV;
}
intel_plane->pipe = pipe;
possible_crtcs = (1 << pipe);
ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs, snb_plane_formats,
- ARRAY_SIZE(snb_plane_formats), false);
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ false);
if (ret)
kfree(intel_plane);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 05f765ef5464..3346612d2953 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -811,7 +811,7 @@ intel_tv_mode_lookup(const char *tv_format)
{
int i;
- for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
if (!strcmp(tv_format, tv_mode->name))
@@ -1153,6 +1153,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
DAC_B_0_7_V |
DAC_C_0_7_V);
+
+ /*
+	 * The TV sense state should be cleared to zero on the Cantiga
+	 * platform, otherwise the TV is misdetected. This is a hardware
+	 * requirement.
+ */
+ if (IS_GM45(dev))
+ tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
+
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
@@ -1240,11 +1249,8 @@ intel_tv_detect(struct drm_connector *connector, bool force)
int type;
mode = reported_modes[0];
- drm_mode_set_crtcinfo(&mode, 0);
- if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
- type = intel_tv_detect_type(intel_tv, connector);
- } else if (force) {
+ if (force) {
struct intel_load_detect_pipe tmp;
if (intel_get_load_detect_pipe(&intel_tv->base, connector,
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
new file mode 100644
index 000000000000..d63013497f66
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -0,0 +1,15 @@
+config DRM_MGAG200
+ tristate "Kernel modesetting driver for MGA G200 server engines"
+ depends on DRM && PCI && EXPERIMENTAL
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+	  This is a KMS driver for the MGA G200 server chips; it does
+	  not support the original MGA G200 or any of the desktop
+	  chips. It requires version 0.3.0 of the modesetting userspace
+	  driver, and a version of the mga driver that will fail on
+	  KMS-enabled devices.
+
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
new file mode 100644
index 000000000000..7db592eedbf1
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+mgag200-y := mgag200_main.o mgag200_mode.o \
+ mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
+
+obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
new file mode 100644
index 000000000000..3c8e04f54713
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "mgag200_drv.h"
+
+#include "drm_pciids.h"
+
+/*
+ * This is the generic driver code. This binds the driver to the drm core,
+ * which then performs further device association and calls our graphics init
+ * functions
+ */
+int mgag200_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, mgag200_modeset, int, 0400);
+
+static struct drm_driver driver;
+
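+/* the driver_data field carries the board type (enum mga_type) so the
+ * probe path can tell the G200 server variants apart */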
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
+ { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
+ { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
+ { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
+ { PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
+ { PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int __devinit
+mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void mga_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static const struct file_operations mgag200_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = mgag200_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR,
+ .load = mgag200_driver_load,
+ .unload = mgag200_driver_unload,
+ .fops = &mgag200_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_init_object = mgag200_gem_init_object,
+ .gem_free_object = mgag200_gem_free_object,
+ .dumb_create = mgag200_dumb_create,
+ .dumb_map_offset = mgag200_dumb_mmap_offset,
+ .dumb_destroy = mgag200_dumb_destroy,
+};
+
+static struct pci_driver mgag200_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = mga_pci_probe,
+ .remove = mga_pci_remove,
+};
+
+static int __init mgag200_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && mgag200_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (mgag200_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &mgag200_pci_driver);
+}
+
+static void __exit mgag200_exit(void)
+{
+ drm_pci_exit(&driver, &mgag200_pci_driver);
+}
+
+module_init(mgag200_init);
+module_exit(mgag200_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
new file mode 100644
index 000000000000..6f13b3563234
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#ifndef __MGAG200_DRV_H__
+#define __MGAG200_DRV_H__
+
+#include <video/vga.h>
+
+#include "drm/drm_fb_helper.h"
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "mgag200_reg.h"
+
+#define DRIVER_AUTHOR "Matthew Garrett"
+
+#define DRIVER_NAME "mgag200"
+#define DRIVER_DESC "MGA G200 SE"
+#define DRIVER_DATE "20110418"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define MGAG200FB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)mdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)mdev->rmmio) + (reg))
+
+#define ATTR_INDEX 0x1fc0
+#define ATTR_DATA 0x1fc1
+
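+/* reading 0x1fda (VGA input status 1) resets the attribute controller's
+ * index/data flip-flop before the index write below */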
+#define WREG_ATTR(reg, v) \
+ do { \
+ RREG8(0x1fda); \
+ WREG8(ATTR_INDEX, reg); \
+ WREG8(ATTR_DATA, v); \
+ } while (0) \
+
+#define WREG_SEQ(reg, v) \
+ do { \
+ WREG8(MGAREG_SEQ_INDEX, reg); \
+ WREG8(MGAREG_SEQ_DATA, v); \
+ } while (0) \
+
+#define WREG_CRT(reg, v) \
+ do { \
+ WREG8(MGAREG_CRTC_INDEX, reg); \
+ WREG8(MGAREG_CRTC_DATA, v); \
+ } while (0) \
+
+
+#define WREG_ECRT(reg, v) \
+ do { \
+ WREG8(MGAREG_CRTCEXT_INDEX, reg); \
+ WREG8(MGAREG_CRTCEXT_DATA, v); \
+ } while (0) \
+
+#define GFX_INDEX 0x1fce
+#define GFX_DATA 0x1fcf
+
+#define WREG_GFX(reg, v) \
+ do { \
+ WREG8(GFX_INDEX, reg); \
+ WREG8(GFX_DATA, v); \
+ } while (0) \
+
+#define DAC_INDEX 0x3c00
+#define DAC_DATA 0x3c0a
+
+#define WREG_DAC(reg, v) \
+ do { \
+ WREG8(DAC_INDEX, reg); \
+ WREG8(DAC_DATA, v); \
+ } while (0) \
+
+#define MGA_MISC_OUT 0x1fc2
+#define MGA_MISC_IN 0x1fcc
+
+#define MGAG200_MAX_FB_HEIGHT 4096
+#define MGAG200_MAX_FB_WIDTH 4096
+
+#define MATROX_DPMS_CLEARED (-1)
+
+#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
+#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
+#define to_mga_connector(x) container_of(x, struct mga_connector, base)
+#define to_mga_framebuffer(x) container_of(x, struct mga_framebuffer, base)
+
+struct mga_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct mga_fbdev {
+ struct drm_fb_helper helper;
+ struct mga_framebuffer mfb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+};
+
+struct mga_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int last_dpms;
+ bool enabled;
+};
+
+struct mga_mode_info {
+ bool mode_config_initialized;
+ struct mga_crtc *crtc;
+};
+
+struct mga_encoder {
+ struct drm_encoder base;
+ int last_dpms;
+};
+
+
+struct mga_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
+ int data, clock;
+};
+
+struct mga_connector {
+ struct drm_connector base;
+ struct mga_i2c_chan *i2c;
+};
+
+
+struct mga_mc {
+ resource_size_t vram_size;
+ resource_size_t vram_base;
+ resource_size_t vram_window;
+};
+
+enum mga_type {
+ G200_SE_A,
+ G200_SE_B,
+ G200_WB,
+ G200_EV,
+ G200_EH,
+ G200_ER,
+};
+
+#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
+
+struct mga_device {
+ struct drm_device *dev;
+ unsigned long flags;
+
+ resource_size_t rmmio_base;
+ resource_size_t rmmio_size;
+ void __iomem *rmmio;
+
+ drm_local_map_t *framebuffer;
+
+ struct mga_mc mc;
+ struct mga_mode_info mode_info;
+
+ struct mga_fbdev *mfbdev;
+
+ bool suspended;
+ int num_crtc;
+ enum mga_type type;
+ int has_sdram;
+ struct drm_display_mode mode;
+
+ int bpp_shifts[4];
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ } ttm;
+
+ u32 reg_1e24; /* SE model number */
+};
+
+
+struct mgag200_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem)
+
+static inline struct mgag200_bo *
+mgag200_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct mgag200_bo, bo);
+}
+ /* mga_crtc.c */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+ /* mgag200_mode.c */
+int mgag200_modeset_init(struct mga_device *mdev);
+void mgag200_modeset_fini(struct mga_device *mdev);
+
+ /* mga_fbdev.c */
+int mgag200_fbdev_init(struct mga_device *mdev);
+void mgag200_fbdev_fini(struct mga_device *mdev);
+
+ /* mgag200_main.c */
+int mgag200_framebuffer_init(struct drm_device *dev,
+ struct mga_framebuffer *mfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
+int mgag200_driver_unload(struct drm_device *dev);
+int mgag200_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int mgag200_gem_init_object(struct drm_gem_object *obj);
+int mgag200_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int mgag200_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+void mgag200_gem_free_object(struct drm_gem_object *obj);
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+ /* mga_i2c.c */
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
+
+int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait);
+void mgag200_bo_unreserve(struct mgag200_bo *bo);
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct mgag200_bo **pastbo);
+int mgag200_mm_init(struct mga_device *mdev);
+void mgag200_mm_fini(struct mga_device *mdev);
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int mgag200_bo_unpin(struct mgag200_bo *bo);
+int mgag200_bo_push_sysram(struct mgag200_bo *bo);
+#endif /* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
new file mode 100644
index 000000000000..880d3369760e
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_fb_helper.h"
+
+#include <linux/fb.h>
+
+#include "mgag200_drv.h"
+
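+/*
+ * Copy the rectangle fbcon just touched from the vmalloc'ed shadow
+ * buffer into the TTM-backed framebuffer object in VRAM.
+ */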
+static void mga_dirty_update(struct mga_fbdev *mfbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct mgag200_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
+
+ obj = mfbdev->mfb.obj;
+ bo = gem_to_mga_bo(obj);
+
+ ret = mgag200_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("failed to reserve fb bo\n");
+ return;
+ }
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ mgag200_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
+
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ mgag200_bo_unreserve(bo);
+}
+
+static void mga_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_fillrect(info, rect);
+ mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void mga_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_copyarea(info, area);
+ mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void mga_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_imageblit(info, image);
+ mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+
+static struct fb_ops mgag200fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = mga_fillrect,
+ .fb_copyarea = mga_copyarea,
+ .fb_imageblit = mga_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int mgag200fb_create_object(struct mga_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = mgag200_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int mgag200fb_create(struct mga_fbdev *mfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = mfbdev->helper.dev;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct mga_device *mdev = dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *gobj = NULL;
+ struct device *device = &dev->pdev->dev;
+ struct mgag200_bo *bo;
+ int ret;
+ void *sysram;
+ int size;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+ bo = gem_to_mga_bo(gobj);
+
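+	/* the console renders into this system-RAM shadow copy; dirty
+	 * rectangles are blitted into VRAM by mga_dirty_update() */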
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (info == NULL)
+ return -ENOMEM;
+
+ info->par = mfbdev;
+
+ ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ mfbdev->sysram = sysram;
+ mfbdev->size = size;
+
+ fb = &mfbdev->mfb.base;
+
+ /* setup helper */
+ mfbdev->helper.fb = fb;
+ mfbdev->helper.fbdev = info;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ strcpy(info->fix.id, "mgadrmfb");
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &mgag200fb_ops;
+
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = mdev->mc.vram_size;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n",
+ fb->width, fb->height);
+ return 0;
+out:
+ return ret;
+}
+
+static int mga_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size
+ *sizes)
+{
+ struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = mgag200fb_create(mfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static int mga_fbdev_destroy(struct drm_device *dev,
+ struct mga_fbdev *mfbdev)
+{
+ struct fb_info *info;
+ struct mga_framebuffer *mfb = &mfbdev->mfb;
+
+ if (mfbdev->helper.fbdev) {
+ info = mfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (mfb->obj) {
+ drm_gem_object_unreference_unlocked(mfb->obj);
+ mfb->obj = NULL;
+ }
+ drm_fb_helper_fini(&mfbdev->helper);
+ vfree(mfbdev->sysram);
+ drm_framebuffer_cleanup(&mfb->base);
+
+ return 0;
+}
+
+static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
+ .gamma_set = mga_crtc_fb_gamma_set,
+ .gamma_get = mga_crtc_fb_gamma_get,
+ .fb_probe = mga_fb_find_or_create_single,
+};
+
+int mgag200_fbdev_init(struct mga_device *mdev)
+{
+ struct mga_fbdev *mfbdev;
+ int ret;
+
+ mfbdev = kzalloc(sizeof(struct mga_fbdev), GFP_KERNEL);
+ if (!mfbdev)
+ return -ENOMEM;
+
+ mdev->mfbdev = mfbdev;
+ mfbdev->helper.funcs = &mga_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
+ mdev->num_crtc, MGAG200FB_CONN_LIMIT);
+ if (ret) {
+ kfree(mfbdev);
+ return ret;
+ }
+ drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+ drm_fb_helper_initial_config(&mfbdev->helper, 32);
+
+ return 0;
+}
+
+void mgag200_fbdev_fini(struct mga_device *mdev)
+{
+ if (!mdev->mfbdev)
+ return;
+
+ mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
+ kfree(mdev->mfbdev);
+ mdev->mfbdev = NULL;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
new file mode 100644
index 000000000000..dd3568a1b6b0
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "mgag200_drv.h"
+
+static int mga_i2c_read_gpio(struct mga_device *mdev)
+{
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ return RREG8(DAC_DATA);
+}
+
+static void mga_i2c_set_gpio(struct mga_device *mdev, int mask, int val)
+{
+ int tmp;
+
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+ tmp = (RREG8(DAC_DATA) & mask) | val;
+ WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+ WREG_DAC(MGA1064_GEN_IO_DATA, 0);
+}
+
+static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
+{
+ if (state)
+ state = 0;
+ else
+ state = mask;
+ mga_i2c_set_gpio(mdev, ~mask, state);
+}
+
+static void mga_gpio_setsda(void *data, int state)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ mga_i2c_set(mdev, i2c->data, state);
+}
+
+static void mga_gpio_setscl(void *data, int state)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ mga_i2c_set(mdev, i2c->clock, state);
+}
+
+static int mga_gpio_getsda(void *data)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
+}
+
+static int mga_gpio_getscl(void *data)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
+}
+
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mga_i2c_chan *i2c;
+ int ret;
+ int data, clock;
+
+ WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
+ WREG_DAC(MGA1064_GEN_IO_CTL, 0);
+
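+	/* which DAC GPIO bits carry the DDC data and clock lines differs
+	 * per board type */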
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ case G200_EV:
+ case G200_WB:
+ data = 1;
+ clock = 2;
+ break;
+ case G200_EH:
+ case G200_ER:
+ data = 2;
+ clock = 1;
+ break;
+ default:
+ data = 2;
+ clock = 8;
+ break;
+ }
+
+ i2c = kzalloc(sizeof(struct mga_i2c_chan), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+
+ i2c->data = data;
+ i2c->clock = clock;
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");
+
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 10;
+ i2c->bit.timeout = 2;
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = mga_gpio_setsda;
+ i2c->bit.setscl = mga_gpio_setscl;
+ i2c->bit.getsda = mga_gpio_getsda;
+ i2c->bit.getscl = mga_gpio_getscl;
+
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ kfree(i2c);
+ i2c = NULL;
+ }
+ return i2c;
+}
+
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
new file mode 100644
index 000000000000..636a81cd2f37
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+#include "mgag200_drv.h"
+
+static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
+ if (mga_fb->obj)
+ drm_gem_object_unreference_unlocked(mga_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static int mga_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs mga_fb_funcs = {
+ .destroy = mga_user_framebuffer_destroy,
+ .create_handle = mga_user_framebuffer_create_handle,
+};
+
+int mgag200_framebuffer_init(struct drm_device *dev,
+ struct mga_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ return 0;
+}
+
+static struct drm_framebuffer *
+mgag200_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct mga_framebuffer *mga_fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
+ if (!mga_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(mga_fb);
+ return ERR_PTR(ret);
+ }
+ return &mga_fb->base;
+}
+
+static const struct drm_mode_config_funcs mga_mode_funcs = {
+ .fb_create = mgag200_user_framebuffer_create,
+};
+
+/* Unmap the framebuffer from the core and release the memory */
+static void mga_vram_fini(struct mga_device *mdev)
+{
+ pci_iounmap(mdev->dev->pdev, mdev->rmmio);
+ mdev->rmmio = NULL;
+ if (mdev->mc.vram_base)
+ release_mem_region(mdev->mc.vram_base, mdev->mc.vram_window);
+}
+
+static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
+{
+ int offset;
+ int orig;
+ int test1, test2;
+ int orig1, orig2;
+
+ /* Probe */
+ orig = ioread16(mem);
+ iowrite16(0, mem);
+
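+	/*
+	 * Write a test pattern every 16K: if the pattern fails to read
+	 * back, or the write aliases back onto offset 0, we have run past
+	 * the end of the usable VRAM.
+	 */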
+ for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) {
+ orig1 = ioread8(mem + offset);
+ orig2 = ioread8(mem + offset + 0x100);
+
+ iowrite16(0xaa55, mem + offset);
+ iowrite16(0xaa55, mem + offset + 0x100);
+
+ test1 = ioread16(mem + offset);
+ test2 = ioread16(mem);
+
+ iowrite16(orig1, mem + offset);
+ iowrite16(orig2, mem + offset + 0x100);
+
+ if (test1 != 0xaa55) {
+ break;
+ }
+
+ if (test2) {
+ break;
+ }
+ }
+
+ iowrite16(orig, mem);
+ return offset - 65536;
+}
+
+/* Map the framebuffer from the card and configure the core */
+static int mga_vram_init(struct mga_device *mdev)
+{
+ void __iomem *mem;
+ struct apertures_struct *aper = alloc_apertures(1);
+
+ /* BAR 0 is VRAM */
+ mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
+ mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
+
+ aper->ranges[0].base = mdev->mc.vram_base;
+ aper->ranges[0].size = mdev->mc.vram_window;
+ aper->count = 1;
+
+ remove_conflicting_framebuffers(aper, "mgafb", true);
+
+ if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
+ "mgadrmfb_vram")) {
+ DRM_ERROR("can't reserve VRAM\n");
+ return -ENXIO;
+ }
+
+ mem = pci_iomap(mdev->dev->pdev, 0, 0);
+
+ mdev->mc.vram_size = mga_probe_vram(mdev, mem);
+
+ pci_iounmap(mdev->dev->pdev, mem);
+
+ return 0;
+}
+
+static int mgag200_device_init(struct drm_device *dev,
+ uint32_t flags)
+{
+ struct mga_device *mdev = dev->dev_private;
+ int ret, option;
+
+ mdev->type = flags;
+
+ /* Hardcode the number of CRTCs to 1 */
+ mdev->num_crtc = 1;
+
+ pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+ mdev->has_sdram = !(option & (1 << 14));
+
+ /* BAR 0 is the framebuffer, BAR 1 contains registers */
+ mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
+ mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
+
+ if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size,
+ "mgadrmfb_mmio")) {
+ DRM_ERROR("can't reserve mmio registers\n");
+ return -ENOMEM;
+ }
+
+ mdev->rmmio = pci_iomap(dev->pdev, 1, 0);
+ if (mdev->rmmio == NULL)
+ return -ENOMEM;
+
+ /* stash G200 SE model number for later use */
+ if (IS_G200_SE(mdev))
+ mdev->reg_1e24 = RREG32(0x1e24);
+
+ ret = mga_vram_init(mdev);
+ if (ret) {
+ release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+ return ret;
+ }
+
+ mdev->bpp_shifts[0] = 0;
+ mdev->bpp_shifts[1] = 1;
+ mdev->bpp_shifts[2] = 0;
+ mdev->bpp_shifts[3] = 2;
+ return 0;
+}
+
+void mgag200_device_fini(struct mga_device *mdev)
+{
+ release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+ mga_vram_fini(mdev);
+}
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device
+ */
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct mga_device *mdev;
+ int r;
+
+ mdev = kzalloc(sizeof(struct mga_device), GFP_KERNEL);
+ if (mdev == NULL)
+ return -ENOMEM;
+ dev->dev_private = (void *)mdev;
+ mdev->dev = dev;
+
+ r = mgag200_device_init(dev, flags);
+ if (r) {
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+ goto out;
+ }
+ r = mgag200_mm_init(mdev);
+ if (r)
+ goto out;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.funcs = (void *)&mga_mode_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ r = mgag200_modeset_init(mdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+out:
+ if (r)
+ mgag200_driver_unload(dev);
+ return r;
+}
+
+int mgag200_driver_unload(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+
+ if (mdev == NULL)
+ return 0;
+ mgag200_modeset_fini(mdev);
+ mgag200_fbdev_fini(mdev);
+ drm_mode_config_cleanup(dev);
+ mgag200_mm_fini(mdev);
+ mgag200_device_fini(mdev);
+ kfree(mdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int mgag200_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct mgag200_bo *astbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = mgag200_bo_create(dev, size, 0, 0, &astbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &astbo->gem;
+ return 0;
+}
+
+int mgag200_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = mgag200_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+int mgag200_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int mgag200_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+ return 0;
+}
+
+void mgag200_bo_unref(struct mgag200_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+
+}
+
+void mgag200_gem_free_object(struct drm_gem_object *obj)
+{
+ struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);
+
+ if (!mgag200_bo)
+ return;
+ mgag200_bo_unref(&mgag200_bo);
+}
+
+
+static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
+{
+ return bo->bo.addr_space_offset;
+}
+
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct mgag200_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_mga_bo(obj);
+ *offset = mgag200_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
new file mode 100644
index 000000000000..d303061b251e
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -0,0 +1,1533 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+
+#include <linux/delay.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "mgag200_drv.h"
+
+#define MGAG200_LUT_SIZE 256
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void mga_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+
+ for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+ /* VGA registers */
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_b[i]);
+ }
+}
+
+static inline void mga_wait_vsync(struct mga_device *mdev)
+{
+ unsigned int count = 0;
+ unsigned int status = 0;
+
+ do {
+ status = RREG32(MGAREG_Status);
+ count++;
+ } while ((status & 0x08) && (count < 250000));
+ count = 0;
+ status = 0;
+ do {
+ status = RREG32(MGAREG_Status);
+ count++;
+ } while (!(status & 0x08) && (count < 250000));
+}
+
+static inline void mga_wait_busy(struct mga_device *mdev)
+{
+ unsigned int count = 0;
+ unsigned int status = 0;
+ do {
+ status = RREG8(MGAREG_Status + 2);
+ count++;
+ } while ((status & 0x01) && (count < 500000));
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
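+/*
+ * The per-chip PLL setters below brute-force the m/n/p divider search:
+ * every legal combination is tried, and the one whose resulting VCO
+ * frequency lands closest to the requested pixel clock wins (the SE
+ * variant additionally rejects results more than 0.5% off).
+ */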
+static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+
+ m = n = p = 0;
+ vcomax = 320000;
+ vcomin = 160000;
+ pllreffreq = 25000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 8; testp > 0; testp /= 2) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 17; testn < 256; testn++) {
+ for (testm = 1; testm < 32; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm - 1;
+ n = testn - 1;
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ if (delta > permitteddelta) {
+ printk(KERN_WARNING "PLL delta too large\n");
+ return 1;
+ }
+
+ WREG_DAC(MGA1064_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_PIX_PLLC_P, p);
+ return 0;
+}
+
+static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int i, j, tmpcount, vcount;
+ bool pll_locked = false;
+ u8 tmp;
+
+ m = n = p = 0;
+ vcomax = 550000;
+ vcomin = 150000;
+ pllreffreq = 48000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 1; testp < 9; testp++) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 17; testm++) {
+ for (testn = 1; testn < 151; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = (testm - 1) | ((n >> 1) & 0x80);
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
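+	/* program the PLL and wait for lock, retrying up to 33 times; each
+	 * retry nudges the CRTC 0x1e tuning register, and lock is declared
+	 * once the vertical counter is seen advancing */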
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ if (i > 0) {
+ WREG8(MGAREG_CRTC_INDEX, 0x1e);
+ tmp = RREG8(MGAREG_CRTC_DATA);
+ if (tmp < 0xff)
+ WREG8(MGAREG_CRTC_DATA, tmp+1);
+ }
+
+ /* set pixclkdis to 1 */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+ /* select PLL Set C */
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ /* reset the PLL */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x04;
+ WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+ udelay(50);
+
+ /* program pixel pll register */
+ WREG_DAC(MGA1064_WB_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ /* turn pll on */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x04;
+ WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+ udelay(500);
+
+ /* select the pixel pll */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
+ tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+ /* reset dotclock rate bit */
+ WREG8(MGAREG_SEQ_INDEX, 1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+ tmp &= ~0x8;
+ WREG8(MGAREG_SEQ_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+ return 0;
+}
+
+static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ u8 tmp;
+
+ m = n = p = 0;
+ vcomax = 550000;
+ vcomin = 150000;
+ pllreffreq = 50000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 16; testp > 0; testp--) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 1; testn < 257; testn++) {
+ for (testm = 1; testm < 17; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = testm - 1;
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3 << 2);
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ return 0;
+}
+
+static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int i, j, tmpcount, vcount;
+ u8 tmp;
+ bool pll_locked = false;
+
+ m = n = p = 0;
+ vcomax = 800000;
+ vcomin = 400000;
+ pllreffreq = 3333;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 16; testp > 0; testp--) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 33; testm++) {
+ for (testn = 1; testn < 257; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = (testm - 1) | ((n >> 1) & 0x80);
+ p = testp - 1;
+ }
+ if ((clock * testp) >= 600000)
+					p |= 0x80;
+ }
+ }
+ }
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_EH_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_P, p);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+
+ return 0;
+}
+
+static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta;
+ unsigned int testr, testn, testm, testo;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int tmp;
+
+ m = n = p = 0;
+ vcomax = 1488000;
+ vcomin = 1056000;
+ pllreffreq = 48000;
+
+ delta = 0xffffffff;
+
+ for (testr = 0; testr < 4; testr++) {
+ if (delta == 0)
+ break;
+ for (testn = 5; testn < 129; testn++) {
+ if (delta == 0)
+ break;
+ for (testm = 3; testm >= 0; testm--) {
+ if (delta == 0)
+ break;
+ for (testo = 5; testo < 33; testo++) {
+ computed = pllreffreq * (testn + 1) /
+ (testr + 1);
+ if (computed < vcomin)
+ continue;
+ if (computed > vcomax)
+ continue;
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm | (testo << 3);
+ n = testn;
+ p = testr | (testr << 3);
+ }
+ }
+ }
+ }
+ }
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3<<2) | 0xc0;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_ER_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ return 0;
+}
+
+static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
+{
+	switch (mdev->type) {
+	case G200_SE_A:
+	case G200_SE_B:
+		return mga_g200se_set_plls(mdev, clock);
+	case G200_WB:
+		return mga_g200wb_set_plls(mdev, clock);
+	case G200_EV:
+		return mga_g200ev_set_plls(mdev, clock);
+	case G200_EH:
+		return mga_g200eh_set_plls(mdev, clock);
+	case G200_ER:
+		return mga_g200er_set_plls(mdev, clock);
+	}
+ return 0;
+}
+
+static void mga_g200wb_prepare(struct drm_crtc *crtc)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ u8 tmp;
+ int iter_max;
+
+	/* 1- The first step is to warn the BMC of an upcoming mode change.
+	 * We set the misc<0> line to output. */
+
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+
+ /* we are putting a 1 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+
+	/* 2- The second step is to mask any further scan request.
+	 * This is done by asserting the remfreqmsk bit (XSPAREREG<7>).
+	 */
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x80;
+ WREG_DAC(MGA1064_SPAREREG, tmp);
+
+	/* 3a- The third step is to verify whether there is an active scan.
+	 * We are searching for a 0 on remhsyncsts (XSPAREREG<0>).
+	 */
+ iter_max = 300;
+ while (!(tmp & 0x1) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+
+	/* 3b- This step occurs only if the remote head is actually scanning:
+	 * we wait for the end of the frame, which is a 1 on
+	 * remvsyncsts (XSPAREREG<1>).
+	 */
+ if (iter_max) {
+ iter_max = 300;
+ while ((tmp & 0x2) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+ }
+}
+
+static void mga_g200wb_commit(struct drm_crtc *crtc)
+{
+ u8 tmp;
+ struct mga_device *mdev = crtc->dev->dev_private;
+
+ /* 1- The first step is to ensure that the vrsten and hrsten are set */
+ WREG8(MGAREG_CRTCEXT_INDEX, 1);
+ tmp = RREG8(MGAREG_CRTCEXT_DATA);
+ WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
+
+ /* 2- second step is to assert the rstlvl2 */
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x8;
+ WREG8(DAC_DATA, tmp);
+
+ /* wait 10 us */
+ udelay(10);
+
+ /* 3- deassert rstlvl2 */
+ tmp &= ~0x08;
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ WREG8(DAC_DATA, tmp);
+
+ /* 4- remove mask of scan request */
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x80;
+ WREG8(DAC_DATA, tmp);
+
+ /* 5- put back a 0 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+}
+
+
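+/*
+ * Latch a new scanout offset into the CRTC: wait for the next vertical
+ * blanking interval, then split the dword offset across CRTC registers
+ * 0x0d/0x0c and extended register 0xaf.
+ */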
+void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ u32 addr;
+ int count;
+
+ while (RREG8(0x1fda) & 0x08);
+ while (!(RREG8(0x1fda) & 0x08));
+
+ count = RREG8(MGAREG_VCOUNT) + 2;
+ while (RREG8(MGAREG_VCOUNT) < count);
+
+ addr = offset >> 2;
+ WREG_CRT(0x0d, (u8)(addr & 0xff));
+ WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
+ WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
+}
+
+
+/* push framebuffers that are no longer being scanned out from VRAM back to system RAM */
+static int mga_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct mga_framebuffer *mga_fb;
+ struct mgag200_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ mga_fb = to_mga_framebuffer(fb);
+ obj = mga_fb->obj;
+ bo = gem_to_mga_bo(obj);
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ mgag200_bo_push_sysram(bo);
+ mgag200_bo_unreserve(bo);
+ }
+
+ mga_fb = to_mga_framebuffer(crtc->fb);
+ obj = mga_fb->obj;
+ bo = gem_to_mga_bo(obj);
+
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ mgag200_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&mdev->mfbdev->mfb == mga_fb) {
+ /* if pushing console in kmap it */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+
+ }
+ mgag200_bo_unreserve(bo);
+
+ DRM_INFO("mga base %llx\n", gpu_addr);
+
+ mga_set_start_address(crtc, (u32)gpu_addr);
+
+ return 0;
+}
+
+static int mga_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int mga_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ int hdisplay, hsyncstart, hsyncend, htotal;
+ int vdisplay, vsyncstart, vsyncend, vtotal;
+ int pitch;
+ int option = 0, option2 = 0;
+ int i;
+ unsigned char misc = 0;
+ unsigned char ext_vga[6];
+ unsigned char ext_vga_index24;
+ unsigned char dac_index90 = 0;
+ u8 bppshift;
+
+ static unsigned char dacvalue[] = {
+ /* 0x00: */ 0, 0, 0, 0, 0, 0, 0x00, 0,
+ /* 0x08: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x10: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x18: */ 0x00, 0, 0xC9, 0xFF, 0xBF, 0x20, 0x1F, 0x20,
+ /* 0x20: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x28: */ 0x00, 0x00, 0x00, 0x00, 0, 0, 0, 0x40,
+ /* 0x30: */ 0x00, 0xB0, 0x00, 0xC2, 0x34, 0x14, 0x02, 0x83,
+ /* 0x38: */ 0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3A,
+ /* 0x40: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0
+ };
+
+ bppshift = mdev->bpp_shifts[(crtc->fb->bits_per_pixel >> 3) - 1];
+
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ dacvalue[MGA1064_VREF_CTL] = 0x03;
+ dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_DAC_EN |
+ MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ if (mdev->has_sdram)
+ option = 0x40049120;
+ else
+ option = 0x4004d120;
+ option2 = 0x00008000;
+ break;
+ case G200_WB:
+ dacvalue[MGA1064_VREF_CTL] = 0x07;
+ option = 0x41049120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EV:
+ dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EH:
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_ER:
+ dac_index90 = 0;
+ break;
+ }
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits;
+ else
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits;
+ break;
+ case 24:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_24bits;
+ break;
+ case 32:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_32_24bits;
+ break;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ misc |= 0x40;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ misc |= 0x80;
+
+
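+ /* load the DAC defaults, skipping indices that are presumably reserved
+ or managed elsewhere (the palette access registers and, on some
+ chips, the PLL registers) */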
+ for (i = 0; i < sizeof(dacvalue); i++) {
+ if ((i <= 0x03) ||
+ (i == 0x07) ||
+ (i == 0x0b) ||
+ (i == 0x0f) ||
+ ((i >= 0x13) && (i <= 0x17)) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)))
+ continue;
+ if (IS_G200_SE(mdev) &&
+ ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
+ continue;
+ if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) &&
+ (i >= 0x44) && (i <= 0x4e))
+ continue;
+
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ if (mdev->type == G200_ER) {
+ WREG_DAC(0x90, dac_index90);
+ }
+
+
+ if (option)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+ if (option2)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+
+ WREG_SEQ(2, 0xf);
+ WREG_SEQ(3, 0);
+ WREG_SEQ(4, 0xe);
+
+ pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
+ pitch = pitch >> (4 - bppshift);
+
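+ /* horizontal CRTC timings are programmed in character clocks (8 pixels) */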
+ hdisplay = mode->hdisplay / 8 - 1;
+ hsyncstart = mode->hsync_start / 8 - 1;
+ hsyncend = mode->hsync_end / 8 - 1;
+ htotal = mode->htotal / 8 - 1;
+
+ /* Work around hardware quirk */
+ if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04)
+ htotal++;
+
+ vdisplay = mode->vdisplay - 1;
+ vsyncstart = mode->vsync_start - 1;
+ vsyncend = mode->vsync_end - 1;
+ vtotal = mode->vtotal - 2;
+
+ WREG_GFX(0, 0);
+ WREG_GFX(1, 0);
+ WREG_GFX(2, 0);
+ WREG_GFX(3, 0);
+ WREG_GFX(4, 0);
+ WREG_GFX(5, 0x40);
+ WREG_GFX(6, 0x5);
+ WREG_GFX(7, 0xf);
+ WREG_GFX(8, 0xf);
+
+ WREG_CRT(0, htotal - 4);
+ WREG_CRT(1, hdisplay);
+ WREG_CRT(2, hdisplay);
+ WREG_CRT(3, (htotal & 0x1F) | 0x80);
+ WREG_CRT(4, hsyncstart);
+ WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F));
+ WREG_CRT(6, vtotal & 0xFF);
+ WREG_CRT(7, ((vtotal & 0x100) >> 8) |
+ ((vdisplay & 0x100) >> 7) |
+ ((vsyncstart & 0x100) >> 6) |
+ ((vdisplay & 0x100) >> 5) |
+ ((vdisplay & 0x100) >> 4) | /* linecomp */
+ ((vtotal & 0x200) >> 4)|
+ ((vdisplay & 0x200) >> 3) |
+ ((vsyncstart & 0x200) >> 2));
+ WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
+ ((vdisplay & 0x200) >> 3));
+ WREG_CRT(10, 0);
+ WREG_CRT(11, 0);
+ WREG_CRT(12, 0);
+ WREG_CRT(13, 0);
+ WREG_CRT(14, 0);
+ WREG_CRT(15, 0);
+ WREG_CRT(16, vsyncstart & 0xFF);
+ WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
+ WREG_CRT(18, vdisplay & 0xFF);
+ WREG_CRT(19, pitch & 0xFF);
+ WREG_CRT(20, 0);
+ WREG_CRT(21, vdisplay & 0xFF);
+ WREG_CRT(22, (vtotal + 1) & 0xFF);
+ WREG_CRT(23, 0xc3);
+ WREG_CRT(24, vdisplay & 0xFF);
+
+ ext_vga[0] = 0;
+ ext_vga[5] = 0;
+
+ /* TODO interlace */
+
+ ext_vga[0] |= (pitch & 0x300) >> 4;
+ ext_vga[1] = (((htotal - 4) & 0x100) >> 8) |
+ ((hdisplay & 0x100) >> 7) |
+ ((hsyncstart & 0x100) >> 6) |
+ (htotal & 0x40);
+ ext_vga[2] = ((vtotal & 0xc00) >> 10) |
+ ((vdisplay & 0x400) >> 8) |
+ ((vdisplay & 0xc00) >> 7) |
+ ((vsyncstart & 0xc00) >> 5) |
+ ((vdisplay & 0x400) >> 3);
+ if (crtc->fb->bits_per_pixel == 24)
+ ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80;
+ else
+ ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
+ ext_vga[4] = 0;
+ if (mdev->type == G200_WB)
+ ext_vga[1] |= 0x88;
+
+ ext_vga_index24 = 0x05;
+
+ /* Set pixel clocks, preserving the sync polarities chosen above */
+ misc |= 0x2d;
+ WREG8(MGA_MISC_OUT, misc);
+
+ mga_crtc_set_plls(mdev, mode->clock);
+
+ for (i = 0; i < 6; i++) {
+ WREG_ECRT(i, ext_vga[i]);
+ }
+
+ if (mdev->type == G200_ER)
+ WREG_ECRT(24, ext_vga_index24);
+
+ if (mdev->type == G200_EV) {
+ WREG_ECRT(6, 0);
+ }
+
+ WREG_ECRT(0, ext_vga[0]);
+ /* Enable mga pixel clock */
+ misc |= 0x2d;
+
+ WREG8(MGA_MISC_OUT, misc);
+
+ if (adjusted_mode)
+ memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
+
+ mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+
+ /* reset tagfifo */
+ if (mdev->type == G200_ER) {
+ u32 mem_ctl = RREG32(MGAREG_MEMCTL);
+ u8 seq1;
+
+ /* screen off */
+ WREG8(MGAREG_SEQ_INDEX, 0x01);
+ seq1 = RREG8(MGAREG_SEQ_DATA) | 0x20;
+ WREG8(MGAREG_SEQ_DATA, seq1);
+
+ WREG32(MGAREG_MEMCTL, mem_ctl | 0x00200000);
+ udelay(1000);
+ WREG32(MGAREG_MEMCTL, mem_ctl & ~0x00200000);
+
+ WREG8(MGAREG_SEQ_DATA, seq1 & ~0x20);
+ }
+
+
+ if (IS_G200_SE(mdev)) {
+ if (mdev->reg_1e24 >= 0x02) {
+ u8 hi_pri_lvl;
+ u32 bpp;
+ u32 mb;
+
+ if (crtc->fb->bits_per_pixel > 16)
+ bpp = 32;
+ else if (crtc->fb->bits_per_pixel > 8)
+ bpp = 16;
+ else
+ bpp = 8;
+
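+ /* rough bandwidth estimate in Mbit/s: pixel clock (kHz) * bpp / 1000;
+ heavier modes get a higher arbitration priority (a lower
+ hi_pri_lvl value, presumably) */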
+ mb = (mode->clock * bpp) / 1000;
+ if (mb > 3100)
+ hi_pri_lvl = 0;
+ else if (mb > 2600)
+ hi_pri_lvl = 1;
+ else if (mb > 1900)
+ hi_pri_lvl = 2;
+ else if (mb > 1160)
+ hi_pri_lvl = 3;
+ else if (mb > 440)
+ hi_pri_lvl = 4;
+ else
+ hi_pri_lvl = 5;
+
+ WREG8(0x1fde, 0x06);
+ WREG8(0x1fdf, hi_pri_lvl);
+ } else {
+ if (mdev->reg_1e24 >= 0x01)
+ WREG8(0x1fdf, 0x03);
+ else
+ WREG8(0x1fdf, 0x04);
+ }
+ }
+ return 0;
+}
+
+#if 0 /* code from mjg to attempt D3 on crtc dpms off - revisit later */
+static int mga_suspend(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ u32 option;
+
+ if (mdev->suspended)
+ return 0;
+
+ WREG_SEQ(1, 0x20);
+ WREG_ECRT(1, 0x30);
+ /* Disable the pixel clock */
+ WREG_DAC(0x1a, 0x05);
+ /* Power down the DAC */
+ WREG_DAC(0x1e, 0x18);
+ /* Power down the pixel PLL */
+ WREG_DAC(0x1a, 0x0d);
+
+ /* Disable PLLs and clocks */
+ pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+ option &= ~(0x1F8024);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+ pci_set_power_state(pdev, PCI_D3hot);
+ pci_disable_device(pdev);
+
+ mdev->suspended = true;
+
+ return 0;
+}
+
+static int mga_resume(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ u32 option;
+
+ if (!mdev->suspended)
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_device(pdev);
+
+ /* Re-enable the system clock (clear the sysclk-disable bit) */
+ pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+ option &= ~(0x4);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+
+ mdev->suspended = false;
+
+ return 0;
+}
+
+#endif
+
+static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ u8 seq1 = 0, crtcext1 = 0;
+
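+ /* SEQ1 bit 5 blanks the display; CRTCEXT1 bits 4 and 5 disable
+ hsync and vsync respectively, giving the four DPMS states */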
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ seq1 = 0;
+ crtcext1 = 0;
+ mga_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ seq1 = 0x20;
+ crtcext1 = 0x10;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ seq1 = 0x20;
+ crtcext1 = 0x20;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ seq1 = 0x20;
+ crtcext1 = 0x30;
+ break;
+ }
+
+#if 0
+ if (mode == DRM_MODE_DPMS_OFF) {
+ mga_suspend(crtc);
+ }
+#endif
+ WREG8(MGAREG_SEQ_INDEX, 0x01);
+ seq1 |= RREG8(MGAREG_SEQ_DATA) & ~0x20;
+ mga_wait_vsync(mdev);
+ mga_wait_busy(mdev);
+ WREG8(MGAREG_SEQ_DATA, seq1);
+ msleep(20);
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x01);
+ crtcext1 |= RREG8(MGAREG_CRTCEXT_DATA) & ~0x30;
+ WREG8(MGAREG_CRTCEXT_DATA, crtcext1);
+
+#if 0
+ if (mode == DRM_MODE_DPMS_ON && mdev->suspended == true) {
+ mga_resume(crtc);
+ drm_helper_resume_force_mode(dev);
+ }
+#endif
+}
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * blank the screen via DPMS while the new mode is being set, to avoid
+ * displaying intermediate stages, but that's not relevant to us
+ */
+static void mga_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ u8 tmp;
+
+ /* mga_resume(crtc);*/
+
+ WREG8(MGAREG_CRTC_INDEX, 0x11);
+ tmp = RREG8(MGAREG_CRTC_DATA);
+ WREG_CRT(0x11, tmp | 0x80);
+
+ if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+ WREG_SEQ(0, 1);
+ msleep(50);
+ WREG_SEQ(1, 0x20);
+ msleep(20);
+ } else {
+ WREG8(MGAREG_SEQ_INDEX, 0x1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+
+ /* start sync reset */
+ WREG_SEQ(0, 1);
+ WREG_SEQ(1, tmp | 0x20);
+ }
+
+ if (mdev->type == G200_WB)
+ mga_g200wb_prepare(crtc);
+
+ WREG_CRT(17, 0);
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void mga_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ u8 tmp;
+
+ if (mdev->type == G200_WB)
+ mga_g200wb_commit(crtc);
+
+ if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+ msleep(50);
+ WREG_SEQ(1, 0x0);
+ msleep(20);
+ WREG_SEQ(0, 0x3);
+ } else {
+ WREG8(MGAREG_SEQ_INDEX, 0x1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+
+ tmp &= ~0x20;
+ WREG_SEQ(0x1, tmp);
+ WREG_SEQ(0, 3);
+ }
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode, so we can't perform smooth fades on deeper
+ * modes, but it's a requirement that we provide the function
+ */
+static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
+ int i;
+
+ for (i = start; i < end; i++) {
+ mga_crtc->lut_r[i] = red[i] >> 8;
+ mga_crtc->lut_g[i] = green[i] >> 8;
+ mga_crtc->lut_b[i] = blue[i] >> 8;
+ }
+ mga_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void mga_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(mga_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs mga_crtc_funcs = {
+ .gamma_set = mga_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = mga_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs mga_helper_funcs = {
+ .dpms = mga_crtc_dpms,
+ .mode_fixup = mga_crtc_mode_fixup,
+ .mode_set = mga_crtc_mode_set,
+ .mode_set_base = mga_crtc_mode_set_base,
+ .prepare = mga_crtc_prepare,
+ .commit = mga_crtc_commit,
+ .load_lut = mga_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void mga_crtc_init(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mga_crtc *mga_crtc;
+ int i;
+
+ mga_crtc = kzalloc(sizeof(struct mga_crtc) +
+ (MGAG200FB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+
+ if (mga_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
+ mdev->mode_info.crtc = mga_crtc;
+
+ for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+ mga_crtc->lut_r[i] = i;
+ mga_crtc->lut_g[i] = i;
+ mga_crtc->lut_b[i] = i;
+ }
+
+ drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ mga_crtc->lut_r[regno] = red >> 8;
+ mga_crtc->lut_g[regno] = green >> 8;
+ mga_crtc->lut_b[regno] = blue >> 8;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ *red = (u16)mga_crtc->lut_r[regno] << 8;
+ *green = (u16)mga_crtc->lut_g[regno] << 8;
+ *blue = (u16)mga_crtc->lut_b[regno] << 8;
+}
+
+/*
+ * The encoder comes after the CRTC in the output pipeline, but before
+ * the connector. It's responsible for ensuring that the digital
+ * stream is appropriately converted into the output format. Setup is
+ * very simple in this case - the analog DAC needs no per-mode
+ * configuration beyond what the CRTC code already programs
+ */
+
+/*
+ * These functions are analogous to those in the CRTC code, but are intended
+ * to handle any encoder-specific limitations
+ */
+static bool mga_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mga_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+}
+
+static void mga_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void mga_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+void mga_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mga_encoder);
+}
+
+static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
+ .dpms = mga_encoder_dpms,
+ .mode_fixup = mga_encoder_mode_fixup,
+ .mode_set = mga_encoder_mode_set,
+ .prepare = mga_encoder_prepare,
+ .commit = mga_encoder_commit,
+};
+
+static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
+ .destroy = mga_encoder_destroy,
+};
+
+static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct mga_encoder *mga_encoder;
+
+ mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
+ if (!mga_encoder)
+ return NULL;
+
+ encoder = &mga_encoder->base;
+ encoder->possible_crtcs = 0x1;
+
+ drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
+
+ return encoder;
+}
+
+
+static int mga_vga_get_modes(struct drm_connector *connector)
+{
+ struct mga_connector *mga_connector = to_mga_connector(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ return ret;
+}
+
+static int mga_vga_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* FIXME: Add bandwidth and g200se limitations */
+
+ if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
+ mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
+ mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
+ mode->crtc_vsync_end > 4096 || mode->crtc_vtotal > 4096) {
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+struct drm_encoder *mga_connector_best_encoder(struct drm_connector
+ *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the first bound encoder */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static enum drm_connector_status mga_vga_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void mga_connector_destroy(struct drm_connector *connector)
+{
+ struct mga_connector *mga_connector = to_mga_connector(connector);
+ mgag200_i2c_destroy(mga_connector->i2c);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
+ .get_modes = mga_vga_get_modes,
+ .mode_valid = mga_vga_mode_valid,
+ .best_encoder = mga_connector_best_encoder,
+};
+
+struct drm_connector_funcs mga_vga_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = mga_vga_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = mga_connector_destroy,
+};
+
+static struct drm_connector *mga_vga_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct mga_connector *mga_connector;
+
+ mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
+ if (!mga_connector)
+ return NULL;
+
+ connector = &mga_connector->base;
+
+ drm_connector_init(dev, connector,
+ &mga_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+
+ mga_connector->i2c = mgag200_i2c_create(dev);
+ if (!mga_connector->i2c)
+ DRM_ERROR("failed to add ddc bus\n");
+
+ return connector;
+}
+
+
+int mgag200_modeset_init(struct mga_device *mdev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ mdev->mode_info.mode_config_initialized = true;
+
+ mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
+ mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
+
+ mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
+
+ mga_crtc_init(mdev->dev);
+
+ encoder = mga_encoder_init(mdev->dev);
+ if (!encoder) {
+ DRM_ERROR("mga_encoder_init failed\n");
+ return -ENOMEM;
+ }
+
+ connector = mga_vga_init(mdev->dev);
+ if (!connector) {
+ DRM_ERROR("mga_vga_init failed\n");
+ return -ENOMEM;
+ }
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ret = mgag200_fbdev_init(mdev);
+ if (ret) {
+ DRM_ERROR("mga_fbdev_init failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void mgag200_modeset_fini(struct mga_device *mdev)
+{
+
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
new file mode 100644
index 000000000000..fb24d8655feb
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -0,0 +1,661 @@
+/*
+ * MGA Millennium (MGA2064W) functions
+ * MGA Mystique (MGA1064SG) functions
+ *
+ * Copyright 1996 The XFree86 Project, Inc.
+ *
+ * Authors
+ * Dirk Hohndel
+ * hohndel@XFree86.Org
+ * David Dawes
+ * dawes@XFree86.Org
+ * Contributors:
+ * Guy DESBIEF, Aix-en-provence, France
+ * g.desbief@aix.pacwan.net
+ * MGA1064SG Mystique register file
+ */
+
+
+#ifndef _MGA_REG_H_
+#define _MGA_REG_H_
+
+#define MGAREG_DWGCTL 0x1c00
+#define MGAREG_MACCESS 0x1c04
+/* the following is a mystique only register */
+#define MGAREG_MCTLWTST 0x1c08
+#define MGAREG_ZORG 0x1c0c
+
+#define MGAREG_PAT0 0x1c10
+#define MGAREG_PAT1 0x1c14
+#define MGAREG_PLNWT 0x1c1c
+
+#define MGAREG_BCOL 0x1c20
+#define MGAREG_FCOL 0x1c24
+
+#define MGAREG_SRC0 0x1c30
+#define MGAREG_SRC1 0x1c34
+#define MGAREG_SRC2 0x1c38
+#define MGAREG_SRC3 0x1c3c
+
+#define MGAREG_XYSTRT 0x1c40
+#define MGAREG_XYEND 0x1c44
+
+#define MGAREG_SHIFT 0x1c50
+/* the following is a mystique only register */
+#define MGAREG_DMAPAD 0x1c54
+#define MGAREG_SGN 0x1c58
+#define MGAREG_LEN 0x1c5c
+
+#define MGAREG_AR0 0x1c60
+#define MGAREG_AR1 0x1c64
+#define MGAREG_AR2 0x1c68
+#define MGAREG_AR3 0x1c6c
+#define MGAREG_AR4 0x1c70
+#define MGAREG_AR5 0x1c74
+#define MGAREG_AR6 0x1c78
+
+#define MGAREG_CXBNDRY 0x1c80
+#define MGAREG_FXBNDRY 0x1c84
+#define MGAREG_YDSTLEN 0x1c88
+#define MGAREG_PITCH 0x1c8c
+
+#define MGAREG_YDST 0x1c90
+#define MGAREG_YDSTORG 0x1c94
+#define MGAREG_YTOP 0x1c98
+#define MGAREG_YBOT 0x1c9c
+
+#define MGAREG_CXLEFT 0x1ca0
+#define MGAREG_CXRIGHT 0x1ca4
+#define MGAREG_FXLEFT 0x1ca8
+#define MGAREG_FXRIGHT 0x1cac
+
+#define MGAREG_XDST 0x1cb0
+
+#define MGAREG_DR0 0x1cc0
+#define MGAREG_DR1 0x1cc4
+#define MGAREG_DR2 0x1cc8
+#define MGAREG_DR3 0x1ccc
+
+#define MGAREG_DR4 0x1cd0
+#define MGAREG_DR5 0x1cd4
+#define MGAREG_DR6 0x1cd8
+#define MGAREG_DR7 0x1cdc
+
+#define MGAREG_DR8 0x1ce0
+#define MGAREG_DR9 0x1ce4
+#define MGAREG_DR10 0x1ce8
+#define MGAREG_DR11 0x1cec
+
+#define MGAREG_DR12 0x1cf0
+#define MGAREG_DR13 0x1cf4
+#define MGAREG_DR14 0x1cf8
+#define MGAREG_DR15 0x1cfc
+
+#define MGAREG_SRCORG 0x2cb4
+#define MGAREG_DSTORG 0x2cb8
+
+/* add or OR this onto one of the previous "power registers" to start
+ the drawing engine */
+
+#define MGAREG_EXEC 0x0100
+
+#define MGAREG_FIFOSTATUS 0x1e10
+#define MGAREG_Status 0x1e14
+#define MGAREG_CACHEFLUSH 0x1fff
+#define MGAREG_ICLEAR 0x1e18
+#define MGAREG_IEN 0x1e1c
+
+#define MGAREG_VCOUNT 0x1e20
+
+#define MGAREG_Reset 0x1e40
+
+#define MGAREG_OPMODE 0x1e54
+
+/* Warp Registers */
+#define MGAREG_WIADDR 0x1dc0
+#define MGAREG_WIADDR2 0x1dd8
+#define MGAREG_WGETMSB 0x1dc8
+#define MGAREG_WVRTXSZ 0x1dcc
+#define MGAREG_WACCEPTSEQ 0x1dd4
+#define MGAREG_WMISC 0x1e70
+
+#define MGAREG_MEMCTL 0x2e08
+
+/* OPMODE register additives */
+
+#define MGAOPM_DMA_GENERAL (0x00 << 2)
+#define MGAOPM_DMA_BLIT (0x01 << 2)
+#define MGAOPM_DMA_VECTOR (0x10 << 2)
+
+/* MACCESS register additives */
+#define MGAMAC_PW8 0x00
+#define MGAMAC_PW16 0x01
+#define MGAMAC_PW24 0x03 /* not a typo */
+#define MGAMAC_PW32 0x02 /* not a typo */
+#define MGAMAC_BYPASS332 0x10000000
+#define MGAMAC_NODITHER 0x40000000
+#define MGAMAC_DIT555 0x80000000
+
+/* DWGCTL register additives */
+
+/* Lines */
+
+#define MGADWG_LINE_OPEN 0x00
+#define MGADWG_AUTOLINE_OPEN 0x01
+#define MGADWG_LINE_CLOSE 0x02
+#define MGADWG_AUTOLINE_CLOSE 0x03
+
+/* Trapezoids */
+#define MGADWG_TRAP 0x04
+#define MGADWG_TEXTURE_TRAP 0x06
+
+/* BitBlts */
+
+#define MGADWG_BITBLT 0x08
+#define MGADWG_FBITBLT 0x0c
+#define MGADWG_ILOAD 0x09
+#define MGADWG_ILOAD_SCALE 0x0d
+#define MGADWG_ILOAD_FILTER 0x0f
+#define MGADWG_ILOAD_HIQH 0x07
+#define MGADWG_ILOAD_HIQHV 0x0e
+#define MGADWG_IDUMP 0x0a
+
+/* atype access to WRAM */
+
+#define MGADWG_RPL ( 0x00 << 4 )
+#define MGADWG_RSTR ( 0x01 << 4 )
+#define MGADWG_ZI ( 0x03 << 4 )
+#define MGADWG_BLK ( 0x04 << 4 )
+#define MGADWG_I ( 0x07 << 4 )
+
+/* specifies whether bit blits are linear or xy */
+#define MGADWG_LINEAR ( 0x01 << 7 )
+
+/* z drawing mode. use MGADWG_NOZCMP for always */
+
+#define MGADWG_NOZCMP ( 0x00 << 8 )
+#define MGADWG_ZE ( 0x02 << 8 )
+#define MGADWG_ZNE ( 0x03 << 8 )
+#define MGADWG_ZLT ( 0x04 << 8 )
+#define MGADWG_ZLTE ( 0x05 << 8 )
+#define MGADWG_GT ( 0x06 << 8 )
+#define MGADWG_GTE ( 0x07 << 8 )
+
+/* use this to force colour expansion circuitry to do its stuff */
+
+#define MGADWG_SOLID ( 0x01 << 11 )
+
+/* ar register at zero */
+
+#define MGADWG_ARZERO ( 0x01 << 12 )
+
+#define MGADWG_SGNZERO ( 0x01 << 13 )
+
+#define MGADWG_SHIFTZERO ( 0x01 << 14 )
+
+/* See table on 4-43 for bop ALU operations */
+
+/* See table on 4-44 for translucidity masks */
+
+#define MGADWG_BMONOLEF ( 0x00 << 25 )
+#define MGADWG_BMONOWF ( 0x04 << 25 )
+#define MGADWG_BPLAN ( 0x01 << 25 )
+
+/* note that if bfcol is specified and you're doing a bitblt, it causes
+ a fbitblt to be performed, so check that you obey the fbitblt rules */
+
+#define MGADWG_BFCOL ( 0x02 << 25 )
+#define MGADWG_BUYUV ( 0x0e << 25 )
+#define MGADWG_BU32BGR ( 0x03 << 25 )
+#define MGADWG_BU32RGB ( 0x07 << 25 )
+#define MGADWG_BU24BGR ( 0x0b << 25 )
+#define MGADWG_BU24RGB ( 0x0f << 25 )
+
+#define MGADWG_PATTERN ( 0x01 << 29 )
+#define MGADWG_TRANSC ( 0x01 << 30 )
+#define MGAREG_MISC_WRITE 0x3c2
+#define MGAREG_MISC_READ 0x3cc
+#define MGAREG_MEM_MISC_WRITE 0x1fc2
+#define MGAREG_MEM_MISC_READ 0x1fcc
+
+#define MGAREG_MISC_IOADSEL (0x1 << 0)
+#define MGAREG_MISC_RAMMAPEN (0x1 << 1)
+#define MGAREG_MISC_CLK_SEL_VGA25 (0x0 << 2)
+#define MGAREG_MISC_CLK_SEL_VGA28 (0x1 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_PIX (0x2 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_MSK (0x3 << 2)
+#define MGAREG_MISC_VIDEO_DIS (0x1 << 4)
+#define MGAREG_MISC_HIGH_PG_SEL (0x1 << 5)
+
+/* MMIO VGA registers */
+#define MGAREG_SEQ_INDEX 0x1fc4
+#define MGAREG_SEQ_DATA 0x1fc5
+#define MGAREG_CRTC_INDEX 0x1fd4
+#define MGAREG_CRTC_DATA 0x1fd5
+#define MGAREG_CRTCEXT_INDEX 0x1fde
+#define MGAREG_CRTCEXT_DATA 0x1fdf
+
+
+
+/* MGA bits for registers PCI_OPTION_REG */
+#define MGA1064_OPT_SYS_CLK_PCI ( 0x00 << 0 )
+#define MGA1064_OPT_SYS_CLK_PLL ( 0x01 << 0 )
+#define MGA1064_OPT_SYS_CLK_EXT ( 0x02 << 0 )
+#define MGA1064_OPT_SYS_CLK_MSK ( 0x03 << 0 )
+
+#define MGA1064_OPT_SYS_CLK_DIS ( 0x01 << 2 )
+#define MGA1064_OPT_G_CLK_DIV_1 ( 0x01 << 3 )
+#define MGA1064_OPT_M_CLK_DIV_1 ( 0x01 << 4 )
+
+#define MGA1064_OPT_SYS_PLL_PDN ( 0x01 << 5 )
+#define MGA1064_OPT_VGA_ION ( 0x01 << 8 )
+
+/* MGA registers in PCI config space */
+#define PCI_MGA_INDEX 0x44
+#define PCI_MGA_DATA 0x48
+#define PCI_MGA_OPTION 0x40
+#define PCI_MGA_OPTION2 0x50
+#define PCI_MGA_OPTION3 0x54
+
+#define RAMDAC_OFFSET 0x3c00
+
+/* TVP3026 direct registers */
+
+#define TVP3026_INDEX 0x00
+#define TVP3026_WADR_PAL 0x00
+#define TVP3026_COL_PAL 0x01
+#define TVP3026_PIX_RD_MSK 0x02
+#define TVP3026_RADR_PAL 0x03
+#define TVP3026_CUR_COL_ADDR 0x04
+#define TVP3026_CUR_COL_DATA 0x05
+#define TVP3026_DATA 0x0a
+#define TVP3026_CUR_RAM 0x0b
+#define TVP3026_CUR_XLOW 0x0c
+#define TVP3026_CUR_XHI 0x0d
+#define TVP3026_CUR_YLOW 0x0e
+#define TVP3026_CUR_YHI 0x0f
+
+/* TVP3026 indirect registers */
+
+#define TVP3026_SILICON_REV 0x01
+#define TVP3026_CURSOR_CTL 0x06
+#define TVP3026_LATCH_CTL 0x0f
+#define TVP3026_TRUE_COLOR_CTL 0x18
+#define TVP3026_MUX_CTL 0x19
+#define TVP3026_CLK_SEL 0x1a
+#define TVP3026_PAL_PAGE 0x1c
+#define TVP3026_GEN_CTL 0x1d
+#define TVP3026_MISC_CTL 0x1e
+#define TVP3026_GEN_IO_CTL 0x2a
+#define TVP3026_GEN_IO_DATA 0x2b
+#define TVP3026_PLL_ADDR 0x2c
+#define TVP3026_PIX_CLK_DATA 0x2d
+#define TVP3026_MEM_CLK_DATA 0x2e
+#define TVP3026_LOAD_CLK_DATA 0x2f
+#define TVP3026_KEY_RED_LOW 0x32
+#define TVP3026_KEY_RED_HI 0x33
+#define TVP3026_KEY_GREEN_LOW 0x34
+#define TVP3026_KEY_GREEN_HI 0x35
+#define TVP3026_KEY_BLUE_LOW 0x36
+#define TVP3026_KEY_BLUE_HI 0x37
+#define TVP3026_KEY_CTL 0x38
+#define TVP3026_MCLK_CTL 0x39
+#define TVP3026_SENSE_TEST 0x3a
+#define TVP3026_TEST_DATA 0x3b
+#define TVP3026_CRC_LSB 0x3c
+#define TVP3026_CRC_MSB 0x3d
+#define TVP3026_CRC_CTL 0x3e
+#define TVP3026_ID 0x3f
+#define TVP3026_RESET 0xff
+
+
+/* MGA1064 DAC Register file */
+/* MGA1064 direct registers */
+
+#define MGA1064_INDEX 0x00
+#define MGA1064_WADR_PAL 0x00
+#define MGA1064_SPAREREG 0x00
+#define MGA1064_COL_PAL 0x01
+#define MGA1064_PIX_RD_MSK 0x02
+#define MGA1064_RADR_PAL 0x03
+#define MGA1064_DATA 0x0a
+
+#define MGA1064_CUR_XLOW 0x0c
+#define MGA1064_CUR_XHI 0x0d
+#define MGA1064_CUR_YLOW 0x0e
+#define MGA1064_CUR_YHI 0x0f
+
+/* MGA1064 indirect registers */
+#define MGA1064_DVI_PIPE_CTL 0x03
+#define MGA1064_CURSOR_BASE_ADR_LOW 0x04
+#define MGA1064_CURSOR_BASE_ADR_HI 0x05
+#define MGA1064_CURSOR_CTL 0x06
+#define MGA1064_CURSOR_COL0_RED 0x08
+#define MGA1064_CURSOR_COL0_GREEN 0x09
+#define MGA1064_CURSOR_COL0_BLUE 0x0a
+
+#define MGA1064_CURSOR_COL1_RED 0x0c
+#define MGA1064_CURSOR_COL1_GREEN 0x0d
+#define MGA1064_CURSOR_COL1_BLUE 0x0e
+
+#define MGA1064_CURSOR_COL2_RED 0x010
+#define MGA1064_CURSOR_COL2_GREEN 0x011
+#define MGA1064_CURSOR_COL2_BLUE 0x012
+
+#define MGA1064_VREF_CTL 0x018
+
+#define MGA1064_MUL_CTL 0x19
+#define MGA1064_MUL_CTL_8bits 0x0
+#define MGA1064_MUL_CTL_15bits 0x01
+#define MGA1064_MUL_CTL_16bits 0x02
+#define MGA1064_MUL_CTL_24bits 0x03
+#define MGA1064_MUL_CTL_32bits 0x04
+#define MGA1064_MUL_CTL_2G8V16bits 0x05
+#define MGA1064_MUL_CTL_G16V16bits 0x06
+#define MGA1064_MUL_CTL_32_24bits 0x07
+
+#define MGA1064_PIX_CLK_CTL 0x1a
+#define MGA1064_PIX_CLK_CTL_CLK_DIS ( 0x01 << 2 )
+#define MGA1064_PIX_CLK_CTL_CLK_POW_DOWN ( 0x01 << 3 )
+#define MGA1064_PIX_CLK_CTL_SEL_PCI ( 0x00 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_PLL ( 0x01 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_EXT ( 0x02 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_MSK ( 0x03 << 0 )
+
+#define MGA1064_GEN_CTL 0x1d
+#define MGA1064_GEN_CTL_SYNC_ON_GREEN_DIS (0x01 << 5)
+#define MGA1064_MISC_CTL 0x1e
+#define MGA1064_MISC_CTL_DAC_EN ( 0x01 << 0 )
+#define MGA1064_MISC_CTL_VGA ( 0x01 << 1 )
+#define MGA1064_MISC_CTL_DIS_CON ( 0x03 << 1 )
+#define MGA1064_MISC_CTL_MAFC ( 0x02 << 1 )
+#define MGA1064_MISC_CTL_VGA8 ( 0x01 << 3 )
+#define MGA1064_MISC_CTL_DAC_RAM_CS ( 0x01 << 4 )
+
+#define MGA1064_GEN_IO_CTL2 0x29
+#define MGA1064_GEN_IO_CTL 0x2a
+#define MGA1064_GEN_IO_DATA 0x2b
+#define MGA1064_SYS_PLL_M 0x2c
+#define MGA1064_SYS_PLL_N 0x2d
+#define MGA1064_SYS_PLL_P 0x2e
+#define MGA1064_SYS_PLL_STAT 0x2f
+
+#define MGA1064_REMHEADCTL 0x30
+#define MGA1064_REMHEADCTL_CLKDIS ( 0x01 << 0 )
+#define MGA1064_REMHEADCTL_CLKSL_OFF ( 0x00 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PLL ( 0x01 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PCI ( 0x02 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_MSK ( 0x03 << 1 )
+
+#define MGA1064_REMHEADCTL2 0x31
+
+#define MGA1064_ZOOM_CTL 0x38
+#define MGA1064_SENSE_TST 0x3a
+
+#define MGA1064_CRC_LSB 0x3c
+#define MGA1064_CRC_MSB 0x3d
+#define MGA1064_CRC_CTL 0x3e
+#define MGA1064_COL_KEY_MSK_LSB 0x40
+#define MGA1064_COL_KEY_MSK_MSB 0x41
+#define MGA1064_COL_KEY_LSB 0x42
+#define MGA1064_COL_KEY_MSB 0x43
+#define MGA1064_PIX_PLLA_M 0x44
+#define MGA1064_PIX_PLLA_N 0x45
+#define MGA1064_PIX_PLLA_P 0x46
+#define MGA1064_PIX_PLLB_M 0x48
+#define MGA1064_PIX_PLLB_N 0x49
+#define MGA1064_PIX_PLLB_P 0x4a
+#define MGA1064_PIX_PLLC_M 0x4c
+#define MGA1064_PIX_PLLC_N 0x4d
+#define MGA1064_PIX_PLLC_P 0x4e
+
+#define MGA1064_PIX_PLL_STAT 0x4f
+
+/*Added for G450 dual head*/
+
+#define MGA1064_VID_PLL_STAT 0x8c
+#define MGA1064_VID_PLL_P 0x8D
+#define MGA1064_VID_PLL_M 0x8E
+#define MGA1064_VID_PLL_N 0x8F
+
+/* Modified PLL for G200 Winbond (G200WB) */
+#define MGA1064_WB_PIX_PLLC_M 0xb7
+#define MGA1064_WB_PIX_PLLC_N 0xb6
+#define MGA1064_WB_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 Maxim (G200EV) */
+#define MGA1064_EV_PIX_PLLC_M 0xb6
+#define MGA1064_EV_PIX_PLLC_N 0xb7
+#define MGA1064_EV_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 EH */
+#define MGA1064_EH_PIX_PLLC_M 0xb6
+#define MGA1064_EH_PIX_PLLC_N 0xb7
+#define MGA1064_EH_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 Maxim (G200ER) */
+#define MGA1064_ER_PIX_PLLC_M 0xb7
+#define MGA1064_ER_PIX_PLLC_N 0xb6
+#define MGA1064_ER_PIX_PLLC_P 0xb8
+
+#define MGA1064_DISP_CTL 0x8a
+#define MGA1064_DISP_CTL_DAC1OUTSEL_MASK 0x01
+#define MGA1064_DISP_CTL_DAC1OUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_DAC1OUTSEL_EN 0x01
+#define MGA1064_DISP_CTL_DAC2OUTSEL_MASK (0x03 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC1 (0x01 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC2 (0x02 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_TVE (0x03 << 2)
+#define MGA1064_DISP_CTL_PANOUTSEL_MASK (0x03 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC1 (0x01 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2RGB (0x02 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2656 (0x03 << 5)
+
+#define MGA1064_SYNC_CTL 0x8b
+
+#define MGA1064_PWR_CTL 0xa0
+#define MGA1064_PWR_CTL_DAC2_EN (0x01 << 0)
+#define MGA1064_PWR_CTL_VID_PLL_EN (0x01 << 1)
+#define MGA1064_PWR_CTL_PANEL_EN (0x01 << 2)
+#define MGA1064_PWR_CTL_RFIFO_EN (0x01 << 3)
+#define MGA1064_PWR_CTL_CFIFO_EN (0x01 << 4)
+
+#define MGA1064_PAN_CTL 0xa2
+
+/* Using crtc2 */
+#define MGAREG2_C2CTL 0x10
+#define MGAREG2_C2HPARAM 0x14
+#define MGAREG2_C2HSYNC 0x18
+#define MGAREG2_C2VPARAM 0x1c
+#define MGAREG2_C2VSYNC 0x20
+#define MGAREG2_C2STARTADD0 0x28
+
+#define MGAREG2_C2OFFSET 0x40
+#define MGAREG2_C2DATACTL 0x4c
+
+#define MGAREG_C2CTL 0x3c10
+#define MGAREG_C2CTL_C2_EN 0x01
+
+#define MGAREG_C2_HIPRILVL_M (0x07 << 4)
+#define MGAREG_C2_MAXHIPRI_M (0x07 << 8)
+
+#define MGAREG_C2CTL_PIXCLKSEL_MASK (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSELH_MASK (0x01 << 14)
+#define MGAREG_C2CTL_PIXCLKSEL_PCICLK 0x00
+#define MGAREG_C2CTL_PIXCLKSEL_VDOCLK (0x01 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_PIXELPLL (0x02 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VIDEOPLL (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VDCLK (0x01 << 14)
+
+#define MGAREG_C2CTL_PIXCLKSEL_CRISTAL ((0x01 << 1) | (0x01 << 14))
+#define MGAREG_C2CTL_PIXCLKSEL_SYSTEMPLL ((0x02 << 1) | (0x01 << 14))
+
+#define MGAREG_C2CTL_PIXCLKDIS_MASK (0x01 << 3)
+#define MGAREG_C2CTL_PIXCLKDIS_DISABLE (0x01 << 3)
+
+#define MGAREG_C2CTL_CRTCDACSEL_MASK (0x01 << 20)
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC1 0x00
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC2 (0x01 << 20)
+
+#define MGAREG_C2HPARAM 0x3c14
+#define MGAREG_C2HSYNC 0x3c18
+#define MGAREG_C2VPARAM 0x3c1c
+#define MGAREG_C2VSYNC 0x3c20
+#define MGAREG_C2STARTADD0 0x3c28
+
+#define MGAREG_C2OFFSET 0x3c40
+#define MGAREG_C2DATACTL 0x3c4c
+
+/* video register */
+
+#define MGAREG_BESA1C3ORG 0x3d60
+#define MGAREG_BESA1CORG 0x3d10
+#define MGAREG_BESA1ORG 0x3d00
+#define MGAREG_BESCTL 0x3d20
+#define MGAREG_BESGLOBCTL 0x3dc0
+#define MGAREG_BESHCOORD 0x3d28
+#define MGAREG_BESHISCAL 0x3d30
+#define MGAREG_BESHSRCEND 0x3d3c
+#define MGAREG_BESHSRCLST 0x3d50
+#define MGAREG_BESHSRCST 0x3d38
+#define MGAREG_BESLUMACTL 0x3d40
+#define MGAREG_BESPITCH 0x3d24
+#define MGAREG_BESV1SRCLST 0x3d54
+#define MGAREG_BESV1WGHT 0x3d48
+#define MGAREG_BESVCOORD 0x3d2c
+#define MGAREG_BESVISCAL 0x3d34
+
+/* texture engine registers */
+
+#define MGAREG_TMR0 0x2c00
+#define MGAREG_TMR1 0x2c04
+#define MGAREG_TMR2 0x2c08
+#define MGAREG_TMR3 0x2c0c
+#define MGAREG_TMR4 0x2c10
+#define MGAREG_TMR5 0x2c14
+#define MGAREG_TMR6 0x2c18
+#define MGAREG_TMR7 0x2c1c
+#define MGAREG_TMR8 0x2c20
+#define MGAREG_TEXORG 0x2c24
+#define MGAREG_TEXWIDTH 0x2c28
+#define MGAREG_TEXHEIGHT 0x2c2c
+#define MGAREG_TEXCTL 0x2c30
+# define MGA_TW4 (0x00000000)
+# define MGA_TW8 (0x00000001)
+# define MGA_TW15 (0x00000002)
+# define MGA_TW16 (0x00000003)
+# define MGA_TW12 (0x00000004)
+# define MGA_TW32 (0x00000006)
+# define MGA_TW8A (0x00000007)
+# define MGA_TW8AL (0x00000008)
+# define MGA_TW422 (0x0000000A)
+# define MGA_TW422UYVY (0x0000000B)
+# define MGA_PITCHLIN (0x00000100)
+# define MGA_NOPERSPECTIVE (0x00200000)
+# define MGA_TAKEY (0x02000000)
+# define MGA_TAMASK (0x04000000)
+# define MGA_CLAMPUV (0x18000000)
+# define MGA_TEXMODULATE (0x20000000)
+#define MGAREG_TEXCTL2 0x2c3c
+# define MGA_G400_TC2_MAGIC (0x00008000)
+# define MGA_TC2_DECALBLEND (0x00000001)
+# define MGA_TC2_IDECAL (0x00000002)
+# define MGA_TC2_DECALDIS (0x00000004)
+# define MGA_TC2_CKSTRANSDIS (0x00000010)
+# define MGA_TC2_BORDEREN (0x00000020)
+# define MGA_TC2_SPECEN (0x00000040)
+# define MGA_TC2_DUALTEX (0x00000080)
+# define MGA_TC2_TABLEFOG (0x00000100)
+# define MGA_TC2_BUMPMAP (0x00000200)
+# define MGA_TC2_SELECT_TMU1 (0x80000000)
+#define MGAREG_TEXTRANS 0x2c34
+#define MGAREG_TEXTRANSHIGH 0x2c38
+#define MGAREG_TEXFILTER 0x2c58
+# define MGA_MIN_NRST (0x00000000)
+# define MGA_MIN_BILIN (0x00000002)
+# define MGA_MIN_ANISO (0x0000000D)
+# define MGA_MAG_NRST (0x00000000)
+# define MGA_MAG_BILIN (0x00000020)
+# define MGA_FILTERALPHA (0x00100000)
+#define MGAREG_ALPHASTART 0x2c70
+#define MGAREG_ALPHAXINC 0x2c74
+#define MGAREG_ALPHAYINC 0x2c78
+#define MGAREG_ALPHACTRL 0x2c7c
+# define MGA_SRC_ZERO (0x00000000)
+# define MGA_SRC_ONE (0x00000001)
+# define MGA_SRC_DST_COLOR (0x00000002)
+# define MGA_SRC_ONE_MINUS_DST_COLOR (0x00000003)
+# define MGA_SRC_ALPHA (0x00000004)
+# define MGA_SRC_ONE_MINUS_SRC_ALPHA (0x00000005)
+# define MGA_SRC_DST_ALPHA (0x00000006)
+# define MGA_SRC_ONE_MINUS_DST_ALPHA (0x00000007)
+# define MGA_SRC_SRC_ALPHA_SATURATE (0x00000008)
+# define MGA_SRC_BLEND_MASK (0x0000000f)
+# define MGA_DST_ZERO (0x00000000)
+# define MGA_DST_ONE (0x00000010)
+# define MGA_DST_SRC_COLOR (0x00000020)
+# define MGA_DST_ONE_MINUS_SRC_COLOR (0x00000030)
+# define MGA_DST_SRC_ALPHA (0x00000040)
+# define MGA_DST_ONE_MINUS_SRC_ALPHA (0x00000050)
+# define MGA_DST_DST_ALPHA (0x00000060)
+# define MGA_DST_ONE_MINUS_DST_ALPHA (0x00000070)
+# define MGA_DST_BLEND_MASK (0x00000070)
+# define MGA_ALPHACHANNEL (0x00000100)
+# define MGA_VIDEOALPHA (0x00000200)
+# define MGA_DIFFUSEDALPHA (0x01000000)
+# define MGA_MODULATEDALPHA (0x02000000)
+#define MGAREG_TDUALSTAGE0 (0x2CF8)
+#define MGAREG_TDUALSTAGE1 (0x2CFC)
+# define MGA_TDS_COLOR_ARG2_DIFFUSE (0x00000000)
+# define MGA_TDS_COLOR_ARG2_SPECULAR (0x00000001)
+# define MGA_TDS_COLOR_ARG2_FCOL (0x00000002)
+# define MGA_TDS_COLOR_ARG2_PREVSTAGE (0x00000003)
+# define MGA_TDS_COLOR_ALPHA_DIFFUSE (0x00000000)
+# define MGA_TDS_COLOR_ALPHA_FCOL (0x00000004)
+# define MGA_TDS_COLOR_ALPHA_CURRTEX (0x00000008)
+# define MGA_TDS_COLOR_ALPHA_PREVTEX (0x0000000c)
+# define MGA_TDS_COLOR_ALPHA_PREVSTAGE (0x00000010)
+# define MGA_TDS_COLOR_ARG1_REPLICATEALPHA (0x00000020)
+# define MGA_TDS_COLOR_ARG1_INV (0x00000040)
+# define MGA_TDS_COLOR_ARG2_REPLICATEALPHA (0x00000080)
+# define MGA_TDS_COLOR_ARG2_INV (0x00000100)
+# define MGA_TDS_COLOR_ALPHA1INV (0x00000200)
+# define MGA_TDS_COLOR_ALPHA2INV (0x00000400)
+# define MGA_TDS_COLOR_ARG1MUL_ALPHA1 (0x00000800)
+# define MGA_TDS_COLOR_ARG2MUL_ALPHA2 (0x00001000)
+# define MGA_TDS_COLOR_ARG1ADD_MULOUT (0x00002000)
+# define MGA_TDS_COLOR_ARG2ADD_MULOUT (0x00004000)
+# define MGA_TDS_COLOR_MODBRIGHT_2X (0x00008000)
+# define MGA_TDS_COLOR_MODBRIGHT_4X (0x00010000)
+# define MGA_TDS_COLOR_ADD_SUB (0x00000000)
+# define MGA_TDS_COLOR_ADD_ADD (0x00020000)
+# define MGA_TDS_COLOR_ADD2X (0x00040000)
+# define MGA_TDS_COLOR_ADDBIAS (0x00080000)
+# define MGA_TDS_COLOR_BLEND (0x00100000)
+# define MGA_TDS_COLOR_SEL_ARG1 (0x00000000)
+# define MGA_TDS_COLOR_SEL_ARG2 (0x00200000)
+# define MGA_TDS_COLOR_SEL_ADD (0x00400000)
+# define MGA_TDS_COLOR_SEL_MUL (0x00600000)
+# define MGA_TDS_ALPHA_ARG1_INV (0x00800000)
+# define MGA_TDS_ALPHA_ARG2_DIFFUSE (0x00000000)
+# define MGA_TDS_ALPHA_ARG2_FCOL (0x01000000)
+# define MGA_TDS_ALPHA_ARG2_PREVTEX (0x02000000)
+# define MGA_TDS_ALPHA_ARG2_PREVSTAGE (0x03000000)
+# define MGA_TDS_ALPHA_ARG2_INV (0x04000000)
+# define MGA_TDS_ALPHA_ADD (0x08000000)
+# define MGA_TDS_ALPHA_ADDBIAS (0x10000000)
+# define MGA_TDS_ALPHA_ADD2X (0x20000000)
+# define MGA_TDS_ALPHA_SEL_ARG1 (0x00000000)
+# define MGA_TDS_ALPHA_SEL_ARG2 (0x40000000)
+# define MGA_TDS_ALPHA_SEL_ADD (0x80000000)
+# define MGA_TDS_ALPHA_SEL_MUL (0xc0000000)
+
+#define MGAREG_DWGSYNC 0x2c4c
+
+#define MGAREG_AGP_PLL 0x1e4c
+#define MGA_AGP2XPLL_ENABLE 0x1
+#define MGA_AGP2XPLL_DISABLE 0x0
+
+#endif
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
new file mode 100644
index 000000000000..b223dcb7a710
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "mgag200_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct mga_device *
+mgag200_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct mga_device, ttm.bdev);
+}
+
+static int
+mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int mgag200_ttm_global_init(struct mga_device *mdev)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &mdev->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &mgag200_ttm_mem_global_init;
+ global_ref->release = &mgag200_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ mdev->ttm.bo_global_ref.mem_glob =
+ mdev->ttm.mem_global_ref.object;
+ global_ref = &mdev->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&mdev->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
+
+void
+mgag200_ttm_global_release(struct mga_device *mdev)
+{
+ if (mdev->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&mdev->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&mdev->ttm.mem_global_ref);
+ mdev->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct mgag200_bo *bo;
+
+ bo = container_of(tbo, struct mgag200_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &mgag200_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+mgag200_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct mgag200_bo *mgabo = mgag200_bo(bo);
+
+ if (!mgag200_ttm_bo_is_mgag200_bo(bo))
+ return;
+
+ mgag200_ttm_placement(mgabo, TTM_PL_FLAG_SYSTEM);
+ *pl = mgabo->placement;
+}
+
+static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct mga_device *mdev = mgag200_bdev(bdev);
+
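+ /* assume a non-iomem system mapping by default; the VRAM case
+ overrides this with the PCI BAR below */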
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(mdev->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int mgag200_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ int r;
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func mgag200_tt_backend_func = {
+ .destroy = &mgag200_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &mgag200_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver mgag200_bo_driver = {
+ .ttm_tt_create = mgag200_ttm_tt_create,
+ .ttm_tt_populate = mgag200_ttm_tt_populate,
+ .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
+ .init_mem_type = mgag200_bo_init_mem_type,
+ .evict_flags = mgag200_bo_evict_flags,
+ .move = mgag200_bo_move,
+ .verify_access = mgag200_bo_verify_access,
+ .io_mem_reserve = &mgag200_ttm_io_mem_reserve,
+ .io_mem_free = &mgag200_ttm_io_mem_free,
+};
+
+int mgag200_mm_init(struct mga_device *mdev)
+{
+ int ret;
+ struct drm_device *dev = mdev->dev;
+ struct ttm_bo_device *bdev = &mdev->ttm.bdev;
+
+ ret = mgag200_ttm_global_init(mdev);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&mdev->ttm.bdev,
+ mdev->ttm.bo_global_ref.ref.object,
+ &mgag200_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mdev->mc.vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ mdev->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+
+ return 0;
+}
+
+void mgag200_mm_fini(struct mga_device *mdev)
+{
+ struct drm_device *dev = mdev->dev;
+ ttm_bo_device_release(&mdev->ttm.bdev);
+
+ mgag200_ttm_global_release(mdev);
+
+ if (mdev->fb_mtrr >= 0) {
+ drm_mtrr_del(mdev->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+ mdev->fb_mtrr = -1;
+ }
+}
+
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
+{
+ u32 c = 0;
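+ /* fpfn/lpfn of zero leave the placement unrestricted within each domain */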
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+void mgag200_bo_unreserve(struct mgag200_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct mgag200_bo **pmgabo)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mgag200_bo *mgabo;
+ size_t acc_size;
+ int ret;
+
+ mgabo = kzalloc(sizeof(struct mgag200_bo), GFP_KERNEL);
+ if (!mgabo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &mgabo->gem, size);
+ if (ret) {
+ kfree(mgabo);
+ return ret;
+ }
+
+ mgabo->gem.driver_private = NULL;
+ mgabo->bo.bdev = &mdev->ttm.bdev;
+
+ mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size,
+ sizeof(struct mgag200_bo));
+
+ ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
+ ttm_bo_type_device, &mgabo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ NULL, mgag200_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pmgabo = mgabo;
+ return 0;
+}
+
+static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
+ }
+
+ mgag200_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
+}
+
+int mgag200_bo_unpin(struct mgag200_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int mgag200_bo_push_sysram(struct mgag200_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct mga_device *mdev;
+
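+ /* offsets below DRM_FILE_PAGE_OFFSET are legacy DRM maps; everything
+ above belongs to a TTM object */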
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ mdev = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
+}
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1a2ad7eb1734..fe5267d06ab5 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -16,10 +16,13 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
nv50_fb.o nvc0_fb.o \
- nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
+ nv04_fifo.o nv10_fifo.o nv17_fifo.o nv40_fifo.o nv50_fifo.o \
+ nv84_fifo.o nvc0_fifo.o nve0_fifo.o \
+ nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \
+ nv04_software.o nv50_software.o nvc0_software.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
- nv40_graph.o nv50_graph.o nvc0_graph.o \
- nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
+ nv40_graph.o nv50_graph.o nvc0_graph.o nve0_graph.o \
+ nv40_grctx.o nv50_grctx.o nvc0_grctx.o nve0_grctx.o \
nv84_crypt.o nv98_crypt.o \
nva3_copy.o nvc0_copy.o \
nv31_mpeg.o nv50_mpeg.o \
@@ -37,7 +40,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv50_calc.o \
nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
nv50_vram.o nvc0_vram.o \
- nv50_vm.o nvc0_vm.o
+ nv50_vm.o nvc0_vm.o nouveau_prime.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 284bd25d5d21..fc841e87b343 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -338,7 +338,8 @@ void nouveau_switcheroo_optimus_dsm(void)
void nouveau_unregister_dsm_handler(void)
{
- vga_switcheroo_unregister_handler();
+ if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
+ vga_switcheroo_unregister_handler();
}
/* retrieve the ROM in 4k blocks */
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0be4a815e706..2f11e16a81a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -30,6 +30,7 @@
#include "nouveau_gpio.h"
#include <linux/io-mapping.h>
+#include <linux/firmware.h>
/* these defines are made up */
#define NV_CIO_CRE_44_HEADA 0x0
@@ -195,35 +196,24 @@ static void
bios_shadow_acpi(struct nvbios *bios)
{
struct pci_dev *pdev = bios->dev->pdev;
- int ptr, len, ret;
- u8 data[3];
+ int cnt = 65536 / ROM_BIOS_PAGE;
+ int ret;
if (!nouveau_acpi_rom_supported(pdev))
return;
- ret = nouveau_acpi_get_bios_chunk(data, 0, sizeof(data));
- if (ret != sizeof(data))
- return;
-
- bios->length = min(data[2] * 512, 65536);
- bios->data = kmalloc(bios->length, GFP_KERNEL);
+ bios->data = kmalloc(cnt * ROM_BIOS_PAGE, GFP_KERNEL);
if (!bios->data)
return;
- len = bios->length;
- ptr = 0;
- while (len) {
- int size = (len > ROM_BIOS_PAGE) ? ROM_BIOS_PAGE : len;
-
- ret = nouveau_acpi_get_bios_chunk(bios->data, ptr, size);
- if (ret != size) {
- kfree(bios->data);
- bios->data = NULL;
+ bios->length = 0;
+ while (cnt--) {
+ ret = nouveau_acpi_get_bios_chunk(bios->data, bios->length,
+ ROM_BIOS_PAGE);
+ if (ret != ROM_BIOS_PAGE)
return;
- }
- len -= size;
- ptr += size;
+ bios->length += ROM_BIOS_PAGE;
}
}
@@ -249,8 +239,12 @@ bios_shadow(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;
struct methods *mthd, *best;
+ const struct firmware *fw;
+ char fname[32];
+ int ret;
if (nouveau_vbios) {
+ /* try to match one of the built-in methods */
mthd = shadow_methods;
do {
if (strcasecmp(nouveau_vbios, mthd->desc))
@@ -263,6 +257,22 @@ bios_shadow(struct drm_device *dev)
return true;
} while ((++mthd)->shadow);
+ /* attempt to load firmware image */
+ snprintf(fname, sizeof(fname), "nouveau/%s", nouveau_vbios);
+ ret = request_firmware(&fw, fname, &dev->pdev->dev);
+ if (ret == 0) {
+ bios->length = fw->size;
+ bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ release_firmware(fw);
+
+ NV_INFO(dev, "VBIOS image: %s\n", nouveau_vbios);
+ if (score_vbios(bios, 1))
+ return true;
+
+ kfree(bios->data);
+ bios->data = NULL;
+ }
+
NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
}
@@ -273,6 +283,7 @@ bios_shadow(struct drm_device *dev)
mthd->score = score_vbios(bios, mthd->rw);
mthd->size = bios->length;
mthd->data = bios->data;
+ bios->data = NULL;
} while (mthd->score != 3 && (++mthd)->shadow);
mthd = shadow_methods;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7d15a774f9c9..7f80ed523562 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -35,6 +35,8 @@
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
+#include "nouveau_fence.h"
+#include "nouveau_ramht.h"
#include <linux/log2.h>
#include <linux/slab.h>
@@ -89,12 +91,17 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+ struct sg_table *sg,
struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
size_t acc_size;
int ret;
+ int type = ttm_bo_type_device;
+
+ if (sg)
+ type = ttm_bo_type_sg;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
@@ -120,8 +127,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
sizeof(struct nouveau_bo));
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
- ttm_bo_type_device, &nvbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ type, &nvbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -473,7 +480,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(chan, &fence, true);
+ ret = nouveau_fence_new(chan, &fence);
if (ret)
return ret;
@@ -484,6 +491,76 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
}
static int
+nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ int ret = RING_SPACE(chan, 10);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
+ OUT_RING (chan, upper_32_bits(node->vma[0].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[0].offset));
+ OUT_RING (chan, upper_32_bits(node->vma[1].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[1].offset));
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, new_mem->num_pages);
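+ /* kick the copy engine; 0x0386 goes to method 0x0300 as immediate data */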
+ BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
+ }
+ return ret;
+}
+
+static int
+nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ }
+ return ret;
+}
+
+static int
+nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ u64 src_offset = node->vma[0].offset;
+ u64 dst_offset = node->vma[1].offset;
+ u32 page_count = new_mem->num_pages;
+ int ret;
+
+ while (page_count) {
+ int line_count = (page_count > 8191) ? 8191 : page_count;
+
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
+ OUT_RING (chan, upper_32_bits(src_offset));
+ OUT_RING (chan, lower_32_bits(src_offset));
+ OUT_RING (chan, upper_32_bits(dst_offset));
+ OUT_RING (chan, lower_32_bits(dst_offset));
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, line_count);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+ OUT_RING (chan, 0x00000110);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
+
+static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
@@ -501,17 +578,17 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
OUT_RING (chan, upper_32_bits(dst_offset));
OUT_RING (chan, lower_32_bits(dst_offset));
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
+ BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
OUT_RING (chan, upper_32_bits(src_offset));
OUT_RING (chan, lower_32_bits(src_offset));
OUT_RING (chan, PAGE_SIZE); /* src_pitch */
OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
OUT_RING (chan, PAGE_SIZE); /* line_length */
OUT_RING (chan, line_count);
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
OUT_RING (chan, 0x00100110);
page_count -= line_count;
@@ -523,6 +600,102 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
}
static int
+nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ u64 src_offset = node->vma[0].offset;
+ u64 dst_offset = node->vma[1].offset;
+ u32 page_count = new_mem->num_pages;
+ int ret;
+
+ while (page_count) {
+ int line_count = (page_count > 8191) ? 8191 : page_count;
+
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
+
+ BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
+ OUT_RING (chan, upper_32_bits(src_offset));
+ OUT_RING (chan, lower_32_bits(src_offset));
+ OUT_RING (chan, upper_32_bits(dst_offset));
+ OUT_RING (chan, lower_32_bits(dst_offset));
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, line_count);
+ BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
+ OUT_RING (chan, 0x00000110);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
+
+static int
+nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ int ret = RING_SPACE(chan, 7);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
+ OUT_RING (chan, upper_32_bits(node->vma[0].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[0].offset));
+ OUT_RING (chan, upper_32_bits(node->vma[1].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[1].offset));
+ OUT_RING (chan, 0x00000000 /* COPY */);
+ OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
+ }
+ return ret;
+}
+
+static int
+nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ int ret = RING_SPACE(chan, 7);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
+ OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
+ OUT_RING (chan, upper_32_bits(node->vma[0].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[0].offset));
+ OUT_RING (chan, upper_32_bits(node->vma[1].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[1].offset));
+ OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
+ }
+ return ret;
+}
+
+static int
+nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+ &chan->m2mf_ntfy);
+ if (ret == 0) {
+ ret = RING_SPACE(chan, 6);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+ OUT_RING (chan, NvNotify0);
+ OUT_RING (chan, NvDmaFB);
+ OUT_RING (chan, NvDmaFB);
+ } else {
+ nouveau_ramht_remove(chan, NvNotify0);
+ }
+ }
+
+ return ret;
+}
+
+static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
@@ -546,7 +719,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
+ BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
OUT_RING (chan, stride);
@@ -559,7 +732,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
+ BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
OUT_RING (chan, 1);
}
if (old_mem->mem_type == TTM_PL_VRAM &&
@@ -568,7 +741,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
+ BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
OUT_RING (chan, stride);
@@ -581,7 +754,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
+ BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
OUT_RING (chan, 1);
}
@@ -589,10 +762,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
+ BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
OUT_RING (chan, upper_32_bits(src_offset));
OUT_RING (chan, upper_32_bits(dst_offset));
- BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
+ BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
OUT_RING (chan, lower_32_bits(src_offset));
OUT_RING (chan, lower_32_bits(dst_offset));
OUT_RING (chan, stride);
@@ -601,7 +774,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, height);
OUT_RING (chan, 0x00000101);
OUT_RING (chan, 0x00000000);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
OUT_RING (chan, 0);
length -= amount;
@@ -612,6 +785,24 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
return 0;
}
+static int
+nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+ &chan->m2mf_ntfy);
+ if (ret == 0) {
+ ret = RING_SPACE(chan, 4);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+ OUT_RING (chan, NvNotify0);
+ }
+ }
+
+ return ret;
+}
+
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
struct nouveau_channel *chan, struct ttm_mem_reg *mem)
@@ -634,7 +825,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+ BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
@@ -646,7 +837,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF,
+ BEGIN_NV04(chan, NvSubCopy,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
OUT_RING (chan, src_offset);
OUT_RING (chan, dst_offset);
@@ -656,7 +847,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, line_count);
OUT_RING (chan, 0x00000101);
OUT_RING (chan, 0x00000000);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
OUT_RING (chan, 0);
page_count -= line_count;
@@ -716,13 +907,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
goto out;
}
- if (dev_priv->card_type < NV_50)
- ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
- else
- if (dev_priv->card_type < NV_C0)
- ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
- else
- ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+ ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
if (ret == 0) {
ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
no_wait_reserve,
@@ -734,6 +919,49 @@ out:
return ret;
}
+void
+nouveau_bo_move_init(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ static const struct {
+ const char *name;
+ int engine;
+ u32 oclass;
+ int (*exec)(struct nouveau_channel *,
+ struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
+ int (*init)(struct nouveau_channel *, u32 handle);
+ } _methods[] = {
+ { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
+ { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
+ { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
+ { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
+ { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
+ {},
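+ /* entries below the terminator exist but are never probed (nv98 crypt) */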
+ { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
+ }, *mthd = _methods;
+ const char *name = "CPU";
+ int ret;
+
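+ /* try each class in order and keep the first one the channel can create;
+  * if none initialises, "name" stays "CPU" and moves fall back to memcpy.
+  */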
+ do {
+ u32 handle = (mthd->engine << 16) | mthd->oclass;
+ ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
+ if (ret == 0) {
+ ret = mthd->init(chan, handle);
+ if (ret == 0) {
+ dev_priv->ttm.move = mthd->exec;
+ name = mthd->name;
+ break;
+ }
+ }
+ } while ((++mthd)->exec);
+
+ NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
+}
+
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
@@ -817,9 +1045,14 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
} else
if (new_mem && new_mem->mem_type == TTM_PL_TT &&
nvbo->page_shift == vma->vm->spg_shift) {
- nouveau_vm_map_sg(vma, 0, new_mem->
- num_pages << PAGE_SHIFT,
- new_mem->mm_node);
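+ /* dma-buf imports are backed by an sg table and use the sg_table path */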
+ if (((struct nouveau_mem *)new_mem->mm_node)->sg)
+ nouveau_vm_map_sg_table(vma, 0,
+ new_mem->num_pages << PAGE_SHIFT,
+ new_mem->mm_node);
+ else
+ nouveau_vm_map_sg(vma, 0,
+ new_mem->num_pages << PAGE_SHIFT,
+ new_mem->mm_node);
} else {
nouveau_vm_unmap(vma);
}
@@ -885,8 +1118,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
goto out;
}
- /* Software copy if the card isn't up and running yet. */
- if (!dev_priv->channel) {
+ /* CPU copy if we have no accelerated method available */
+ if (!dev_priv->ttm.move) {
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@@ -1030,26 +1263,10 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
- nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
return nouveau_bo_validate(nvbo, false, true, false);
}
-void
-nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
-{
- struct nouveau_fence *old_fence;
-
- if (likely(fence))
- nouveau_fence_ref(fence);
-
- spin_lock(&nvbo->bo.bdev->fence_lock);
- old_fence = nvbo->bo.sync_obj;
- nvbo->bo.sync_obj = fence;
- spin_unlock(&nvbo->bo.bdev->fence_lock);
-
- nouveau_fence_unref(&old_fence);
-}
-
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
@@ -1058,10 +1275,19 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
struct drm_device *dev;
unsigned i;
int r;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
return 0;
+ if (slave && ttm->sg) {
+ /* make userspace faulting work */
+ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+ ttm_dma->dma_address, ttm->num_pages);
+ ttm->state = tt_unbound;
+ return 0;
+ }
+
dev_priv = nouveau_bdev(ttm->bdev);
dev = dev_priv->dev;
@@ -1106,6 +1332,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
struct drm_nouveau_private *dev_priv;
struct drm_device *dev;
unsigned i;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+ if (slave)
+ return;
dev_priv = nouveau_bdev(ttm->bdev);
dev = dev_priv->dev;
@@ -1134,6 +1364,52 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
ttm_pool_unpopulate(ttm);
}
+void
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+{
+ struct nouveau_fence *old_fence = NULL;
+
+ if (likely(fence))
+ nouveau_fence_ref(fence);
+
+ spin_lock(&nvbo->bo.bdev->fence_lock);
+ old_fence = nvbo->bo.sync_obj;
+ nvbo->bo.sync_obj = fence;
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+ nouveau_fence_unref(&old_fence);
+}
+
+static void
+nouveau_bo_fence_unref(void **sync_obj)
+{
+ nouveau_fence_unref((struct nouveau_fence **)sync_obj);
+}
+
+static void *
+nouveau_bo_fence_ref(void *sync_obj)
+{
+ return nouveau_fence_ref(sync_obj);
+}
+
+static bool
+nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
+{
+ return nouveau_fence_done(sync_obj);
+}
+
+static int
+nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+{
+ return nouveau_fence_wait(sync_obj, lazy, intr);
+}
+
+static int
+nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
+{
+ return 0;
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
@@ -1144,11 +1420,11 @@ struct ttm_bo_driver nouveau_bo_driver = {
.move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
- .sync_obj_signaled = __nouveau_fence_signalled,
- .sync_obj_wait = __nouveau_fence_wait,
- .sync_obj_flush = __nouveau_fence_flush,
- .sync_obj_unref = __nouveau_fence_unref,
- .sync_obj_ref = __nouveau_fence_ref,
+ .sync_obj_signaled = nouveau_bo_fence_signalled,
+ .sync_obj_wait = nouveau_bo_fence_wait,
+ .sync_obj_flush = nouveau_bo_fence_flush,
+ .sync_obj_unref = nouveau_bo_fence_unref,
+ .sync_obj_ref = nouveau_bo_fence_ref,
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
@@ -1181,9 +1457,12 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
- else
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- nouveau_vm_map_sg(vma, 0, size, node);
+ else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
+ if (node->sg)
+ nouveau_vm_map_sg_table(vma, 0, size, node);
+ else
+ nouveau_vm_map_sg(vma, 0, size, node);
+ }
list_add_tail(&vma->head, &nvbo->vma_list);
vma->refcount = 1;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 846afb0bfef4..629d8a2df5bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -27,7 +27,10 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
static int
nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
@@ -38,7 +41,7 @@ nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
int ret;
/* allocate buffer object */
- ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
+ ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo);
if (ret)
goto out;
@@ -117,8 +120,9 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
struct drm_file *file_priv,
uint32_t vram_handle, uint32_t gart_handle)
{
+ struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_channel *chan;
unsigned long flags;
@@ -155,10 +159,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
}
NV_DEBUG(dev, "initialising channel %d\n", chan->id);
- INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
- INIT_LIST_HEAD(&chan->nvsw.flip);
- INIT_LIST_HEAD(&chan->fence.pending);
- spin_lock_init(&chan->fence.lock);
/* setup channel's memory and vm */
ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
@@ -188,20 +188,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
chan->user_put = 0x40;
chan->user_get = 0x44;
if (dev_priv->card_type >= NV_50)
- chan->user_get_hi = 0x60;
+ chan->user_get_hi = 0x60;
- /* disable the fifo caches */
- pfifo->reassign(dev, false);
-
- /* Construct initial RAMFC for new channel */
- ret = pfifo->create_context(chan);
+ /* create fifo context */
+ ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
if (ret) {
nouveau_channel_put(&chan);
return ret;
}
- pfifo->reassign(dev, true);
-
/* Insert NOPs for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret) {
@@ -211,9 +206,28 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
OUT_RING (chan, 0x00000000);
+
+ ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
+ if (ret) {
+ nouveau_channel_put(&chan);
+ return ret;
+ }
+
+ if (dev_priv->card_type < NV_C0) {
+ ret = RING_SPACE(chan, 2);
+ if (ret) {
+ nouveau_channel_put(&chan);
+ return ret;
+ }
+
+ BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
+ OUT_RING (chan, NvSw);
+ FIRE_RING (chan);
+ }
+
FIRE_RING(chan);
- ret = nouveau_fence_channel_init(chan);
+ ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
if (ret) {
nouveau_channel_put(&chan);
return ret;
@@ -268,7 +282,6 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
struct nouveau_channel *chan = *pchan;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
unsigned long flags;
int i;
@@ -285,24 +298,12 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
/* give it chance to idle */
nouveau_channel_idle(chan);
- /* ensure all outstanding fences are signaled. they should be if the
- * above attempts at idling were OK, but if we failed this'll tell TTM
- * we're done with the buffers.
- */
- nouveau_fence_channel_fini(chan);
-
- /* boot it off the hardware */
- pfifo->reassign(dev, false);
-
/* destroy the engine specific contexts */
- pfifo->destroy_context(chan);
- for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
+ for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
if (chan->engctx[i])
dev_priv->eng[i]->context_del(chan, i);
}
- pfifo->reassign(dev, true);
-
/* aside from its resources, the channel should now be dead,
* remove it from the channel list
*/
@@ -354,38 +355,37 @@ nouveau_channel_ref(struct nouveau_channel *chan,
*pchan = chan;
}
-void
+int
nouveau_channel_idle(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_fence *fence = NULL;
int ret;
- nouveau_fence_update(chan);
-
- if (chan->fence.sequence != chan->fence.sequence_ack) {
- ret = nouveau_fence_new(chan, &fence, true);
- if (!ret) {
- ret = nouveau_fence_wait(fence, false, false);
- nouveau_fence_unref(&fence);
- }
-
- if (ret)
- NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+ ret = nouveau_fence_new(chan, &fence);
+ if (!ret) {
+ ret = nouveau_fence_wait(fence, false, false);
+ nouveau_fence_unref(&fence);
}
+
+ if (ret)
+ NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+ return ret;
}
/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_channel *chan;
int i;
+ if (!pfifo)
+ return;
+
NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
- for (i = 0; i < engine->fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
chan = nouveau_channel_get(file_priv, i);
if (IS_ERR(chan))
continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fa860358add1..7b11edb077d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -654,7 +654,13 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
if (nv_connector->edid && connector->display_info.bpc)
return;
- /* if not, we're out of options unless we're LVDS, default to 8bpc */
+ /* EDID 1.4 is *supposed* to be supported on eDP, but, Apple... */
+ if (nv_connector->type == DCB_CONNECTOR_eDP) {
+ connector->display_info.bpc = 6;
+ return;
+ }
+
+ /* we're out of options unless we're LVDS, default to 8bpc */
if (nv_encoder->dcb->type != OUTPUT_LVDS) {
connector->display_info.bpc = 8;
return;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index fa2ec491f6a7..188c92b327e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -67,8 +67,6 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
nvchan_rd32(chan, 0x8c));
}
- seq_printf(m, "last fence : %d\n", chan->fence.sequence);
- seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index a85e112863d1..69688ef5cf46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -33,7 +33,9 @@
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
#include "nouveau_connector.h"
+#include "nouveau_software.h"
#include "nouveau_gpio.h"
+#include "nouveau_fence.h"
#include "nv50_display.h"
static void
@@ -300,7 +302,7 @@ nouveau_display_create(struct drm_device *dev)
disp->color_vibrance_property->values[1] = 200; /* -100..+100 */
}
- dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+ dev->mode_config.funcs = &nouveau_mode_config_funcs;
dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
dev->mode_config.min_width = 0;
@@ -325,14 +327,21 @@ nouveau_display_create(struct drm_device *dev)
ret = disp->create(dev);
if (ret)
- return ret;
+ goto disp_create_err;
if (dev->mode_config.num_crtc) {
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret)
- return ret;
+ goto vblank_err;
}
+ return 0;
+
+vblank_err:
+ disp->destroy(dev);
+disp_create_err:
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
return ret;
}
@@ -425,6 +434,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_page_flip_state *s,
struct nouveau_fence **pfence)
{
+ struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
unsigned long flags;
@@ -432,7 +442,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
/* Queue it to the pending list */
spin_lock_irqsave(&dev->event_lock, flags);
- list_add_tail(&s->head, &chan->nvsw.flip);
+ list_add_tail(&s->head, &swch->flip);
spin_unlock_irqrestore(&dev->event_lock, flags);
/* Synchronize with the old framebuffer */
@@ -446,17 +456,17 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
goto fail;
if (dev_priv->card_type < NV_C0) {
- BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+ BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
OUT_RING (chan, 0x00000000);
OUT_RING (chan, 0x00000000);
} else {
- BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
- OUT_RING (chan, ++chan->fence.sequence);
- BEGIN_NVC0(chan, 8, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
+ BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
+ OUT_RING (chan, 0);
+ BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
}
FIRE_RING (chan);
- ret = nouveau_fence_new(chan, pfence, true);
+ ret = nouveau_fence_new(chan, pfence);
if (ret)
goto fail;
@@ -477,7 +487,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
struct nouveau_page_flip_state *s;
- struct nouveau_channel *chan;
+ struct nouveau_channel *chan = NULL;
struct nouveau_fence *fence;
int ret;
@@ -500,7 +510,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
new_bo->bo.offset };
/* Choose the channel the flip will be handled in */
- chan = nouveau_fence_channel(new_bo->bo.sync_obj);
+ fence = new_bo->bo.sync_obj;
+ if (fence)
+ chan = nouveau_channel_get_unlocked(fence->channel);
if (!chan)
chan = nouveau_channel_get_unlocked(dev_priv->channel);
mutex_lock(&chan->mutex);
@@ -540,20 +552,20 @@ int
nouveau_finish_page_flip(struct nouveau_channel *chan,
struct nouveau_page_flip_state *ps)
{
+ struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
struct drm_device *dev = chan->dev;
struct nouveau_page_flip_state *s;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- if (list_empty(&chan->nvsw.flip)) {
+ if (list_empty(&swch->flip)) {
NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
spin_unlock_irqrestore(&dev->event_lock, flags);
return -EINVAL;
}
- s = list_first_entry(&chan->nvsw.flip,
- struct nouveau_page_flip_state, head);
+ s = list_first_entry(&swch->flip, struct nouveau_page_flip_state, head);
if (s->event) {
struct drm_pending_vblank_event *e = s->event;
struct timeval now;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 23d4edf992b7..8db68be9544f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -48,12 +48,12 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
- NvSubM2MF = 0,
+ NvSubCtxSurf2D = 0,
NvSubSw = 1,
- NvSub2D = 2,
- NvSubCtxSurf2D = 2,
+ NvSubImageBlit = 2,
+ NvSub2D = 3,
NvSubGdiRect = 3,
- NvSubImageBlit = 4
+ NvSubCopy = 4,
};
/* Object handles. */
@@ -73,6 +73,7 @@ enum {
NvSema = 0x8000000f,
NvEvoSema0 = 0x80000010,
NvEvoSema1 = 0x80000011,
+ NvNotify1 = 0x80000012,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
@@ -127,15 +128,33 @@ extern void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
static inline void
-BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
+BEGIN_NV04(struct nouveau_channel *chan, int subc, int mthd, int size)
{
- OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
+ OUT_RING(chan, 0x00000000 | (subc << 13) | (size << 18) | mthd);
}
static inline void
-BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
+BEGIN_NI04(struct nouveau_channel *chan, int subc, int mthd, int size)
{
- OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
+ OUT_RING(chan, 0x40000000 | (subc << 13) | (size << 18) | mthd);
+}
+
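+/* Fermi+ command header: incrementing-method submission */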
+static inline void
+BEGIN_NVC0(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+ OUT_RING(chan, 0x20000000 | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
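+/* Fermi+ command header: non-incrementing, all data sent to one method */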
+static inline void
+BEGIN_NIC0(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+ OUT_RING(chan, 0x60000000 | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
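+/* Fermi+ command header carrying 16 bits of immediate data, no payload words */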
+static inline void
+BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
+{
+ OUT_RING(chan, 0x80000000 | (data << 16) | (subc << 13) | (mthd >> 2));
+}
#define WRITE_PUT(val) do { \
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index d996134b1b28..7e289d2ad8e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -510,6 +510,25 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
nouveau_dp_link_train(encoder, datarate, func);
}
+static void
+nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_chan *auxch,
+ u8 *dpcd)
+{
+ u8 buf[3];
+
+ if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ if (!auxch_tx(dev, auxch->drive, 9, DP_SINK_OUI, buf, 3))
+ NV_DEBUG_KMS(dev, "Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (!auxch_tx(dev, auxch->drive, 9, DP_BRANCH_OUI, buf, 3))
+ NV_DEBUG_KMS(dev, "Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+}
+
bool
nouveau_dp_detect(struct drm_encoder *encoder)
{
@@ -544,6 +563,8 @@ nouveau_dp_detect(struct drm_encoder *encoder)
NV_DEBUG_KMS(dev, "maximum: %dx%d\n",
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
+ nouveau_dp_probe_oui(dev, auxch, dpcd);
+
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 4f2030bd5676..cad254c8e387 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -33,6 +33,7 @@
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "nouveau_pm.h"
+#include "nouveau_fifo.h"
#include "nv50_display.h"
#include "drm_pciids.h"
@@ -175,7 +176,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_channel *chan;
struct drm_crtc *crtc;
int ret, i, e;
@@ -214,17 +215,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
NV_INFO(dev, "Idling channels...\n");
- for (i = 0; i < pfifo->channels; i++) {
+ for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
chan = dev_priv->channels.ptr[i];
if (chan && chan->pushbuf_bo)
nouveau_channel_idle(chan);
}
- pfifo->reassign(dev, false);
- pfifo->disable(dev);
- pfifo->unload_context(dev);
-
for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
if (!dev_priv->eng[e])
continue;
@@ -265,8 +262,6 @@ out_abort:
if (dev_priv->eng[e])
dev_priv->eng[e]->init(dev, e);
}
- pfifo->enable(dev);
- pfifo->reassign(dev, true);
return ret;
}
@@ -274,6 +269,7 @@ int
nouveau_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct drm_crtc *crtc;
@@ -321,7 +317,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (dev_priv->eng[i])
dev_priv->eng[i]->init(dev, i);
}
- engine->fifo.init(dev);
nouveau_irq_postinstall(dev);
@@ -330,7 +325,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct nouveau_channel *chan;
int j;
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->pushbuf_bo)
continue;
@@ -408,7 +403,7 @@ static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
- DRIVER_MODESET,
+ DRIVER_MODESET | DRIVER_PRIME,
.load = nouveau_load,
.firstopen = nouveau_firstopen,
.lastclose = nouveau_lastclose,
@@ -430,6 +425,12 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
.fops = &nouveau_driver_fops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = nouveau_gem_prime_export,
+ .gem_prime_import = nouveau_gem_prime_import,
+
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
.gem_open_object = nouveau_gem_object_open,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 3aef353a926c..634d222c93de 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -70,7 +70,7 @@ struct nouveau_mem;
#define MAX_NUM_DCB_ENTRIES 16
-#define NOUVEAU_MAX_CHANNEL_NR 128
+#define NOUVEAU_MAX_CHANNEL_NR 4096
#define NOUVEAU_MAX_TILE_NR 15
struct nouveau_mem {
@@ -86,6 +86,7 @@ struct nouveau_mem {
u32 memtype;
u64 offset;
u64 size;
+ struct sg_table *sg;
};
struct nouveau_tile_reg {
@@ -164,8 +165,10 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
#define NVOBJ_ENGINE_BSP 6
#define NVOBJ_ENGINE_VP 7
-#define NVOBJ_ENGINE_DISPLAY 15
+#define NVOBJ_ENGINE_FIFO 14
+#define NVOBJ_ENGINE_FENCE 15
#define NVOBJ_ENGINE_NR 16
+#define NVOBJ_ENGINE_DISPLAY (NVOBJ_ENGINE_NR + 0) /*XXX*/
#define NVOBJ_FLAG_DONT_MAP (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
@@ -233,17 +236,6 @@ struct nouveau_channel {
uint32_t user_get_hi;
uint32_t user_put;
- /* Fencing */
- struct {
- /* lock protects the pending list only */
- spinlock_t lock;
- struct list_head pending;
- uint32_t sequence;
- uint32_t sequence_ack;
- atomic_t last_sequence_irq;
- struct nouveau_vma vma;
- } fence;
-
/* DMA push buffer */
struct nouveau_gpuobj *pushbuf;
struct nouveau_bo *pushbuf_bo;
@@ -257,8 +249,6 @@ struct nouveau_channel {
/* PFIFO context */
struct nouveau_gpuobj *ramfc;
- struct nouveau_gpuobj *cache;
- void *fifo_priv;
/* Execution engine contexts */
void *engctx[NVOBJ_ENGINE_NR];
@@ -292,18 +282,6 @@ struct nouveau_channel {
int ib_put;
} dma;
- uint32_t sw_subchannel[8];
-
- struct nouveau_vma dispc_vma[4];
- struct {
- struct nouveau_gpuobj *vblsem;
- uint32_t vblsem_head;
- uint32_t vblsem_offset;
- uint32_t vblsem_rval;
- struct list_head vbl_wait;
- struct list_head flip;
- } nvsw;
-
struct {
bool active;
char name[32];
@@ -366,30 +344,6 @@ struct nouveau_fb_engine {
void (*free_tile_region)(struct drm_device *dev, int i);
};
-struct nouveau_fifo_engine {
- void *priv;
- int channels;
-
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
-
- int (*init)(struct drm_device *);
- void (*takedown)(struct drm_device *);
-
- void (*disable)(struct drm_device *);
- void (*enable)(struct drm_device *);
- bool (*reassign)(struct drm_device *, bool enable);
- bool (*cache_pull)(struct drm_device *dev, bool enable);
-
- int (*channel_id)(struct drm_device *);
-
- int (*create_context)(struct nouveau_channel *);
- void (*destroy_context)(struct nouveau_channel *);
- int (*load_context)(struct nouveau_channel *);
- int (*unload_context)(struct drm_device *);
- void (*tlb_flush)(struct drm_device *dev);
-};
-
struct nouveau_display_engine {
void *priv;
int (*early_init)(struct drm_device *);
@@ -597,7 +551,6 @@ struct nouveau_engine {
struct nouveau_mc_engine mc;
struct nouveau_timer_engine timer;
struct nouveau_fb_engine fb;
- struct nouveau_fifo_engine fifo;
struct nouveau_display_engine display;
struct nouveau_gpio_engine gpio;
struct nouveau_pm_engine pm;
@@ -740,6 +693,9 @@ struct drm_nouveau_private {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
atomic_t validate_sequence;
+ int (*move)(struct nouveau_channel *,
+ struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
} ttm;
struct {
@@ -977,7 +933,7 @@ extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
extern void nouveau_channel_put(struct nouveau_channel **);
extern void nouveau_channel_ref(struct nouveau_channel *chan,
struct nouveau_channel **pchan);
-extern void nouveau_channel_idle(struct nouveau_channel *chan);
+extern int nouveau_channel_idle(struct nouveau_channel *chan);
/* nouveau_object.c */
#define NVOBJ_ENGINE_ADD(d, e, p) do { \
@@ -1209,56 +1165,6 @@ extern void nv50_fb_vm_trap(struct drm_device *, int display);
extern int nvc0_fb_init(struct drm_device *);
extern void nvc0_fb_takedown(struct drm_device *);
-/* nv04_fifo.c */
-extern int nv04_fifo_init(struct drm_device *);
-extern void nv04_fifo_fini(struct drm_device *);
-extern void nv04_fifo_disable(struct drm_device *);
-extern void nv04_fifo_enable(struct drm_device *);
-extern bool nv04_fifo_reassign(struct drm_device *, bool);
-extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
-extern int nv04_fifo_channel_id(struct drm_device *);
-extern int nv04_fifo_create_context(struct nouveau_channel *);
-extern void nv04_fifo_destroy_context(struct nouveau_channel *);
-extern int nv04_fifo_load_context(struct nouveau_channel *);
-extern int nv04_fifo_unload_context(struct drm_device *);
-extern void nv04_fifo_isr(struct drm_device *);
-
-/* nv10_fifo.c */
-extern int nv10_fifo_init(struct drm_device *);
-extern int nv10_fifo_channel_id(struct drm_device *);
-extern int nv10_fifo_create_context(struct nouveau_channel *);
-extern int nv10_fifo_load_context(struct nouveau_channel *);
-extern int nv10_fifo_unload_context(struct drm_device *);
-
-/* nv40_fifo.c */
-extern int nv40_fifo_init(struct drm_device *);
-extern int nv40_fifo_create_context(struct nouveau_channel *);
-extern int nv40_fifo_load_context(struct nouveau_channel *);
-extern int nv40_fifo_unload_context(struct drm_device *);
-
-/* nv50_fifo.c */
-extern int nv50_fifo_init(struct drm_device *);
-extern void nv50_fifo_takedown(struct drm_device *);
-extern int nv50_fifo_channel_id(struct drm_device *);
-extern int nv50_fifo_create_context(struct nouveau_channel *);
-extern void nv50_fifo_destroy_context(struct nouveau_channel *);
-extern int nv50_fifo_load_context(struct nouveau_channel *);
-extern int nv50_fifo_unload_context(struct drm_device *);
-extern void nv50_fifo_tlb_flush(struct drm_device *dev);
-
-/* nvc0_fifo.c */
-extern int nvc0_fifo_init(struct drm_device *);
-extern void nvc0_fifo_takedown(struct drm_device *);
-extern void nvc0_fifo_disable(struct drm_device *);
-extern void nvc0_fifo_enable(struct drm_device *);
-extern bool nvc0_fifo_reassign(struct drm_device *, bool);
-extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
-extern int nvc0_fifo_channel_id(struct drm_device *);
-extern int nvc0_fifo_create_context(struct nouveau_channel *);
-extern void nvc0_fifo_destroy_context(struct nouveau_channel *);
-extern int nvc0_fifo_load_context(struct nouveau_channel *);
-extern int nvc0_fifo_unload_context(struct drm_device *);
-
/* nv04_graph.c */
extern int nv04_graph_create(struct drm_device *);
extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
@@ -1277,18 +1183,23 @@ extern int nv20_graph_create(struct drm_device *);
/* nv40_graph.c */
extern int nv40_graph_create(struct drm_device *);
-extern void nv40_grctx_init(struct nouveau_grctx *);
+extern void nv40_grctx_init(struct drm_device *, u32 *size);
+extern void nv40_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
/* nv50_graph.c */
extern int nv50_graph_create(struct drm_device *);
-extern int nv50_grctx_init(struct nouveau_grctx *);
extern struct nouveau_enum nv50_data_error_names[];
extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst);
+extern int nv50_grctx_init(struct drm_device *, u32 *, u32, u32 *, u32 *);
+extern void nv50_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
/* nvc0_graph.c */
extern int nvc0_graph_create(struct drm_device *);
extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
+/* nve0_graph.c */
+extern int nve0_graph_create(struct drm_device *);
+
/* nv84_crypt.c */
extern int nv84_crypt_create(struct drm_device *);
@@ -1414,9 +1325,12 @@ extern int nv04_crtc_create(struct drm_device *, int index);
/* nouveau_bo.c */
extern struct ttm_bo_driver nouveau_bo_driver;
+extern void nouveau_bo_move_init(struct nouveau_channel *);
extern int nouveau_bo_new(struct drm_device *, int size, int align,
uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, struct nouveau_bo **);
+ uint32_t tile_flags,
+ struct sg_table *sg,
+ struct nouveau_bo **);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1437,50 +1351,6 @@ extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
struct nouveau_vma *);
extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
-/* nouveau_fence.c */
-struct nouveau_fence;
-extern int nouveau_fence_init(struct drm_device *);
-extern void nouveau_fence_fini(struct drm_device *);
-extern int nouveau_fence_channel_init(struct nouveau_channel *);
-extern void nouveau_fence_channel_fini(struct nouveau_channel *);
-extern void nouveau_fence_update(struct nouveau_channel *);
-extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
- bool emit);
-extern int nouveau_fence_emit(struct nouveau_fence *);
-extern void nouveau_fence_work(struct nouveau_fence *fence,
- void (*work)(void *priv, bool signalled),
- void *priv);
-struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
-
-extern bool __nouveau_fence_signalled(void *obj, void *arg);
-extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
-extern int __nouveau_fence_flush(void *obj, void *arg);
-extern void __nouveau_fence_unref(void **obj);
-extern void *__nouveau_fence_ref(void *obj);
-
-static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
-{
- return __nouveau_fence_signalled(obj, NULL);
-}
-static inline int
-nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
-{
- return __nouveau_fence_wait(obj, NULL, lazy, intr);
-}
-extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-static inline int nouveau_fence_flush(struct nouveau_fence *obj)
-{
- return __nouveau_fence_flush(obj, NULL);
-}
-static inline void nouveau_fence_unref(struct nouveau_fence **obj)
-{
- __nouveau_fence_unref((void **)obj);
-}
-static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
-{
- return __nouveau_fence_ref(obj);
-}
-
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, int size, int align,
uint32_t domain, uint32_t tile_mode,
@@ -1501,6 +1371,11 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
+extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
/* nouveau_display.c */
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
@@ -1772,6 +1647,7 @@ nv44_graph_class(struct drm_device *dev)
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
+#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
#define NV10_SUBCHAN_REF_CNT 0x00000050
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8113e9201ed9..153b9a15469b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -153,7 +153,7 @@ nouveau_fbcon_sync(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- int ret, i;
+ int ret;
if (!chan || !chan->accel_done || in_interrupt() ||
info->state != FBINFO_STATE_RUNNING ||
@@ -163,38 +163,8 @@ nouveau_fbcon_sync(struct fb_info *info)
if (!mutex_trylock(&chan->mutex))
return 0;
- ret = RING_SPACE(chan, 4);
- if (ret) {
- mutex_unlock(&chan->mutex);
- nouveau_fbcon_gpu_lockup(info);
- return 0;
- }
-
- if (dev_priv->card_type >= NV_C0) {
- BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
- OUT_RING (chan, 0);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
- OUT_RING (chan, 0);
- } else {
- BEGIN_RING(chan, 0, 0x0104, 1);
- OUT_RING (chan, 0);
- BEGIN_RING(chan, 0, 0x0100, 1);
- OUT_RING (chan, 0);
- }
-
- nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
- FIRE_RING(chan);
+ ret = nouveau_channel_idle(chan);
mutex_unlock(&chan->mutex);
-
- ret = -EBUSY;
- for (i = 0; i < 100000; i++) {
- if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
- ret = 0;
- break;
- }
- DRM_UDELAY(1);
- }
-
if (ret) {
nouveau_fbcon_gpu_lockup(info);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index c1dc20f6cb85..3c180493dab8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,220 +32,100 @@
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
#include "nouveau_dma.h"
-#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
-
-struct nouveau_fence {
- struct nouveau_channel *channel;
- struct kref refcount;
- struct list_head entry;
-
- uint32_t sequence;
- bool signalled;
-
- void (*work)(void *priv, bool signalled);
- void *priv;
-};
-
-struct nouveau_semaphore {
- struct kref ref;
- struct drm_device *dev;
- struct drm_mm_node *mem;
-};
-
-static inline struct nouveau_fence *
-nouveau_fence(void *sync_obj)
+void
+nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
- return (struct nouveau_fence *)sync_obj;
+ struct nouveau_fence *fence, *fnext;
+ spin_lock(&fctx->lock);
+ list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+ if (fence->work)
+ fence->work(fence->priv, false);
+ fence->channel = NULL;
+ list_del(&fence->head);
+ nouveau_fence_unref(&fence);
+ }
+ spin_unlock(&fctx->lock);
}
-static void
-nouveau_fence_del(struct kref *ref)
+void
+nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
{
- struct nouveau_fence *fence =
- container_of(ref, struct nouveau_fence, refcount);
-
- nouveau_channel_ref(NULL, &fence->channel);
- kfree(fence);
+ INIT_LIST_HEAD(&fctx->pending);
+ spin_lock_init(&fctx->lock);
}
void
nouveau_fence_update(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
- struct nouveau_fence *tmp, *fence;
- uint32_t sequence;
+ struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+ struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ struct nouveau_fence *fence, *fnext;
- spin_lock(&chan->fence.lock);
-
- /* Fetch the last sequence if the channel is still up and running */
- if (likely(!list_empty(&chan->fence.pending))) {
- if (USE_REFCNT(dev))
- sequence = nvchan_rd32(chan, 0x48);
- else
- sequence = atomic_read(&chan->fence.last_sequence_irq);
-
- if (chan->fence.sequence_ack == sequence)
- goto out;
- chan->fence.sequence_ack = sequence;
- }
-
- list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
- if (fence->sequence > chan->fence.sequence_ack)
+ spin_lock(&fctx->lock);
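+ /* fences retire in submission order; stop at the first sequence not reached */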
+ list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+ if (priv->read(chan) < fence->sequence)
break;
- fence->signalled = true;
- list_del(&fence->entry);
if (fence->work)
fence->work(fence->priv, true);
-
- kref_put(&fence->refcount, nouveau_fence_del);
- }
-
-out:
- spin_unlock(&chan->fence.lock);
-}
-
-int
-nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
- bool emit)
-{
- struct nouveau_fence *fence;
- int ret = 0;
-
- fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (!fence)
- return -ENOMEM;
- kref_init(&fence->refcount);
- nouveau_channel_ref(chan, &fence->channel);
-
- if (emit)
- ret = nouveau_fence_emit(fence);
-
- if (ret)
+ fence->channel = NULL;
+ list_del(&fence->head);
nouveau_fence_unref(&fence);
- *pfence = fence;
- return ret;
-}
-
-struct nouveau_channel *
-nouveau_fence_channel(struct nouveau_fence *fence)
-{
- return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
+ }
+ spin_unlock(&fctx->lock);
}
int
-nouveau_fence_emit(struct nouveau_fence *fence)
+nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_channel *chan = fence->channel;
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+ struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
int ret;
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
- if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
- nouveau_fence_update(chan);
+ fence->channel = chan;
+ fence->timeout = jiffies + (3 * DRM_HZ);
+ fence->sequence = ++fctx->sequence;
- BUG_ON(chan->fence.sequence ==
- chan->fence.sequence_ack - 1);
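+ /* the per-chipset hook emits the sequence; on success the fence joins
+  * the channel's pending list holding an extra reference.
+  */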
+ ret = priv->emit(fence);
+ if (!ret) {
+ kref_get(&fence->kref);
+ spin_lock(&fctx->lock);
+ list_add_tail(&fence->head, &fctx->pending);
+ spin_unlock(&fctx->lock);
}
- fence->sequence = ++chan->fence.sequence;
-
- kref_get(&fence->refcount);
- spin_lock(&chan->fence.lock);
- list_add_tail(&fence->entry, &chan->fence.pending);
- spin_unlock(&chan->fence.lock);
-
- if (USE_REFCNT(dev)) {
- if (dev_priv->card_type < NV_C0)
- BEGIN_RING(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
- else
- BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
- } else {
- BEGIN_RING(chan, NvSubSw, 0x0150, 1);
- }
- OUT_RING (chan, fence->sequence);
- FIRE_RING(chan);
-
- return 0;
-}
-
-void
-nouveau_fence_work(struct nouveau_fence *fence,
- void (*work)(void *priv, bool signalled),
- void *priv)
-{
- BUG_ON(fence->work);
-
- spin_lock(&fence->channel->fence.lock);
-
- if (fence->signalled) {
- work(priv, true);
- } else {
- fence->work = work;
- fence->priv = priv;
- }
-
- spin_unlock(&fence->channel->fence.lock);
-}
-
-void
-__nouveau_fence_unref(void **sync_obj)
-{
- struct nouveau_fence *fence = nouveau_fence(*sync_obj);
-
- if (fence)
- kref_put(&fence->refcount, nouveau_fence_del);
- *sync_obj = NULL;
-}
-
-void *
-__nouveau_fence_ref(void *sync_obj)
-{
- struct nouveau_fence *fence = nouveau_fence(sync_obj);
-
- kref_get(&fence->refcount);
- return sync_obj;
+ return ret;
}
bool
-__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+nouveau_fence_done(struct nouveau_fence *fence)
{
- struct nouveau_fence *fence = nouveau_fence(sync_obj);
- struct nouveau_channel *chan = fence->channel;
-
- if (fence->signalled)
- return true;
-
- nouveau_fence_update(chan);
- return fence->signalled;
+ if (fence->channel)
+ nouveau_fence_update(fence->channel);
+ return !fence->channel;
}
int
-__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
- unsigned long timeout = jiffies + (3 * DRM_HZ);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
ktime_t t;
int ret = 0;
- while (1) {
- if (__nouveau_fence_signalled(sync_obj, sync_arg))
- break;
-
- if (time_after_eq(jiffies, timeout)) {
+ while (!nouveau_fence_done(fence)) {
+ if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
ret = -EBUSY;
break;
}
- __set_current_state(intr ? TASK_INTERRUPTIBLE
- : TASK_UNINTERRUPTIBLE);
+ __set_current_state(intr ? TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
if (lazy) {
t = ktime_set(0, sleep_time);
schedule_hrtimeout(&t, HRTIMER_MODE_REL);
@@ -261,354 +141,72 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
}
__set_current_state(TASK_RUNNING);
-
return ret;
}
-static struct nouveau_semaphore *
-semaphore_alloc(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_semaphore *sema;
- int size = (dev_priv->chipset < 0x84) ? 4 : 16;
- int ret, i;
-
- if (!USE_SEMA(dev))
- return NULL;
-
- sema = kmalloc(sizeof(*sema), GFP_KERNEL);
- if (!sema)
- goto fail;
-
- ret = drm_mm_pre_get(&dev_priv->fence.heap);
- if (ret)
- goto fail;
-
- spin_lock(&dev_priv->fence.lock);
- sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
- if (sema->mem)
- sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
- spin_unlock(&dev_priv->fence.lock);
-
- if (!sema->mem)
- goto fail;
-
- kref_init(&sema->ref);
- sema->dev = dev;
- for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
- nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
-
- return sema;
-fail:
- kfree(sema);
- return NULL;
-}
-
-static void
-semaphore_free(struct kref *ref)
-{
- struct nouveau_semaphore *sema =
- container_of(ref, struct nouveau_semaphore, ref);
- struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
- spin_lock(&dev_priv->fence.lock);
- drm_mm_put_block(sema->mem);
- spin_unlock(&dev_priv->fence.lock);
-
- kfree(sema);
-}
-
-static void
-semaphore_work(void *priv, bool signalled)
-{
- struct nouveau_semaphore *sema = priv;
- struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
- if (unlikely(!signalled))
- nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
-
- kref_put(&sema->ref, semaphore_free);
-}
-
-static int
-semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_fence *fence = NULL;
- u64 offset = chan->fence.vma.offset + sema->mem->start;
- int ret;
-
- if (dev_priv->chipset < 0x84) {
- ret = RING_SPACE(chan, 4);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
- OUT_RING (chan, NvSema);
- OUT_RING (chan, offset);
- OUT_RING (chan, 1);
- } else
- if (dev_priv->chipset < 0xc0) {
- ret = RING_SPACE(chan, 7);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram_handle);
- BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 1);
- OUT_RING (chan, 1); /* ACQUIRE_EQ */
- } else {
- ret = RING_SPACE(chan, 5);
- if (ret)
- return ret;
-
- BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 1);
- OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
- }
-
- /* Delay semaphore destruction until its work is done */
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret)
- return ret;
-
- kref_get(&sema->ref);
- nouveau_fence_work(fence, semaphore_work, sema);
- nouveau_fence_unref(&fence);
- return 0;
-}
-
-static int
-semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_fence *fence = NULL;
- u64 offset = chan->fence.vma.offset + sema->mem->start;
- int ret;
-
- if (dev_priv->chipset < 0x84) {
- ret = RING_SPACE(chan, 5);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
- OUT_RING (chan, NvSema);
- OUT_RING (chan, offset);
- BEGIN_RING(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
- OUT_RING (chan, 1);
- } else
- if (dev_priv->chipset < 0xc0) {
- ret = RING_SPACE(chan, 7);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram_handle);
- BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 1);
- OUT_RING (chan, 2); /* RELEASE */
- } else {
- ret = RING_SPACE(chan, 5);
- if (ret)
- return ret;
-
- BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 1);
- OUT_RING (chan, 0x1002); /* RELEASE */
- }
-
- /* Delay semaphore destruction until its work is done */
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret)
- return ret;
-
- kref_get(&sema->ref);
- nouveau_fence_work(fence, semaphore_work, sema);
- nouveau_fence_unref(&fence);
- return 0;
-}
-
int
-nouveau_fence_sync(struct nouveau_fence *fence,
- struct nouveau_channel *wchan)
+nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_channel *chan = nouveau_fence_channel(fence);
- struct drm_device *dev = wchan->dev;
- struct nouveau_semaphore *sema;
+ struct drm_device *dev = chan->dev;
+ struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+ struct nouveau_channel *prev;
int ret = 0;
- if (likely(!chan || chan == wchan ||
- nouveau_fence_signalled(fence)))
- goto out;
-
- sema = semaphore_alloc(dev);
- if (!sema) {
- /* Early card or broken userspace, fall back to
- * software sync. */
- ret = nouveau_fence_wait(fence, true, false);
- goto out;
- }
-
- /* try to take chan's mutex, if we can't take it right away
- * we have to fallback to software sync to prevent locking
- * order issues
- */
- if (!mutex_trylock(&chan->mutex)) {
- ret = nouveau_fence_wait(fence, true, false);
- goto out_unref;
+ prev = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
+ if (prev) {
+ if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
+ ret = priv->sync(fence, prev, chan);
+ if (unlikely(ret))
+ ret = nouveau_fence_wait(fence, true, false);
+ }
+ nouveau_channel_put_unlocked(&prev);
}
- /* Make wchan wait until it gets signalled */
- ret = semaphore_acquire(wchan, sema);
- if (ret)
- goto out_unlock;
-
- /* Signal the semaphore from chan */
- ret = semaphore_release(chan, sema);
-
-out_unlock:
- mutex_unlock(&chan->mutex);
-out_unref:
- kref_put(&sema->ref, semaphore_free);
-out:
- if (chan)
- nouveau_channel_put_unlocked(&chan);
return ret;
}
-int
-__nouveau_fence_flush(void *sync_obj, void *sync_arg)
+static void
+nouveau_fence_del(struct kref *kref)
{
- return 0;
+ struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
+ kfree(fence);
}
-int
-nouveau_fence_channel_init(struct nouveau_channel *chan)
+void
+nouveau_fence_unref(struct nouveau_fence **pfence)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- if (dev_priv->card_type < NV_C0) {
- /* Create an NV_SW object for various sync purposes */
- ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
- if (ret)
- return ret;
-
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
- OUT_RING (chan, NvSw);
- FIRE_RING (chan);
- }
-
- /* Setup area of memory shared between all channels for x-chan sync */
- if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
- struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
-
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
- mem->start << PAGE_SHIFT,
- mem->size, NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VRAM, &obj);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_insert(chan, NvSema, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret)
- return ret;
- } else
- if (USE_SEMA(dev)) {
- /* map fence bo into channel's vm */
- ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
- &chan->fence.vma);
- if (ret)
- return ret;
- }
-
- atomic_set(&chan->fence.last_sequence_irq, 0);
- return 0;
+ if (*pfence)
+ kref_put(&(*pfence)->kref, nouveau_fence_del);
+ *pfence = NULL;
}
-void
-nouveau_fence_channel_fini(struct nouveau_channel *chan)
+struct nouveau_fence *
+nouveau_fence_ref(struct nouveau_fence *fence)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_fence *tmp, *fence;
-
- spin_lock(&chan->fence.lock);
- list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
- fence->signalled = true;
- list_del(&fence->entry);
-
- if (unlikely(fence->work))
- fence->work(fence->priv, false);
-
- kref_put(&fence->refcount, nouveau_fence_del);
- }
- spin_unlock(&chan->fence.lock);
-
- nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
+ kref_get(&fence->kref);
+ return fence;
}
int
-nouveau_fence_init(struct drm_device *dev)
+nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
- int ret;
-
- /* Create a shared VRAM heap for cross-channel sync. */
- if (USE_SEMA(dev)) {
- ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
- 0, 0, &dev_priv->fence.bo);
- if (ret)
- return ret;
+ struct nouveau_fence *fence;
+ int ret = 0;
- ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
- if (ret)
- goto fail;
+ if (unlikely(!chan->engctx[NVOBJ_ENGINE_FENCE]))
+ return -ENODEV;
- ret = nouveau_bo_map(dev_priv->fence.bo);
- if (ret)
- goto fail;
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return -ENOMEM;
+ kref_init(&fence->kref);
- ret = drm_mm_init(&dev_priv->fence.heap, 0,
- dev_priv->fence.bo->bo.mem.size);
+ if (chan) {
+ ret = nouveau_fence_emit(fence, chan);
if (ret)
- goto fail;
-
- spin_lock_init(&dev_priv->fence.lock);
+ nouveau_fence_unref(&fence);
}
- return 0;
-fail:
- nouveau_bo_unmap(dev_priv->fence.bo);
- nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+ *pfence = fence;
return ret;
}
-
-void
-nouveau_fence_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (USE_SEMA(dev)) {
- drm_mm_takedown(&dev_priv->fence.heap);
- nouveau_bo_unmap(dev_priv->fence.bo);
- nouveau_bo_unpin(dev_priv->fence.bo);
- nouveau_bo_ref(NULL, &dev_priv->fence.bo);
- }
-}
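Note on the rewrite above: nouveau_fence_sync() no longer builds semaphore commands itself. It resolves the per-chipset fence engine via nv_engine(dev, NVOBJ_ENGINE_FENCE) and delegates cross-channel ordering to that engine's sync() hook, falling back to a blocking nouveau_fence_wait() if the hook fails. A minimal sketch of the caller-side pattern, using only names visible in this diff (the helper itself is hypothetical):

	/* Hypothetical helper: make `chan` wait for `fence` before running
	 * further commands.  All the real checks (same channel, already
	 * signalled, engine hook vs. CPU-wait fallback) live inside the
	 * rewritten nouveau_fence_sync() above. */
	static int example_order_after(struct nouveau_fence *fence,
				       struct nouveau_channel *chan)
	{
		return nouveau_fence_sync(fence, chan);
	}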
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
new file mode 100644
index 000000000000..82ba733393ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -0,0 +1,52 @@
+#ifndef __NOUVEAU_FENCE_H__
+#define __NOUVEAU_FENCE_H__
+
+struct nouveau_fence {
+ struct list_head head;
+ struct kref kref;
+
+ struct nouveau_channel *channel;
+ unsigned long timeout;
+ u32 sequence;
+
+ void (*work)(void *priv, bool signalled);
+ void *priv;
+};
+
+int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **);
+struct nouveau_fence *
+nouveau_fence_ref(struct nouveau_fence *);
+void nouveau_fence_unref(struct nouveau_fence **);
+
+int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
+bool nouveau_fence_done(struct nouveau_fence *);
+int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
+int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
+void nouveau_fence_idle(struct nouveau_channel *);
+void nouveau_fence_update(struct nouveau_channel *);
+
+struct nouveau_fence_chan {
+ struct list_head pending;
+ spinlock_t lock;
+ u32 sequence;
+};
+
+struct nouveau_fence_priv {
+ struct nouveau_exec_engine engine;
+ int (*emit)(struct nouveau_fence *);
+ int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
+ struct nouveau_channel *);
+ u32 (*read)(struct nouveau_channel *);
+};
+
+void nouveau_fence_context_new(struct nouveau_fence_chan *);
+void nouveau_fence_context_del(struct nouveau_fence_chan *);
+
+int nv04_fence_create(struct drm_device *dev);
+int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
+
+int nv10_fence_create(struct drm_device *dev);
+int nv84_fence_create(struct drm_device *dev);
+int nvc0_fence_create(struct drm_device *dev);
+
+#endif
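The header above is the entirety of the new kernel-internal fence API; everything chipset-specific hides behind nouveau_fence_priv's emit/sync/read hooks. A minimal sketch of the intended lifecycle, assuming a live channel and abbreviated error handling (the helper name is hypothetical):

	/* Hypothetical: fence the work already submitted on `chan` and
	 * block until the GPU has passed it. */
	static int example_fence_and_wait(struct nouveau_channel *chan)
	{
		struct nouveau_fence *fence = NULL;
		int ret;

		ret = nouveau_fence_new(chan, &fence);	/* allocates and emits */
		if (ret)
			return ret;

		ret = nouveau_fence_wait(fence, true, false); /* lazy, !intr */
		nouveau_fence_unref(&fence);
		return ret;
	}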
diff --git a/drivers/gpu/drm/nouveau/nouveau_fifo.h b/drivers/gpu/drm/nouveau/nouveau_fifo.h
new file mode 100644
index 000000000000..ce99cab2f257
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fifo.h
@@ -0,0 +1,32 @@
+#ifndef __NOUVEAU_FIFO_H__
+#define __NOUVEAU_FIFO_H__
+
+struct nouveau_fifo_priv {
+ struct nouveau_exec_engine base;
+ u32 channels;
+};
+
+struct nouveau_fifo_chan {
+};
+
+bool nv04_fifo_cache_pull(struct drm_device *, bool);
+void nv04_fifo_context_del(struct nouveau_channel *, int);
+int nv04_fifo_fini(struct drm_device *, int, bool);
+int nv04_fifo_init(struct drm_device *, int);
+void nv04_fifo_isr(struct drm_device *);
+void nv04_fifo_destroy(struct drm_device *, int);
+
+void nv50_fifo_playlist_update(struct drm_device *);
+void nv50_fifo_destroy(struct drm_device *, int);
+void nv50_fifo_tlb_flush(struct drm_device *, int);
+
+int nv04_fifo_create(struct drm_device *);
+int nv10_fifo_create(struct drm_device *);
+int nv17_fifo_create(struct drm_device *);
+int nv40_fifo_create(struct drm_device *);
+int nv50_fifo_create(struct drm_device *);
+int nv84_fifo_create(struct drm_device *);
+int nvc0_fifo_create(struct drm_device *);
+int nve0_fifo_create(struct drm_device *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ed52a6f41613..30f542316944 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -23,12 +23,14 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/dma-buf.h>
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
+#include "nouveau_fence.h"
#define nouveau_gem_pushbuf_sync(chan) 0
@@ -53,6 +55,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
nouveau_bo_unpin(nvbo);
}
+ if (gem->import_attach)
+ drm_prime_gem_destroy(gem, nvbo->bo.sg);
+
ttm_bo_unref(&bo);
drm_gem_object_release(gem);
@@ -139,7 +144,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
flags |= TTM_PL_FLAG_SYSTEM;
ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
- tile_flags, pnvbo);
+ tile_flags, NULL, pnvbo);
if (ret)
return ret;
nvbo = *pnvbo;
@@ -704,7 +709,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
if (chan->dma.ib_max) {
- ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
+ ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
NV_INFO(dev, "nv50cal_space: %d\n", ret);
goto out;
@@ -774,7 +779,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
}
- ret = nouveau_fence_new(chan, &fence, true);
+ ret = nouveau_fence_new(chan, &fence);
if (ret) {
NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
index a580cc62337a..82c19e82ff02 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.c
@@ -387,7 +387,7 @@ nouveau_gpio_reset(struct drm_device *dev)
if (dev_priv->card_type >= NV_D0) {
nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
if (unk1--)
- nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
+ nv_mask(dev, 0x00d740 + (unk1 * 4), 0xff, line);
} else
if (dev_priv->card_type >= NV_50) {
static const u32 regs[] = { 0xe100, 0xe28c };
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
index 86c2e374e938..b0795ececbda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -18,7 +18,6 @@ struct nouveau_grctx {
uint32_t ctxvals_base;
};
-#ifdef CP_CTX
static inline void
cp_out(struct nouveau_grctx *ctx, uint32_t inst)
{
@@ -88,10 +87,8 @@ _cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
(state ? 0 : CP_BRA_IF_CLEAR));
}
#define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
-#ifdef CP_BRA_MOD
#define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
-#endif
static inline void
_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
@@ -128,6 +125,5 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
nv_wo32(ctx->data, reg * 4, val);
}
-#endif
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index ba896e54b799..b87ad3bd7739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -1018,11 +1018,6 @@ nv_load_state_ext(struct drm_device *dev, int head,
}
NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
-
- /* Enable vblank interrupts. */
- NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
- (dev->vblank_enabled[head] ? 1 : 0));
- NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index b08065f981df..5b498ea32e14 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -39,6 +39,8 @@
#include "nouveau_pm.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
+#include "nouveau_fifo.h"
+#include "nouveau_fence.h"
/*
* NV10-NV40 tiling helpers
@@ -50,7 +52,6 @@ nv10_mem_update_tile_region(struct drm_device *dev,
uint32_t size, uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
int i = tile - dev_priv->tile.reg, j;
unsigned long save;
@@ -64,8 +65,8 @@ nv10_mem_update_tile_region(struct drm_device *dev,
pfb->init_tile_region(dev, i, addr, size, pitch, flags);
spin_lock_irqsave(&dev_priv->context_switch_lock, save);
- pfifo->reassign(dev, false);
- pfifo->cache_pull(dev, false);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+ nv04_fifo_cache_pull(dev, false);
nouveau_wait_for_idle(dev);
@@ -75,8 +76,8 @@ nv10_mem_update_tile_region(struct drm_device *dev,
dev_priv->eng[j]->set_tile_region(dev, i);
}
- pfifo->cache_pull(dev, true);
- pfifo->reassign(dev, true);
+ nv04_fifo_cache_pull(dev, true);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
@@ -89,7 +90,7 @@ nv10_mem_get_tile_region(struct drm_device *dev, int i)
spin_lock(&dev_priv->tile.lock);
if (!tile->used &&
- (!tile->fence || nouveau_fence_signalled(tile->fence)))
+ (!tile->fence || nouveau_fence_done(tile->fence)))
tile->used = true;
else
tile = NULL;
@@ -416,7 +417,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
if (dev_priv->card_type < NV_50) {
ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
- 0, 0, &dev_priv->vga_ram);
+ 0, 0, NULL, &dev_priv->vga_ram);
if (ret == 0)
ret = nouveau_bo_pin(dev_priv->vga_ram,
TTM_PL_FLAG_VRAM);
@@ -843,6 +844,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
break;
case NV_C0:
+ case NV_D0:
ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
break;
default:
@@ -977,6 +979,8 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
break;
case NV_MEM_TYPE_DDR3:
tDLLK = 12000;
+ tCKSRE = 2000;
+ tXS = 1000;
mr1_dlloff = 0x00000001;
break;
case NV_MEM_TYPE_GDDR3:
@@ -1023,6 +1027,7 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
exec->refresh_self(exec, false);
exec->refresh_auto(exec, true);
exec->wait(exec, tXS);
+ exec->wait(exec, tXS);
/* update MRs */
if (mr[2] != info->mr[2]) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index cc419fae794b..b190cc01c820 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -34,9 +34,10 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
+#include "nouveau_software.h"
#include "nouveau_vm.h"
-#include "nv50_display.h"
struct nouveau_gpuobj_method {
struct list_head head;
@@ -120,12 +121,13 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
u32 class, u32 mthd, u32 data)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_channel *chan = NULL;
unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
+ if (chid >= 0 && chid < pfifo->channels)
chan = dev_priv->channels.ptr[chid];
if (chan)
ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
@@ -133,37 +135,6 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
return ret;
}
-/* NVidia uses context objects to drive drawing operations.
-
- Context objects can be selected into 8 subchannels in the FIFO,
- and then used via DMA command buffers.
-
- A context object is referenced by a user defined handle (CARD32). The HW
- looks up graphics objects in a hash table in the instance RAM.
-
- An entry in the hash table consists of 2 CARD32. The first CARD32 contains
- the handle, the second one a bitfield, that contains the address of the
- object in instance RAM.
-
- The format of the second CARD32 seems to be:
-
- NV4 to NV30:
-
- 15: 0 instance_addr >> 4
- 17:16 engine (here uses 1 = graphics)
- 28:24 channel id (here uses 0)
- 31 valid (use 1)
-
- NV40:
-
- 15: 0 instance_addr >> 4 (maybe 19-0)
- 21:20 engine (here uses 1 = graphics)
- I'm unsure about the other bits, but using 0 seems to work.
-
- The key into the hash table depends on the object handle and channel id and
- is given as:
-*/
-
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
uint32_t size, int align, uint32_t flags,
@@ -267,7 +238,7 @@ nouveau_gpuobj_takedown(struct drm_device *dev)
kfree(oc);
}
- BUG_ON(!list_empty(&dev_priv->gpuobj_list));
+ WARN_ON(!list_empty(&dev_priv->gpuobj_list));
}
@@ -361,34 +332,6 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
return 0;
}
-/*
- DMA objects are used to reference a piece of memory in the
- framebuffer, PCI or AGP address space. Each object is 16 bytes big
- and looks as follows:
-
- entry[0]
- 11:0 class (seems like I can always use 0 here)
- 12 page table present?
- 13 page entry linear?
- 15:14 access: 0 rw, 1 ro, 2 wo
- 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
- 31:20 dma adjust (bits 0-11 of the address)
- entry[1]
- dma limit (size of transfer)
- entry[X]
- 1 0 readonly, 1 readwrite
- 31:12 dma frame address of the page (bits 12-31 of the address)
- entry[N]
- page table terminator, same value as the first pte, as does nvidia
- rivatv uses 0xffffffff
-
- Non linear page tables need a list of frame addresses afterwards,
- the rivatv project has some info on this.
-
- The method below creates a DMA object in instance RAM and returns a handle
- to it that can be used to set up context objects.
-*/
-
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
u64 base, u64 size, int target, int access,
@@ -540,82 +483,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
return 0;
}
-/* Context objects in the instance RAM have the following structure.
- * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
-
- NV4 - NV30:
-
- entry[0]
- 11:0 class
- 12 chroma key enable
- 13 user clip enable
- 14 swizzle enable
- 17:15 patch config:
- scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
- 18 synchronize enable
- 19 endian: 1 big, 0 little
- 21:20 dither mode
- 23 single step enable
- 24 patch status: 0 invalid, 1 valid
- 25 context_surface 0: 1 valid
- 26 context surface 1: 1 valid
- 27 context pattern: 1 valid
- 28 context rop: 1 valid
- 29,30 context beta, beta4
- entry[1]
- 7:0 mono format
- 15:8 color format
- 31:16 notify instance address
- entry[2]
- 15:0 dma 0 instance address
- 31:16 dma 1 instance address
- entry[3]
- dma method traps
-
- NV40:
- No idea what the exact format is. Here's what can be deducted:
-
- entry[0]:
- 11:0 class (maybe uses more bits here?)
- 17 user clip enable
- 21:19 patch config
- 25 patch status valid ?
- entry[1]:
- 15:0 DMA notifier (maybe 20:0)
- entry[2]:
- 15:0 DMA 0 instance (maybe 20:0)
- 24 big endian
- entry[3]:
- 15:0 DMA 1 instance (maybe 20:0)
- entry[4]:
- entry[5]:
- set to 0?
-*/
-static int
-nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_gpuobj *gpuobj;
- int ret;
-
- gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
- if (!gpuobj)
- return -ENOMEM;
- gpuobj->dev = chan->dev;
- gpuobj->engine = NVOBJ_ENGINE_SW;
- gpuobj->class = class;
- kref_init(&gpuobj->refcount);
- gpuobj->cinst = 0x40;
-
- spin_lock(&dev_priv->ramin_lock);
- list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
- spin_unlock(&dev_priv->ramin_lock);
-
- ret = nouveau_ramht_insert(chan, handle, gpuobj);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return ret;
-}
-
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
@@ -632,9 +499,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
if (oc->id != class)
continue;
- if (oc->engine == NVOBJ_ENGINE_SW)
- return nouveau_gpuobj_sw_new(chan, handle, class);
-
if (!chan->engctx[oc->engine]) {
ret = eng->context_new(chan, oc->engine);
if (ret)
@@ -644,7 +508,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
return eng->object_new(chan, oc->engine, handle, class);
}
- NV_ERROR(dev, "illegal object class: 0x%x\n", class);
return -EINVAL;
}
@@ -693,11 +556,10 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *pgd = NULL;
struct nouveau_vm_pgd *vpgd;
- int ret, i;
+ int ret;
ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
if (ret)
@@ -722,19 +584,6 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
nv_wo32(chan->ramin, 0x0208, 0xffffffff);
nv_wo32(chan->ramin, 0x020c, 0x000000ff);
- /* map display semaphore buffers into channel's vm */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo;
- if (dev_priv->card_type >= NV_D0)
- bo = nvd0_display_crtc_sema(dev, i);
- else
- bo = nv50_display(dev)->crtc[i].sem.bo;
-
- ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -747,7 +596,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
- int ret, i;
+ int ret;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
if (dev_priv->card_type >= NV_C0)
@@ -795,25 +644,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
nouveau_gpuobj_ref(NULL, &ramht);
if (ret)
return ret;
-
- /* dma objects for display sync channel semaphore blocks */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_gpuobj *sem = NULL;
- struct nv50_display_crtc *dispc =
- &nv50_display(dev)->crtc[i];
- u64 offset = dispc->sem.bo->bo.offset;
-
- ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
- NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VRAM, &sem);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
- nouveau_gpuobj_ref(NULL, &sem);
- if (ret)
- return ret;
- }
}
/* VRAM ctxdma */
@@ -873,25 +703,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- if (dev_priv->card_type >= NV_D0) {
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
- }
- } else
- if (dev_priv->card_type >= NV_50) {
- struct nv50_display *disp = nv50_display(dev);
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nv50_display_crtc *dispc = &disp->crtc[i];
- nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
- }
- }
+ NV_DEBUG(chan->dev, "ch%d\n", chan->id);
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
@@ -956,6 +768,17 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
if (init->handle == ~0)
return -EINVAL;
+ /* compatibility with userspace that assumes 506e for all chipsets */
+ if (init->class == 0x506e) {
+ init->class = nouveau_software_class(dev);
+ if (init->class == 0x906e)
+ return 0;
+ } else
+ if (init->class == 0x906e) {
+ NV_ERROR(dev, "906e not supported yet\n");
+ return -EINVAL;
+ }
+
chan = nouveau_channel_get(file_priv, init->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 69a528d106e6..ea6acf1c4a78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -83,7 +83,7 @@ nouveau_perf_entry(struct drm_device *dev, int idx,
return NULL;
}
-static u8 *
+u8 *
nouveau_perf_rammap(struct drm_device *dev, u32 freq,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 3f82dfea61dd..07cac72c72b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -61,8 +61,10 @@ int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
/* nouveau_perf.c */
void nouveau_perf_init(struct drm_device *);
void nouveau_perf_fini(struct drm_device *);
-u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
+u8 *nouveau_perf_rammap(struct drm_device *, u32 freq, u8 *ver,
+ u8 *hdr, u8 *cnt, u8 *len);
u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
+u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
/* nouveau_mem.c */
void nouveau_mem_timing_init(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
new file mode 100644
index 000000000000..c58aab7370c5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -0,0 +1,163 @@
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct nouveau_bo *nvbo = attachment->dmabuf->priv;
+ struct drm_device *dev = nvbo->gem->dev;
+ int npages = nvbo->bo.num_pages;
+ struct sg_table *sg;
+ int nents;
+
+ mutex_lock(&dev->struct_mutex);
+ sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
+ nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ mutex_unlock(&dev->struct_mutex);
+ return sg;
+}
+
+static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct nouveau_bo *nvbo = dma_buf->priv;
+
+ if (nvbo->gem->export_dma_buf == dma_buf) {
+ nvbo->gem->export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(nvbo->gem);
+ }
+}
+
+static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static const struct dma_buf_ops nouveau_dmabuf_ops = {
+ .map_dma_buf = nouveau_gem_map_dma_buf,
+ .unmap_dma_buf = nouveau_gem_unmap_dma_buf,
+ .release = nouveau_gem_dmabuf_release,
+ .kmap = nouveau_gem_kmap,
+ .kmap_atomic = nouveau_gem_kmap_atomic,
+ .kunmap = nouveau_gem_kunmap,
+ .kunmap_atomic = nouveau_gem_kunmap_atomic,
+};
+
+static int
+nouveau_prime_new(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg,
+ struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ u32 flags = 0;
+ int ret;
+
+ flags = TTM_PL_FLAG_TT;
+
+ ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
+ sg, pnvbo);
+ if (ret)
+ return ret;
+ nvbo = *pnvbo;
+
+ /* we restrict allowed domains on nv50+ to only the types
+ * that were requested at creation time. not possible on
+ * earlier chips without busting the ABI.
+ */
+ nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
+ nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
+ if (!nvbo->gem) {
+ nouveau_bo_ref(NULL, pnvbo);
+ return -ENOMEM;
+ }
+
+ nvbo->gem->driver_private = nvbo;
+ return 0;
+}
+
+struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+ int ret = 0;
+
+ /* pin buffer into GTT */
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
+}
+
+struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ if (dma_buf->ops == &nouveau_dmabuf_ops) {
+ nvbo = dma_buf->priv;
+ if (nvbo->gem) {
+ if (nvbo->gem->dev == dev) {
+ drm_gem_object_reference(nvbo->gem);
+ return nvbo->gem;
+ }
+ }
+ }
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_PTR(PTR_ERR(attach));
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
+ if (ret)
+ goto fail_unmap;
+
+ nvbo->gem->import_attach = attach;
+
+ return nvbo->gem;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
+
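Two details of nouveau_prime.c worth noting: export pins the buffer into GART before handing it to dma_buf_export(), and import short-circuits when given one of nouveau's own exports, taking a GEM reference instead of attaching and remapping its own pages. The fast path condensed, using only the structures shown above:

	/* Sketch of the self-import check in nouveau_gem_prime_import():
	 * if this dma-buf was exported by this same device, reuse the
	 * existing GEM object rather than attaching to it. */
	if (dma_buf->ops == &nouveau_dmabuf_ops) {
		struct nouveau_bo *nvbo = dma_buf->priv;

		if (nvbo->gem && nvbo->gem->dev == dev) {
			drm_gem_object_reference(nvbo->gem);
			return nvbo->gem;	/* no attach/map needed */
		}
	}
	/* otherwise: dma_buf_attach() + dma_buf_map_attachment() */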
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 47f245edf538..38483a042bc2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -290,7 +290,10 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
struct nouveau_mem *node = mem->mm_node;
/* noop: bound in move_notify() */
- node->pages = nvbe->ttm.dma_address;
+ if (ttm->sg) {
+ node->sg = ttm->sg;
+ } else
+ node->pages = nvbe->ttm.dma_address;
return 0;
}
@@ -338,10 +341,10 @@ nouveau_sgdma_init(struct drm_device *dev)
u32 aper_size, align;
int ret;
- if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
+ if (dev_priv->card_type >= NV_40)
aper_size = 512 * 1024 * 1024;
else
- aper_size = 64 * 1024 * 1024;
+ aper_size = 128 * 1024 * 1024;
/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
* christmas. The cards before it have them, the cards after
diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
new file mode 100644
index 000000000000..e60bc6ce9003
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_software.h
@@ -0,0 +1,69 @@
+#ifndef __NOUVEAU_SOFTWARE_H__
+#define __NOUVEAU_SOFTWARE_H__
+
+struct nouveau_software_priv {
+ struct nouveau_exec_engine base;
+ struct list_head vblank;
+};
+
+struct nouveau_software_chan {
+ struct list_head flip;
+ struct {
+ struct list_head list;
+ struct nouveau_bo *bo;
+ u32 offset;
+ u32 value;
+ u32 head;
+ } vblank;
+};
+
+static inline void
+nouveau_software_vblank(struct drm_device *dev, int crtc)
+{
+ struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
+ struct nouveau_software_chan *pch, *tmp;
+
+ list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
+ if (pch->vblank.head != crtc)
+ continue;
+
+ nouveau_bo_wr32(pch->vblank.bo, pch->vblank.offset,
+ pch->vblank.value);
+ list_del(&pch->vblank.list);
+ drm_vblank_put(dev, crtc);
+ }
+}
+
+static inline void
+nouveau_software_context_new(struct nouveau_software_chan *pch)
+{
+ INIT_LIST_HEAD(&pch->flip);
+}
+
+static inline void
+nouveau_software_create(struct nouveau_software_priv *psw)
+{
+ INIT_LIST_HEAD(&psw->vblank);
+}
+
+static inline u16
+nouveau_software_class(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->card_type <= NV_04)
+ return 0x006e;
+ if (dev_priv->card_type <= NV_40)
+ return 0x016e;
+ if (dev_priv->card_type <= NV_50)
+ return 0x506e;
+ if (dev_priv->card_type <= NV_E0)
+ return 0x906e;
+ return 0x0000;
+}
+
+int nv04_software_create(struct drm_device *);
+int nv50_software_create(struct drm_device *);
+int nvc0_software_create(struct drm_device *);
+u64 nvc0_software_crtc(struct nouveau_channel *, int crtc);
+
+#endif
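nouveau_software_class() above gives the software object class appropriate to the chipset generation (0x006e, 0x016e, 0x506e or 0x906e, and 0x0000 for unknown cards); the 0x506e compatibility shim in nouveau_ioctl_grobj_alloc() in nouveau_object.c uses it to translate requests from older userspace. For example (value read off the mapping above, not verified on hardware):

	u16 cls = nouveau_software_class(dev);	/* 0x906e on an NV_C0 board */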
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index c2a8511e855a..19706f0532ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -39,6 +39,9 @@
#include "nouveau_gpio.h"
#include "nouveau_pm.h"
#include "nv50_display.h"
+#include "nouveau_fifo.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
static int nouveau_stub_init(struct drm_device *dev) { return 0; }
@@ -66,18 +69,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv04_fb_init;
engine->fb.takedown = nv04_fb_takedown;
- engine->fifo.channels = 16;
- engine->fifo.init = nv04_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv04_fifo_channel_id;
- engine->fifo.create_context = nv04_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv04_fifo_load_context;
- engine->fifo.unload_context = nv04_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -111,18 +102,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fb.init_tile_region = nv10_fb_init_tile_region;
engine->fb.set_tile_region = nv10_fb_set_tile_region;
engine->fb.free_tile_region = nv10_fb_free_tile_region;
- engine->fifo.channels = 32;
- engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv10_fifo_channel_id;
- engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv10_fifo_load_context;
- engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -162,18 +141,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fb.init_tile_region = nv20_fb_init_tile_region;
engine->fb.set_tile_region = nv20_fb_set_tile_region;
engine->fb.free_tile_region = nv20_fb_free_tile_region;
- engine->fifo.channels = 32;
- engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv10_fifo_channel_id;
- engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv10_fifo_load_context;
- engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -209,18 +176,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fb.init_tile_region = nv30_fb_init_tile_region;
engine->fb.set_tile_region = nv10_fb_set_tile_region;
engine->fb.free_tile_region = nv30_fb_free_tile_region;
- engine->fifo.channels = 32;
- engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv10_fifo_channel_id;
- engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv10_fifo_load_context;
- engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -259,18 +214,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fb.init_tile_region = nv30_fb_init_tile_region;
engine->fb.set_tile_region = nv40_fb_set_tile_region;
engine->fb.free_tile_region = nv30_fb_free_tile_region;
- engine->fifo.channels = 32;
- engine->fifo.init = nv40_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv10_fifo_channel_id;
- engine->fifo.create_context = nv40_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv40_fifo_load_context;
- engine->fifo.unload_context = nv40_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -317,18 +260,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv50_fb_init;
engine->fb.takedown = nv50_fb_takedown;
- engine->fifo.channels = 128;
- engine->fifo.init = nv50_fifo_init;
- engine->fifo.takedown = nv50_fifo_takedown;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.channel_id = nv50_fifo_channel_id;
- engine->fifo.create_context = nv50_fifo_create_context;
- engine->fifo.destroy_context = nv50_fifo_destroy_context;
- engine->fifo.load_context = nv50_fifo_load_context;
- engine->fifo.unload_context = nv50_fifo_unload_context;
- engine->fifo.tlb_flush = nv50_fifo_tlb_flush;
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
@@ -392,17 +323,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
- engine->fifo.channels = 128;
- engine->fifo.init = nvc0_fifo_init;
- engine->fifo.takedown = nvc0_fifo_takedown;
- engine->fifo.disable = nvc0_fifo_disable;
- engine->fifo.enable = nvc0_fifo_enable;
- engine->fifo.reassign = nvc0_fifo_reassign;
- engine->fifo.channel_id = nvc0_fifo_channel_id;
- engine->fifo.create_context = nvc0_fifo_create_context;
- engine->fifo.destroy_context = nvc0_fifo_destroy_context;
- engine->fifo.load_context = nvc0_fifo_load_context;
- engine->fifo.unload_context = nvc0_fifo_unload_context;
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
@@ -445,17 +365,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
- engine->fifo.channels = 128;
- engine->fifo.init = nvc0_fifo_init;
- engine->fifo.takedown = nvc0_fifo_takedown;
- engine->fifo.disable = nvc0_fifo_disable;
- engine->fifo.enable = nvc0_fifo_enable;
- engine->fifo.reassign = nvc0_fifo_reassign;
- engine->fifo.channel_id = nvc0_fifo_channel_id;
- engine->fifo.create_context = nvc0_fifo_create_context;
- engine->fifo.destroy_context = nvc0_fifo_destroy_context;
- engine->fifo.load_context = nvc0_fifo_load_context;
- engine->fifo.unload_context = nvc0_fifo_unload_context;
engine->display.early_init = nouveau_stub_init;
engine->display.late_takedown = nouveau_stub_takedown;
engine->display.create = nvd0_display_create;
@@ -496,13 +405,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
- engine->fifo.channels = 0;
- engine->fifo.init = nouveau_stub_init;
- engine->fifo.takedown = nouveau_stub_takedown;
- engine->fifo.disable = nvc0_fifo_disable;
- engine->fifo.enable = nvc0_fifo_enable;
- engine->fifo.reassign = nvc0_fifo_reassign;
- engine->fifo.unload_context = nouveau_stub_init;
engine->display.early_init = nouveau_stub_init;
engine->display.late_takedown = nouveau_stub_takedown;
engine->display.create = nvd0_display_create;
@@ -607,61 +509,24 @@ nouveau_card_channel_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
- int ret, oclass;
+ int ret;
ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
dev_priv->channel = chan;
if (ret)
return ret;
-
mutex_unlock(&dev_priv->channel->mutex);
- if (dev_priv->card_type <= NV_50) {
- if (dev_priv->card_type < NV_50)
- oclass = 0x0039;
- else
- oclass = 0x5039;
-
- ret = nouveau_gpuobj_gr_new(chan, NvM2MF, oclass);
- if (ret)
- goto error;
-
- ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
- &chan->m2mf_ntfy);
- if (ret)
- goto error;
-
- ret = RING_SPACE(chan, 6);
- if (ret)
- goto error;
-
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
- OUT_RING (chan, NvM2MF);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
- OUT_RING (chan, NvNotify0);
- OUT_RING (chan, chan->vram_handle);
- OUT_RING (chan, chan->gart_handle);
- } else
- if (dev_priv->card_type <= NV_D0) {
- ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
- if (ret)
- goto error;
-
- ret = RING_SPACE(chan, 2);
- if (ret)
- goto error;
-
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
- OUT_RING (chan, 0x00009039);
- }
-
- FIRE_RING (chan);
-error:
- if (ret)
- nouveau_card_channel_fini(dev);
- return ret;
+ nouveau_bo_move_init(chan);
+ return 0;
}
+static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
+ .set_gpu_state = nouveau_switcheroo_set_state,
+ .reprobe = nouveau_switcheroo_reprobe,
+ .can_switch = nouveau_switcheroo_can_switch,
+};
+
int
nouveau_card_init(struct drm_device *dev)
{
@@ -670,9 +535,7 @@ nouveau_card_init(struct drm_device *dev)
int ret, e = 0;
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
- vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
- nouveau_switcheroo_reprobe,
- nouveau_switcheroo_can_switch);
+ vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
/* Initialise internal driver API hooks */
ret = nouveau_init_engine_ptrs(dev);
@@ -745,6 +608,81 @@ nouveau_card_init(struct drm_device *dev)
if (!dev_priv->noaccel) {
switch (dev_priv->card_type) {
case NV_04:
+ nv04_fifo_create(dev);
+ break;
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ if (dev_priv->chipset < 0x17)
+ nv10_fifo_create(dev);
+ else
+ nv17_fifo_create(dev);
+ break;
+ case NV_40:
+ nv40_fifo_create(dev);
+ break;
+ case NV_50:
+ if (dev_priv->chipset == 0x50)
+ nv50_fifo_create(dev);
+ else
+ nv84_fifo_create(dev);
+ break;
+ case NV_C0:
+ case NV_D0:
+ nvc0_fifo_create(dev);
+ break;
+ case NV_E0:
+ nve0_fifo_create(dev);
+ break;
+ default:
+ break;
+ }
+
+ switch (dev_priv->card_type) {
+ case NV_04:
+ nv04_fence_create(dev);
+ break;
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ case NV_40:
+ case NV_50:
+ if (dev_priv->chipset < 0x84)
+ nv10_fence_create(dev);
+ else
+ nv84_fence_create(dev);
+ break;
+ case NV_C0:
+ case NV_D0:
+ case NV_E0:
+ nvc0_fence_create(dev);
+ break;
+ default:
+ break;
+ }
+
+ switch (dev_priv->card_type) {
+ case NV_04:
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ case NV_40:
+ nv04_software_create(dev);
+ break;
+ case NV_50:
+ nv50_software_create(dev);
+ break;
+ case NV_C0:
+ case NV_D0:
+ case NV_E0:
+ nvc0_software_create(dev);
+ break;
+ default:
+ break;
+ }
+
+ switch (dev_priv->card_type) {
+ case NV_04:
nv04_graph_create(dev);
break;
case NV_10:
@@ -764,6 +702,9 @@ nouveau_card_init(struct drm_device *dev)
case NV_D0:
nvc0_graph_create(dev);
break;
+ case NV_E0:
+ nve0_graph_create(dev);
+ break;
default:
break;
}
@@ -796,8 +737,9 @@ nouveau_card_init(struct drm_device *dev)
}
break;
case NV_C0:
- nvc0_copy_create(dev, 0);
nvc0_copy_create(dev, 1);
+ case NV_D0:
+ nvc0_copy_create(dev, 0);
break;
default:
break;
@@ -830,16 +772,11 @@ nouveau_card_init(struct drm_device *dev)
goto out_engine;
}
}
-
- /* PFIFO */
- ret = engine->fifo.init(dev);
- if (ret)
- goto out_engine;
}
ret = nouveau_irq_init(dev);
if (ret)
- goto out_fifo;
+ goto out_engine;
ret = nouveau_display_create(dev);
if (ret)
@@ -848,14 +785,10 @@ nouveau_card_init(struct drm_device *dev)
nouveau_backlight_init(dev);
nouveau_pm_init(dev);
- ret = nouveau_fence_init(dev);
- if (ret)
- goto out_pm;
-
if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
ret = nouveau_card_channel_init(dev);
if (ret)
- goto out_fence;
+ goto out_pm;
}
if (dev->mode_config.num_crtc) {
@@ -870,17 +803,12 @@ nouveau_card_init(struct drm_device *dev)
out_chan:
nouveau_card_channel_fini(dev);
-out_fence:
- nouveau_fence_fini(dev);
out_pm:
nouveau_pm_fini(dev);
nouveau_backlight_exit(dev);
nouveau_display_destroy(dev);
out_irq:
nouveau_irq_fini(dev);
-out_fifo:
- if (!dev_priv->noaccel)
- engine->fifo.takedown(dev);
out_engine:
if (!dev_priv->noaccel) {
for (e = e - 1; e >= 0; e--) {
@@ -912,6 +840,7 @@ out_bios:
out_display_early:
engine->display.late_takedown(dev);
out:
+ vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
return ret;
}
@@ -928,13 +857,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
}
nouveau_card_channel_fini(dev);
- nouveau_fence_fini(dev);
nouveau_pm_fini(dev);
nouveau_backlight_exit(dev);
nouveau_display_destroy(dev);
if (!dev_priv->noaccel) {
- engine->fifo.takedown(dev);
for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
if (dev_priv->eng[e]) {
dev_priv->eng[e]->fini(dev, e, false);
@@ -969,6 +896,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
nouveau_irq_fini(dev);
+ vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
@@ -1176,7 +1104,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
goto err_priv;
}
- NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
+ NV_INFO(dev, "Detected an NV%02x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
/* map the mmio regs, limiting the amount to preserve vmap space */
@@ -1219,6 +1147,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
if (nouveau_noaccel == -1) {
switch (dev_priv->chipset) {
case 0xd9: /* known broken */
+ case 0xe4: /* needs binary driver firmware */
+ case 0xe7: /* needs binary driver firmware */
NV_INFO(dev, "acceleration disabled by default, pass "
"noaccel=0 to force enable\n");
dev_priv->noaccel = true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 2bf6c0350b4b..11edd5e91a0a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -77,6 +77,63 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
}
void
+nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
+ struct nouveau_mem *mem)
+{
+ struct nouveau_vm *vm = vma->vm;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ unsigned m, sglen;
+ u32 end, len;
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+ sglen = sg_dma_len(sg) >> PAGE_SHIFT;
+
+ end = pte + sglen;
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ for (m = 0; m < len; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+ vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+ num--;
+ pte++;
+
+ if (num == 0)
+ goto finish;
+ }
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ if (m < sglen) {
+ for (; m < sglen; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+ vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+ num--;
+ pte++;
+ if (num == 0)
+ goto finish;
+ }
+ }
+
+ }
+finish:
+ vm->flush(vm);
+}
+
+void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
struct nouveau_mem *mem)
{
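The new nouveau_vm_map_sg_table() above walks the buffer one DMA segment at a time, splitting each segment wherever it crosses a page-table boundary (`max` PTEs per table) and flushing the VM once at the end. Its core iteration pattern, reduced to a standalone sketch (assumes an already-DMA-mapped struct sg_table *sgt):

	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int npages = sg_dma_len(sg) >> PAGE_SHIFT;

		/* write `npages` PTEs starting at bus address `addr`,
		 * advancing to the next page table whenever the current
		 * one fills up */
	}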
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 4fb6e728734d..a8246e7e4a89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -72,6 +72,9 @@ struct nouveau_vm {
u64 phys, u64 delta);
void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
+
+ void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
void (*flush)(struct nouveau_vm *);
};
@@ -90,7 +93,8 @@ void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
struct nouveau_mem *);
-
+void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
+ struct nouveau_mem *mem);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 728d07584d39..4c31c63e5528 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -1047,7 +1047,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 7047d37e8dab..44488e3a257d 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -98,6 +98,13 @@ nv04_display_early_init(struct drm_device *dev)
NVSetOwner(dev, 0);
}
+ /* ensure vblank interrupts are off, they can't be enabled until
+ * drm_vblank has been initialised
+ */
+ NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
+ if (nv_two_heads(dev))
+ NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
+
return 0;
}
@@ -246,6 +253,10 @@ nv04_display_init(struct drm_device *dev)
void
nv04_display_fini(struct drm_device *dev)
{
+ /* disable vblank interrupts */
+ NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
+ if (nv_two_heads(dev))
+ NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 7a1189371096..7cd7857347ef 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -41,7 +41,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
if (ret)
return ret;
- BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0300, 3);
OUT_RING(chan, (region->sy << 16) | region->sx);
OUT_RING(chan, (region->dy << 16) | region->dx);
OUT_RING(chan, (region->height << 16) | region->width);
@@ -62,15 +62,15 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
if (ret)
return ret;
- BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
- BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x03fc, 1);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR)
OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
else
OUT_RING(chan, rect->color);
- BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0400, 2);
OUT_RING(chan, (rect->dx << 16) | rect->dy);
OUT_RING(chan, (rect->width << 16) | rect->height);
FIRE_RING(chan);
@@ -110,7 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
bg = image->bg_color;
}
- BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0be4, 7);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
OUT_RING(chan, ((image->dy + image->height) << 16) |
((image->dx + image->width) & 0xffff));
@@ -127,7 +127,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0c00, iter_len);
OUT_RINGp(chan, data, iter_len);
data += iter_len;
dsize -= iter_len;
@@ -209,25 +209,25 @@ nv04_fbcon_accel_init(struct fb_info *info)
return 0;
}
- BEGIN_RING(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, sub, 0x0000, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_RING(chan, sub, 0x0184, 2);
+ BEGIN_NV04(chan, sub, 0x0184, 2);
OUT_RING(chan, NvDmaFB);
OUT_RING(chan, NvDmaFB);
- BEGIN_RING(chan, sub, 0x0300, 4);
+ BEGIN_NV04(chan, sub, 0x0300, 4);
OUT_RING(chan, surface_fmt);
OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
- BEGIN_RING(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, sub, 0x0000, 1);
OUT_RING(chan, NvRop);
- BEGIN_RING(chan, sub, 0x0300, 1);
+ BEGIN_NV04(chan, sub, 0x0300, 1);
OUT_RING(chan, 0x55);
- BEGIN_RING(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, sub, 0x0000, 1);
OUT_RING(chan, NvImagePatt);
- BEGIN_RING(chan, sub, 0x0300, 8);
+ BEGIN_NV04(chan, sub, 0x0300, 8);
OUT_RING(chan, pattern_fmt);
#ifdef __BIG_ENDIAN
OUT_RING(chan, 2);
@@ -241,31 +241,31 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, ~0);
OUT_RING(chan, ~0);
- BEGIN_RING(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, sub, 0x0000, 1);
OUT_RING(chan, NvClipRect);
- BEGIN_RING(chan, sub, 0x0300, 2);
+ BEGIN_NV04(chan, sub, 0x0300, 2);
OUT_RING(chan, 0);
OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
- BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1);
OUT_RING(chan, NvImageBlit);
- BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
OUT_RING(chan, 3);
- BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
OUT_RING(chan, NvGdiRect);
- BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2);
OUT_RING(chan, NvImagePatt);
OUT_RING(chan, NvRop);
- BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1);
OUT_RING(chan, rect_fmt);
- BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
OUT_RING(chan, 3);
FIRE_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
new file mode 100644
index 000000000000..abe89db6de24
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv04_fence_chan {
+ struct nouveau_fence_chan base;
+ atomic_t sequence;
+};
+
+struct nv04_fence_priv {
+ struct nouveau_fence_priv base;
+};
+
+static int
+nv04_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
+ OUT_RING (chan, fence->sequence);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
+static int
+nv04_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ return -ENODEV;
+}
+
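+/* note: method 0x0150 is trapped by PFIFO as a software method (it is bound
+ * to the 0x006e class in nv04_software.c), so this handler runs from the
+ * fifo ISR and records the sequence number the GPU has reached */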
+int
+nv04_fence_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ atomic_set(&fctx->sequence, data);
+ return 0;
+}
+
+static u32
+nv04_fence_read(struct nouveau_channel *chan)
+{
+ struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ return atomic_read(&fctx->sequence);
+}
+
+static void
+nv04_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv04_fence_chan *fctx = chan->engctx[engine];
+ nouveau_fence_context_del(&fctx->base);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv04_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (fctx) {
+ nouveau_fence_context_new(&fctx->base);
+ atomic_set(&fctx->sequence, 0);
+ chan->engctx[engine] = fctx;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+static int
+nv04_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static int
+nv04_fence_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static void
+nv04_fence_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fence_priv *priv = nv_engine(dev, engine);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv04_fence_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fence_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.engine.destroy = nv04_fence_destroy;
+ priv->base.engine.init = nv04_fence_init;
+ priv->base.engine.fini = nv04_fence_fini;
+ priv->base.engine.context_new = nv04_fence_context_new;
+ priv->base.engine.context_del = nv04_fence_context_del;
+ priv->base.emit = nv04_fence_emit;
+ priv->base.sync = nv04_fence_sync;
+ priv->base.read = nv04_fence_read;
+ dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index db465a3ee1b2..a6295cd00ec7 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
@@ -27,49 +27,38 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
+#include "nouveau_fifo.h"
#include "nouveau_util.h"
-
-#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
-#define NV04_RAMFC__SIZE 32
-#define NV04_RAMFC_DMA_PUT 0x00
-#define NV04_RAMFC_DMA_GET 0x04
-#define NV04_RAMFC_DMA_INSTANCE 0x08
-#define NV04_RAMFC_DMA_STATE 0x0C
-#define NV04_RAMFC_DMA_FETCH 0x10
-#define NV04_RAMFC_ENGINE 0x14
-#define NV04_RAMFC_PULL1_ENGINE 0x18
-
-#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val))
-#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset)
-
-void
-nv04_fifo_disable(struct drm_device *dev)
-{
- uint32_t tmp;
-
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
- tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
-}
-
-void
-nv04_fifo_enable(struct drm_device *dev)
-{
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-}
-
-bool
-nv04_fifo_reassign(struct drm_device *dev, bool enable)
-{
- uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
-
- nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
- return (reassign == 1);
-}
+#include "nouveau_ramht.h"
+#include "nouveau_software.h"
+
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv04_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+};
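+
+/* each nv04_ramfc entry maps bits [regs, regs+bits) of register 'regp' to
+ * bits [ctxs, ctxs+bits) of the 32-bit RAMFC word at offset 'ctxp'; the
+ * save loop in nv04_fifo_fini and the zeroing in context_del walk it */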
+
+struct nv04_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv04_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
bool
nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
@@ -86,13 +75,13 @@ nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
* invalidate the most recently calculated instance.
*/
if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
- NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
+ NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
- NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+ NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
+ NV_PFIFO_INTR_CACHE_ERROR);
nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
}
@@ -100,242 +89,182 @@ nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
return pull & 1;
}
-int
-nv04_fifo_channel_id(struct drm_device *dev)
-{
- return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
- NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
-}
-
-#ifdef __BIG_ENDIAN
-#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN
-#else
-#define DMA_FETCH_ENDIANNESS 0
-#endif
-
-int
-nv04_fifo_create_context(struct nouveau_channel *chan)
+static int
+nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv04_fifo_chan *fctx;
unsigned long flags;
int ret;
- ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
- NV04_RAMFC__SIZE,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE,
- &chan->ramfc);
- if (ret)
- return ret;
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ /* map channel control registers */
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user)
- return -ENOMEM;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- /* Setup initial state */
- RAMFC_WR(DMA_PUT, chan->pushbuf_base);
- RAMFC_WR(DMA_GET, chan->pushbuf_base);
- RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
- RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
- DMA_FETCH_ENDIANNESS));
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
- /* enable the fifo dma operation */
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 32, ~0, 32,
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x0c, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(fctx->ramfc, 0x14, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x18, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- return 0;
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
}
void
-nv04_fifo_destroy_context(struct nouveau_channel *chan)
+nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
+ struct nv04_fifo_chan *fctx = chan->engctx[engine];
+ struct ramfc_desc *c = priv->ramfc_desc;
unsigned long flags;
+ int chid;
+ /* prevent fifo context switches */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- pfifo->reassign(dev, false);
-
- /* Unload the context if it's the currently active one */
- if (pfifo->channel_id(dev) == chan->id) {
- pfifo->disable(dev);
- pfifo->unload_context(dev);
- pfifo->enable(dev);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+ /* if this channel is active, replace it with a null context */
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
+ if (chid == chan->id) {
+ nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
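+ /* zero each mapped context field in the CACHE1 registers, leaving
+ * a clean null context loaded */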
+ do {
+ u32 mask = ((1ULL << c->bits) - 1) << c->regs;
+ nv_mask(dev, c->regp, mask, 0x00000000);
+ } while ((++c)->bits);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
}
- /* Keep it from being rescheduled */
+ /* restore normal operation, after disabling dma mode */
nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
-
- pfifo->reassign(dev, true);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- /* Free the channel resources */
+ /* clean up */
+ nouveau_gpuobj_ref(NULL, &fctx->ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
-static void
-nv04_fifo_do_load_context(struct drm_device *dev, int chid)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fc = NV04_RAMFC(chid), tmp;
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
- tmp = nv_ri32(dev, fc + 8);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
-}
-
-int
-nv04_fifo_load_context(struct nouveau_channel *chan)
-{
- uint32_t tmp;
-
- nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
- NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
- nv04_fifo_do_load_context(chan->dev, chan->id);
- nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
-
- /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
- tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
- nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
-
- return 0;
}
int
-nv04_fifo_unload_context(struct drm_device *dev)
+nv04_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nouveau_channel *chan = NULL;
- uint32_t tmp;
- int chid;
-
- chid = pfifo->channel_id(dev);
- if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
- return 0;
-
- chan = dev_priv->channels.ptr[chid];
- if (!chan) {
- NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
- return -EINVAL;
- }
-
- RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
- RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
- tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
- RAMFC_WR(DMA_INSTANCE, tmp);
- RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
- RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
- RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
- RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
-
- nv04_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
- return 0;
-}
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
-static void
-nv04_fifo_init_reset(struct drm_device *dev)
-{
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
-
- nv_wr32(dev, 0x003224, 0x000f0078);
- nv_wr32(dev, 0x002044, 0x0101ffff);
- nv_wr32(dev, 0x002040, 0x000000ff);
- nv_wr32(dev, 0x002500, 0x00000000);
- nv_wr32(dev, 0x003000, 0x00000000);
- nv_wr32(dev, 0x003050, 0x00000000);
- nv_wr32(dev, 0x003200, 0x00000000);
- nv_wr32(dev, 0x003250, 0x00000000);
- nv_wr32(dev, 0x003220, 0x00000000);
-
- nv_wr32(dev, 0x003250, 0x00000000);
- nv_wr32(dev, 0x003270, 0x00000000);
- nv_wr32(dev, 0x003210, 0x00000000);
-}
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
-static void
-nv04_fifo_init_ramxx(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
(dev_priv->ramht->gpuobj->pinst >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
-}
-static void
-nv04_fifo_init_intr(struct drm_device *dev)
-{
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (dev_priv->channels.ptr[i])
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+ }
+
+ return 0;
}
int
-nv04_fifo_init(struct drm_device *dev)
+nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- int i;
-
- nv04_fifo_init_reset(dev);
- nv04_fifo_init_ramxx(dev);
-
- nv04_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ struct nouveau_channel *chan;
+ int chid;
- nv04_fifo_init_intr(dev);
- pfifo->enable(dev);
- pfifo->reassign(dev, true);
+ /* prevent context switches and halt fifo operation */
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->channels.ptr[i]) {
- uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
- nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
- }
+ /* store current fifo context in ramfc */
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
+ chan = dev_priv->channels.ptr[chid];
+ if (suspend && chid != priv->base.channels && chan) {
+ struct nv04_fifo_chan *fctx = chan->engctx[engine];
+ struct nouveau_gpuobj *ctx = fctx->ramfc;
+ struct ramfc_desc *c = priv->ramfc_desc;
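+ /* pull each live field out of its register and pack it into the
+ * channel's RAMFC entry, ready for PFIFO to reload on resume */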
+ do {
+ u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+ u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+ u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
+ u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm);
+ nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));
+ } while ((++c)->bits);
}
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
return 0;
}
-void
-nv04_fifo_fini(struct drm_device *dev)
-{
- nv_wr32(dev, 0x2140, 0x00000000);
- nouveau_irq_unregister(dev, 8);
-}
-
static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
struct nouveau_gpuobj *obj;
@@ -346,7 +275,7 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
u32 engine;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
+ if (likely(chid >= 0 && chid < pfifo->channels))
chan = dev_priv->channels.ptr[chid];
if (unlikely(!chan))
goto out;
@@ -357,7 +286,6 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
break;
- chan->sw_subchannel[subc] = obj->class;
engine = 0x0000000f << (subc * 4);
nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
@@ -368,7 +296,7 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
break;
- if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
+ if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
mthd, data))
handled = true;
break;
@@ -391,8 +319,8 @@ static const char *nv_dma_state_err(u32 state)
void
nv04_fifo_isr(struct drm_device *dev)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
uint32_t status, reassign;
int cnt = 0;
@@ -402,7 +330,7 @@ nv04_fifo_isr(struct drm_device *dev)
nv_wr32(dev, NV03_PFIFO_CACHES, 0);
- chid = engine->fifo.channel_id(dev);
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
@@ -541,3 +469,38 @@ nv04_fifo_isr(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}
+
+void
+nv04_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 8);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv04_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv04_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv04_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
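+ /* 15 doubles as the chid mask and as the null channel PFIFO parks on */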
+ priv->base.channels = 15;
+ priv->ramfc_desc = nv04_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index dbdea8ed3925..72f1a62903b3 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -356,12 +356,12 @@ static struct nouveau_channel *
nv04_graph_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chid = dev_priv->engine.fifo.channels;
+ int chid = 15;
if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
- if (chid >= dev_priv->engine.fifo.channels)
+ if (chid > 15)
return NULL;
return dev_priv->channels.ptr[chid];
@@ -404,7 +404,6 @@ nv04_graph_load_context(struct nouveau_channel *chan)
static int
nv04_graph_unload_context(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
struct graph_state *ctx;
uint32_t tmp;
@@ -420,7 +419,7 @@ nv04_graph_unload_context(struct drm_device *dev)
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ tmp |= 15 << 24;
nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
return 0;
}
@@ -495,7 +494,6 @@ nv04_graph_object_new(struct nouveau_channel *chan, int engine,
static int
nv04_graph_init(struct drm_device *dev, int engine)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -527,7 +525,7 @@ nv04_graph_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ tmp |= 15 << 24;
nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
/* These don't belong here, they're part of a per-channel context */
@@ -550,28 +548,6 @@ nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
return 0;
}
-static int
-nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- atomic_set(&chan->fence.last_sequence_irq, data);
- return 0;
-}
-
-int
-nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_page_flip_state s;
-
- if (!nouveau_finish_page_flip(chan, &s))
- nv_set_crtc_base(dev, s.crtc,
- s.offset + s.y * s.pitch + s.x * s.bpp / 8);
-
- return 0;
-}
-
/*
* Software methods, why they are needed, and how they all work:
*
@@ -1020,7 +996,8 @@ nv04_graph_context_switch(struct drm_device *dev)
nv04_graph_unload_context(dev);
/* Load context for next channel */
- chid = dev_priv->engine.fifo.channel_id(dev);
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+ NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
chan = dev_priv->channels.ptr[chid];
if (chan)
nv04_graph_load_context(chan);
@@ -1345,9 +1322,5 @@ nv04_graph_create(struct drm_device *dev)
NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
- /* nvsw */
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index c1248e0740a3..ef7a934a499a 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -1,6 +1,8 @@
#include "drmP.h"
#include "drm.h"
+
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
/* returns the size of fifo context */
@@ -10,12 +12,15 @@ nouveau_fifo_ctx_size(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
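+ /* total RAMFC block size: per-channel context size * channel count */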
if (dev_priv->chipset >= 0x40)
- return 128;
+ return 128 * 32;
else
if (dev_priv->chipset >= 0x17)
- return 64;
+ return 64 * 32;
+ else
+ if (dev_priv->chipset >= 0x10)
+ return 32 * 32;
- return 32;
+ return 32 * 16;
}
int nv04_instmem_init(struct drm_device *dev)
@@ -39,14 +44,10 @@ int nv04_instmem_init(struct drm_device *dev)
else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
else rsvd = 0x4a40 * vs;
rsvd += 16 * 1024;
- rsvd *= dev_priv->engine.fifo.channels;
-
- /* pciegart table */
- if (pci_is_pcie(dev->pdev))
- rsvd += 512 * 1024;
+ rsvd *= 32; /* per-channel */
- /* object storage */
- rsvd += 512 * 1024;
+ rsvd += 512 * 1024; /* pci(e)gart table */
+ rsvd += 512 * 1024; /* object storage */
dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
} else {
@@ -71,7 +72,7 @@ int nv04_instmem_init(struct drm_device *dev)
return ret;
/* And RAMFC */
- length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev);
+ length = nouveau_fifo_ctx_size(dev);
switch (dev_priv->card_type) {
case NV_40:
offset = 0x20000;
diff --git a/drivers/gpu/drm/nouveau/nv04_software.c b/drivers/gpu/drm/nouveau/nv04_software.c
new file mode 100644
index 000000000000..0c41abf48774
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_software.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
+#include "nouveau_hw.h"
+
+struct nv04_software_priv {
+ struct nouveau_software_priv base;
+};
+
+struct nv04_software_chan {
+ struct nouveau_software_chan base;
+};
+
+static int
+mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nouveau_page_flip_state state;
+
+ if (!nouveau_finish_page_flip(chan, &state)) {
+ nv_set_crtc_base(chan->dev, state.crtc, state.offset +
+ state.y * state.pitch +
+ state.x * state.bpp / 8);
+ }
+
+ return 0;
+}
+
+static int
+nv04_software_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv04_software_chan *pch;
+
+ pch = kzalloc(sizeof(*pch), GFP_KERNEL);
+ if (!pch)
+ return -ENOMEM;
+
+ nouveau_software_context_new(&pch->base);
+ chan->engctx[engine] = pch;
+ return 0;
+}
+
+static void
+nv04_software_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv04_software_chan *pch = chan->engctx[engine];
+ chan->engctx[engine] = NULL;
+ kfree(pch);
+}
+
+static int
+nv04_software_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 0;
+ obj->class = class;
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
+static int
+nv04_software_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static int
+nv04_software_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nv04_software_destroy(struct drm_device *dev, int engine)
+{
+ struct nv04_software_priv *psw = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, SW);
+ kfree(psw);
+}
+
+int
+nv04_software_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_software_priv *psw;
+
+ psw = kzalloc(sizeof(*psw), GFP_KERNEL);
+ if (!psw)
+ return -ENOMEM;
+
+ psw->base.base.destroy = nv04_software_destroy;
+ psw->base.base.init = nv04_software_init;
+ psw->base.base.fini = nv04_software_fini;
+ psw->base.base.context_new = nv04_software_context_new;
+ psw->base.base.context_del = nv04_software_context_del;
+ psw->base.base.object_new = nv04_software_object_new;
+ nouveau_software_create(&psw->base);
+
+ NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
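+ /* pre-NV10 chips have no REF_CNT register, so fence sequence numbers
+ * are written back via software method 0x0150 (nv04_fence_mthd) */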
+ if (dev_priv->card_type <= NV_04) {
+ NVOBJ_CLASS(dev, 0x006e, SW);
+ NVOBJ_MTHD (dev, 0x006e, 0x0150, nv04_fence_mthd);
+ NVOBJ_MTHD (dev, 0x006e, 0x0500, mthd_flip);
+ } else {
+ NVOBJ_CLASS(dev, 0x016e, SW);
+ NVOBJ_MTHD (dev, 0x016e, 0x0500, mthd_flip);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
new file mode 100644
index 000000000000..8a1b75009185
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv10_fence_chan {
+ struct nouveau_fence_chan base;
+};
+
+struct nv10_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_bo *bo;
+ spinlock_t lock;
+ u32 sequence;
+};
+
+static int
+nv10_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
+ OUT_RING (chan, fence->sequence);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
+static int
+nv10_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ return -ENODEV;
+}
+
+static int
+nv17_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+ u32 value;
+ int ret;
+
+ if (!mutex_trylock(&prev->mutex))
+ return -EBUSY;
+
+ spin_lock(&priv->lock);
+ value = priv->sequence;
+ priv->sequence += 2;
+ spin_unlock(&priv->lock);
+
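+ /* prev waits for the shared semaphore to reach 'value' and bumps it
+ * to value + 1; chan waits for value + 1 and bumps to value + 2, so
+ * chan stalls until prev has drained its earlier work */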
+ ret = RING_SPACE(prev, 5);
+ if (!ret) {
+ BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+ OUT_RING (prev, NvSema);
+ OUT_RING (prev, 0);
+ OUT_RING (prev, value + 0);
+ OUT_RING (prev, value + 1);
+ FIRE_RING (prev);
+ }
+
+ if (!ret && !(ret = RING_SPACE(chan, 5))) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+ OUT_RING (chan, NvSema);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, value + 1);
+ OUT_RING (chan, value + 2);
+ FIRE_RING (chan);
+ }
+
+ mutex_unlock(&prev->mutex);
+ return 0;
+}
+
+static u32
+nv10_fence_read(struct nouveau_channel *chan)
+{
+ return nvchan_rd32(chan, 0x0048);
+}
+
+static void
+nv10_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv10_fence_chan *fctx = chan->engctx[engine];
+ nouveau_fence_context_del(&fctx->base);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv10_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv10_fence_priv *priv = nv_engine(chan->dev, engine);
+ struct nv10_fence_chan *fctx;
+ struct nouveau_gpuobj *obj;
+ int ret = 0;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ nouveau_fence_context_new(&fctx->base);
+
+ if (priv->bo) {
+ struct ttm_mem_reg *mem = &priv->bo->bo.mem;
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
+ mem->start * PAGE_SIZE, mem->size,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &obj);
+ if (!ret) {
+ ret = nouveau_ramht_insert(chan, NvSema, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ }
+ }
+
+ if (ret)
+ nv10_fence_context_del(chan, engine);
+ return ret;
+}
+
+static int
+nv10_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static int
+nv10_fence_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static void
+nv10_fence_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fence_priv *priv = nv_engine(dev, engine);
+
+ nouveau_bo_ref(NULL, &priv->bo);
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv10_fence_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fence_priv *priv;
+ int ret = 0;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.engine.destroy = nv10_fence_destroy;
+ priv->base.engine.init = nv10_fence_init;
+ priv->base.engine.fini = nv10_fence_fini;
+ priv->base.engine.context_new = nv10_fence_context_new;
+ priv->base.engine.context_del = nv10_fence_context_del;
+ priv->base.emit = nv10_fence_emit;
+ priv->base.read = nv10_fence_read;
+ priv->base.sync = nv10_fence_sync;
+ dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+ spin_lock_init(&priv->lock);
+
+ if (dev_priv->chipset >= 0x17) {
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &priv->bo);
+ if (!ret) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
+ if (ret == 0) {
+ nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
+ priv->base.sync = nv17_fence_sync;
+ }
+ }
+
+ if (ret)
+ nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index d2ecbff4bee1..f1fe7d758241 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
@@ -27,220 +27,112 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
+#include "nouveau_util.h"
#include "nouveau_ramht.h"
-#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE))
-#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
-
-int
-nv10_fifo_channel_id(struct drm_device *dev)
-{
- return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
- NV10_PFIFO_CACHE1_PUSH1_CHID_MASK;
-}
-
-int
-nv10_fifo_create_context(struct nouveau_channel *chan)
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv10_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+};
+
+struct nv10_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv10_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- uint32_t fc = NV10_RAMFC(chan->id);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv10_fifo_chan *fctx;
+ unsigned long flags;
int ret;
- ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
- NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
- if (ret)
- return ret;
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ /* map channel control registers */
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user)
- return -ENOMEM;
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
- /* Fill entries that are seen filled in dumps of nvidia driver just
- * after channel's is put into DMA mode
- */
- nv_wi32(dev, fc + 0, chan->pushbuf_base);
- nv_wi32(dev, fc + 4, chan->pushbuf_base);
- nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
- nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 32, ~0, 32,
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x08, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x10, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
- 0);
-
- /* enable the fifo dma operation */
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
- return 0;
-}
-
-static void
-nv10_fifo_do_load_context(struct drm_device *dev, int chid)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fc = NV10_RAMFC(chid), tmp;
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
- nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
-
- tmp = nv_ri32(dev, fc + 12);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));
-
- if (dev_priv->chipset < 0x17)
- goto out;
-
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
- tmp = nv_ri32(dev, fc + 36);
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
- nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
-
-out:
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
-}
-
-int
-nv10_fifo_load_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- uint32_t tmp;
-
- nv10_fifo_do_load_context(dev, chan->id);
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(fctx->ramfc, 0x18, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
- NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
-
- return 0;
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
}
int
-nv10_fifo_unload_context(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- uint32_t fc, tmp;
- int chid;
-
- chid = pfifo->channel_id(dev);
- if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
- return 0;
- fc = NV10_RAMFC(chid);
-
- nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
- nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
- nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
- tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
- nv_wi32(dev, fc + 12, tmp);
- nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
- nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
- nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
- nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
-
- if (dev_priv->chipset < 0x17)
- goto out;
-
- nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
- tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
- nv_wi32(dev, fc + 36, tmp);
- nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
- nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
- nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
-
-out:
- nv10_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
- return 0;
-}
-
-static void
-nv10_fifo_init_reset(struct drm_device *dev)
-{
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
-
- nv_wr32(dev, 0x003224, 0x000f0078);
- nv_wr32(dev, 0x002044, 0x0101ffff);
- nv_wr32(dev, 0x002040, 0x000000ff);
- nv_wr32(dev, 0x002500, 0x00000000);
- nv_wr32(dev, 0x003000, 0x00000000);
- nv_wr32(dev, 0x003050, 0x00000000);
-
- nv_wr32(dev, 0x003258, 0x00000000);
- nv_wr32(dev, 0x003210, 0x00000000);
- nv_wr32(dev, 0x003270, 0x00000000);
-}
-
-static void
-nv10_fifo_init_ramxx(struct drm_device *dev)
+nv10_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fifo_priv *priv;
- nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht->bits - 9) << 16) |
- (dev_priv->ramht->gpuobj->pinst >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- if (dev_priv->chipset < 0x17) {
- nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
- } else {
- nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) |
- (1 << 16) /* 64 Bytes entry*/);
- /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
- }
-}
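+ /* nv10 reuses the nv04 fifo engine hooks; only context_new (for the
+ * different RAMFC layout) and the channel count differ */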
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv04_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv10_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 31;
+ priv->ramfc_desc = nv10_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-static void
-nv10_fifo_init_intr(struct drm_device *dev)
-{
nouveau_irq_register(dev, 8, nv04_fifo_isr);
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
-}
-
-int
-nv10_fifo_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- int i;
-
- nv10_fifo_init_reset(dev);
- nv10_fifo_init_ramxx(dev);
-
- nv10_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
-
- nv10_fifo_init_intr(dev);
- pfifo->enable(dev);
- pfifo->reassign(dev, true);
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->channels.ptr[i]) {
- uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
- nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
- }
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 7255e4a4d3f3..fb1d88a951de 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -759,7 +759,6 @@ static int
nv10_graph_unload_context(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
struct graph_state *ctx;
uint32_t tmp;
@@ -782,7 +781,7 @@ nv10_graph_unload_context(struct drm_device *dev)
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (pfifo->channels - 1) << 24;
+ tmp |= 31 << 24;
nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
return 0;
}
@@ -822,12 +821,12 @@ struct nouveau_channel *
nv10_graph_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chid = dev_priv->engine.fifo.channels;
+ int chid = 31;
if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
- if (chid >= dev_priv->engine.fifo.channels)
+ if (chid >= 31)
return NULL;
return dev_priv->channels.ptr[chid];
@@ -948,7 +947,7 @@ nv10_graph_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ tmp |= 31 << 24;
nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
@@ -1153,10 +1152,6 @@ nv10_graph_create(struct drm_device *dev)
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
nouveau_irq_register(dev, 12, nv10_graph_isr);
- /* nvsw */
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
diff --git a/drivers/gpu/drm/nouveau/nv17_fifo.c b/drivers/gpu/drm/nouveau/nv17_fifo.c
new file mode 100644
index 000000000000..d9e482e4abee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_fifo.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
+#include "nouveau_util.h"
+#include "nouveau_ramht.h"
+
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv17_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ {}
+};
+
+struct nv17_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv17_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv17_fifo_chan *fctx;
+ unsigned long flags;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ /* map channel control registers */
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 64, ~0, 64,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static int
+nv17_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
+
+ nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht->bits - 9) << 16) |
+ (dev_priv->ramht->gpuobj->pinst >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
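+ /* bit 16 selects 64-byte RAMFC entries */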
+ nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
+ dev_priv->ramfc->pinst >> 8);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (dev_priv->channels.ptr[i])
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+ }
+
+ return 0;
+}
+
+int
+nv17_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_fifo_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv17_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv17_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 31;
+ priv->ramfc_desc = nv17_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 183e37512ef9..e34ea30758f6 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -43,8 +43,6 @@ struct nv20_graph_engine {
int
nv20_graph_unload_context(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
struct nouveau_gpuobj *grctx;
u32 tmp;
@@ -62,7 +60,7 @@ nv20_graph_unload_context(struct drm_device *dev)
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (pfifo->channels - 1) << 24;
+ tmp |= 31 << 24;
nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
return 0;
}
@@ -796,10 +794,6 @@ nv20_graph_create(struct drm_device *dev)
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
nouveau_irq_register(dev, 12, nv20_graph_isr);
- /* nvsw */
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
diff --git a/drivers/gpu/drm/nouveau/nv31_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c
index 6f06a0713f00..5f239bf658c4 100644
--- a/drivers/gpu/drm/nouveau/nv31_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv31_mpeg.c
@@ -24,6 +24,7 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
struct nv31_mpeg_engine {
@@ -208,6 +209,7 @@ nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
static int
nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ctx;
unsigned long flags;
@@ -218,7 +220,7 @@ nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
return 0;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
if (!dev_priv->channels.ptr[i])
continue;
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 68cb2d991c88..cdc818479b0a 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
@@ -25,215 +25,123 @@
*/
#include "drmP.h"
+#include "drm.h"
#include "nouveau_drv.h"
-#include "nouveau_drm.h"
+#include "nouveau_fifo.h"
+#include "nouveau_util.h"
#include "nouveau_ramht.h"
-#define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE))
-#define NV40_RAMFC__SIZE 128
-
-int
-nv40_fifo_create_context(struct nouveau_channel *chan)
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv40_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 2, 28, 0x18, 28, 0x002058 },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
+ { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
+ { 32, 0, 0x40, 0, 0x0032e4 },
+ { 32, 0, 0x44, 0, 0x0032e8 },
+ { 32, 0, 0x4c, 0, 0x002088 },
+ { 32, 0, 0x50, 0, 0x003300 },
+ { 32, 0, 0x54, 0, 0x00330c },
+ {}
+};
+
+struct nv40_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv40_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
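/*
 * Illustrative sketch, not part of the patch: each nv40_ramfc entry maps
 * one bitfield between the RAMFC context image (ctxp byte offset, ctxs
 * bit shift) and a live PFIFO register (regp address, regs bit shift),
 * with "bits" giving the field width.  The nv04_fifo_context_del() hook
 * wired up below is assumed to save state by walking the table roughly
 * like so:
 */
struct ramfc_desc *c = priv->ramfc_desc;

do {
	u32 rm = ((1ULL << c->bits) - 1) << c->regs;    /* register mask */
	u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;    /* context mask */
	u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
	u32 cv = nv_ro32(fctx->ramfc, c->ctxp) & ~cm;

	nv_wo32(fctx->ramfc, c->ctxp, cv | (rv << c->ctxs));
} while ((++c)->bits);          /* the empty terminator entry ends the walk */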
+
+static int
+nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fc = NV40_RAMFC(chan->id);
+ struct nv40_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv40_fifo_chan *fctx;
unsigned long flags;
int ret;
- ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
- NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
- if (ret)
- return ret;
-
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV40_USER(chan->id), PAGE_SIZE);
- if (!chan->user)
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
return -ENOMEM;
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ /* map channel control registers */
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
- nv_wi32(dev, fc + 0, chan->pushbuf_base);
- nv_wi32(dev, fc + 4, chan->pushbuf_base);
- nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
- nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 128, ~0, 128,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x18, 0x30000000 |
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
- 0x30000000 /* no idea.. */);
- nv_wi32(dev, fc + 60, 0x0001FFFF);
-
- /* enable the fifo dma operation */
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(fctx->ramfc, 0x3c, 0x0001ffff);
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- return 0;
-}
-
-static void
-nv40_fifo_do_load_context(struct drm_device *dev, int chid)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
- nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));
-
- /* No idea what 0x2058 is.. */
- tmp = nv_ri32(dev, fc + 24);
- tmp2 = nv_rd32(dev, 0x2058) & 0xFFF;
- tmp2 |= (tmp & 0x30000000);
- nv_wr32(dev, 0x2058, tmp2);
- tmp &= ~0x30000000;
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);
- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
- tmp = nv_ri32(dev, fc + 40);
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
- nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
- nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));
+ /*XXX: remove this later, need fifo engine context commit hook */
+ nouveau_gpuobj_ref(fctx->ramfc, &chan->ramfc);
- /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
- tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
- tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
- nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);
-
- nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
- /* NVIDIA does this next line twice... */
- nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
- nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
- nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
- nv_wr32(dev, 0x330c, nv_ri32(dev, fc + 84));
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
-}
-
-int
-nv40_fifo_load_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- uint32_t tmp;
-
- nv40_fifo_do_load_context(dev, chan->id);
-
- /* Set channel active, and in DMA mode */
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
- NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
-
- /* Reset DMA_CTL_AT_INFO to INVALID */
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
-
- return 0;
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
}
-int
-nv40_fifo_unload_context(struct drm_device *dev)
+static int
+nv40_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- uint32_t fc, tmp;
- int chid;
-
- chid = pfifo->channel_id(dev);
- if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
- return 0;
- fc = NV40_RAMFC(chid);
-
- nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
- nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
- nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
- nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
- nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
- nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
- tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
- nv_wi32(dev, fc + 24, tmp);
- nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
- nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
- nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
- tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
- nv_wi32(dev, fc + 40, tmp);
- nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
- nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
- /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something
- * more involved depending on the value of 0x3228?
- */
- nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
- nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
- nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
- /* No idea what the below is for exactly, ripped from a mmio-trace */
- nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
- /* NVIDIA do this next line twice.. bug? */
- nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
- nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
- nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
-#if 0 /* no real idea which is PUT/GET in UNK_48.. */
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
- tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
- nv_wi32(dev, fc + 72, tmp);
-#endif
- nv_wi32(dev, fc + 84, nv_rd32(dev, 0x330c));
-
- nv40_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
- NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
- return 0;
-}
-
-static void
-nv40_fifo_init_reset(struct drm_device *dev)
-{
+ struct nv40_fifo_priv *priv = nv_engine(dev, engine);
int i;
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
- nv_wr32(dev, 0x003224, 0x000f0078);
- nv_wr32(dev, 0x003210, 0x00000000);
- nv_wr32(dev, 0x003270, 0x00000000);
- nv_wr32(dev, 0x003240, 0x00000000);
- nv_wr32(dev, 0x003244, 0x00000000);
- nv_wr32(dev, 0x003258, 0x00000000);
- nv_wr32(dev, 0x002504, 0x00000000);
- for (i = 0; i < 16; i++)
- nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
- nv_wr32(dev, 0x00250c, 0x0000ffff);
- nv_wr32(dev, 0x002048, 0x00000000);
- nv_wr32(dev, 0x003228, 0x00000000);
- nv_wr32(dev, 0x0032e8, 0x00000000);
- nv_wr32(dev, 0x002410, 0x00000000);
- nv_wr32(dev, 0x002420, 0x00000000);
- nv_wr32(dev, 0x002058, 0x00000001);
- nv_wr32(dev, 0x00221c, 0x00000000);
- /* something with 0x2084, read/modify/write, no change */
nv_wr32(dev, 0x002040, 0x000000ff);
- nv_wr32(dev, 0x002500, 0x00000000);
- nv_wr32(dev, 0x003200, 0x00000000);
-
- nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
-}
-
-static void
-nv40_fifo_init_ramxx(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv_wr32(dev, 0x002044, 0x2101ffff);
+ nv_wr32(dev, 0x002058, 0x00000001);
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
@@ -244,64 +152,59 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
case 0x47:
case 0x49:
case 0x4b:
- nv_wr32(dev, 0x2230, 1);
- break;
- default:
- break;
- }
-
- switch (dev_priv->chipset) {
+ nv_wr32(dev, 0x002230, 0x00000001);
case 0x40:
case 0x41:
case 0x42:
case 0x43:
case 0x45:
- case 0x47:
case 0x48:
- case 0x49:
- case 0x4b:
- nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
+ nv_wr32(dev, 0x002220, 0x00030002);
break;
default:
- nv_wr32(dev, 0x2230, 0);
- nv_wr32(dev, NV40_PFIFO_RAMFC,
- ((dev_priv->vram_size - 512 * 1024 +
- dev_priv->ramfc->pinst) >> 16) | (3 << 16));
+ nv_wr32(dev, 0x002230, 0x00000000);
+ nv_wr32(dev, 0x002220, ((dev_priv->vram_size - 512 * 1024 +
+ dev_priv->ramfc->pinst) >> 16) |
+ 0x00030000);
break;
}
-}
-static void
-nv40_fifo_init_intr(struct drm_device *dev)
-{
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (dev_priv->channels.ptr[i])
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+ }
+
+ return 0;
}
int
-nv40_fifo_init(struct drm_device *dev)
+nv40_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- int i;
-
- nv40_fifo_init_reset(dev);
- nv40_fifo_init_ramxx(dev);
+ struct nv40_fifo_priv *priv;
- nv40_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
-
- nv40_fifo_init_intr(dev);
- pfifo->enable(dev);
- pfifo->reassign(dev, true);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->channels.ptr[i]) {
- uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
- nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
- }
- }
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv40_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv40_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 31;
+ priv->ramfc_desc = nv40_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index ba14a93d8afa..aa9e2df64a26 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -27,7 +27,7 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
-#include "nouveau_grctx.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
struct nv40_graph_engine {
@@ -42,7 +42,6 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *grctx = NULL;
- struct nouveau_grctx ctx = {};
unsigned long flags;
int ret;
@@ -52,11 +51,7 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
return ret;
/* Initialise default context values */
- ctx.dev = chan->dev;
- ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = grctx;
- nv40_grctx_init(&ctx);
-
+ nv40_grctx_fill(dev, grctx);
nv_wo32(grctx, 0, grctx->vinst);
/* init grctx pointer in ramfc, and on PFIFO if channel is
@@ -184,8 +179,7 @@ nv40_graph_init(struct drm_device *dev, int engine)
struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nouveau_grctx ctx = {};
- uint32_t vramsz, *cp;
+ uint32_t vramsz;
int i, j;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -193,22 +187,8 @@ nv40_graph_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
- cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
- if (!cp)
- return -ENOMEM;
-
- ctx.dev = dev;
- ctx.mode = NOUVEAU_GRCTX_PROG;
- ctx.data = cp;
- ctx.ctxprog_max = 256;
- nv40_grctx_init(&ctx);
- pgraph->grctx_size = ctx.ctxvals_pos * 4;
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < ctx.ctxprog_len; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
-
- kfree(cp);
+ /* generate and upload context program */
+ nv40_grctx_init(dev, &pgraph->grctx_size);
/* No context present currently */
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
@@ -366,13 +346,14 @@ nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
static int
nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *grctx;
unsigned long flags;
int i;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
if (!dev_priv->channels.ptr[i])
continue;
grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
@@ -460,7 +441,6 @@ nv40_graph_create(struct drm_device *dev)
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
nouveau_irq_register(dev, 12, nv40_graph_isr);
- NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
@@ -483,8 +463,5 @@ nv40_graph_create(struct drm_device *dev)
else
NVOBJ_CLASS(dev, 0x4097, GR);
- /* nvsw */
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index f70447d131d7..be0a74750fb1 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -595,8 +595,8 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
}
}
-void
-nv40_grctx_init(struct nouveau_grctx *ctx)
+static void
+nv40_grctx_generate(struct nouveau_grctx *ctx)
{
/* decide whether we're loading/unloading the context */
cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
@@ -660,3 +660,31 @@ nv40_grctx_init(struct nouveau_grctx *ctx)
cp_out (ctx, CP_END);
}
+void
+nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+{
+ nv40_grctx_generate(&(struct nouveau_grctx) {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_VALS,
+ .data = mem,
+ });
+}
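/*
 * For readers unfamiliar with the idiom: the compound-literal call in
 * nv40_grctx_fill() above is equivalent to the explicit form the old
 * code used:
 */
static void
nv40_grctx_fill_expanded(struct drm_device *dev, struct nouveau_gpuobj *mem)
{
	struct nouveau_grctx ctx = {};

	ctx.dev  = dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = mem;
	nv40_grctx_generate(&ctx);
}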
+
+void
+nv40_grctx_init(struct drm_device *dev, u32 *size)
+{
+ u32 ctxprog[256], i;
+ struct nouveau_grctx ctx = {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_PROG,
+ .data = ctxprog,
+ .ctxprog_max = ARRAY_SIZE(ctxprog)
+ };
+
+ nv40_grctx_generate(&ctx);
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]);
+ *size = ctx.ctxvals_pos * 4;
+}
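The UCODE_INDEX/UCODE_DATA pair used in nv40_grctx_init() behaves like an indexed port: the index is written once and each DATA write streams the next ctxprog dword. Assuming reads auto-increment the same way (an assumption, not confirmed by this patch), a hypothetical debug dump would be:

static void
nv40_grctx_dump(struct drm_device *dev, u32 len)
{
	u32 i;

	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
	for (i = 0; i < len; i++)
		NV_DEBUG(dev, "ctxprog[%u] = 0x%08x\n", i,
			 nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA));
}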
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index c7615381c5d9..e66273aff493 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -27,6 +27,7 @@
#include "nouveau_bios.h"
#include "nouveau_pm.h"
#include "nouveau_hw.h"
+#include "nouveau_fifo.h"
#define min2(a,b) ((a) < (b) ? (a) : (b))
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 701b927998bf..97a477b3d52d 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -79,15 +79,15 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
NV_ERROR(dev, "no space while blanking crtc\n");
return ret;
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
OUT_RING(evo, 0);
if (dev_priv->chipset != 0x50) {
- BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+ BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
} else {
if (nv_crtc->cursor.visible)
@@ -100,20 +100,20 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
NV_ERROR(dev, "no space while unblanking crtc\n");
return ret;
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
OUT_RING(evo, nv_crtc->lut.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF :
NV50_EVO_CRTC_CLUT_MODE_ON);
OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
if (dev_priv->chipset != 0x50) {
- BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+ BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
OUT_RING(evo, NvEvoVRAM);
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
OUT_RING(evo, nv_crtc->fb.offset >> 8);
OUT_RING(evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
if (dev_priv->chipset != 0x50)
if (nv_crtc->fb.tile_flags == 0x7a00 ||
nv_crtc->fb.tile_flags == 0xfe00)
@@ -158,10 +158,10 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
if (ret == 0) {
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
OUT_RING (evo, mode);
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
FIRE_RING (evo);
}
@@ -193,11 +193,11 @@ nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
OUT_RING (evo, (hue << 20) | (vib << 8));
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
FIRE_RING (evo);
}
@@ -311,9 +311,9 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
if (ret)
return ret;
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
OUT_RING (evo, ctrl);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
OUT_RING (evo, oY << 16 | oX);
OUT_RING (evo, oY << 16 | oX);
@@ -383,23 +383,15 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
static void
nv50_crtc_destroy(struct drm_crtc *crtc)
{
- struct drm_device *dev;
- struct nouveau_crtc *nv_crtc;
-
- if (!crtc)
- return;
-
- dev = crtc->dev;
- nv_crtc = nouveau_crtc(crtc);
-
- NV_DEBUG_KMS(dev, "\n");
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- drm_crtc_cleanup(&nv_crtc->base);
+ NV_DEBUG_KMS(crtc->dev, "\n");
nouveau_bo_unmap(nv_crtc->lut.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ drm_crtc_cleanup(&nv_crtc->base);
kfree(nv_crtc);
}
@@ -593,7 +585,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
if (ret)
return ret;
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
OUT_RING (evo, fb->r_dma);
}
@@ -601,18 +593,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
if (ret)
return ret;
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
OUT_RING (evo, nv_crtc->fb.offset >> 8);
OUT_RING (evo, 0);
OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
OUT_RING (evo, fb->r_pitch);
OUT_RING (evo, fb->r_format);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
OUT_RING (evo, fb->base.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
OUT_RING (evo, (y << 16) | x);
if (nv_crtc->lut.depth != fb->base.depth) {
@@ -672,23 +664,23 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
ret = RING_SPACE(evo, 18);
if (ret == 0) {
- BEGIN_RING(evo, 0, 0x0804 + head, 2);
+ BEGIN_NV04(evo, 0, 0x0804 + head, 2);
OUT_RING (evo, 0x00800000 | mode->clock);
OUT_RING (evo, (ilace == 2) ? 2 : 0);
- BEGIN_RING(evo, 0, 0x0810 + head, 6);
+ BEGIN_NV04(evo, 0, 0x0810 + head, 6);
OUT_RING (evo, 0x00000000); /* border colour */
OUT_RING (evo, (vactive << 16) | hactive);
OUT_RING (evo, ( vsynce << 16) | hsynce);
OUT_RING (evo, (vblanke << 16) | hblanke);
OUT_RING (evo, (vblanks << 16) | hblanks);
OUT_RING (evo, (vblan2e << 16) | vblan2s);
- BEGIN_RING(evo, 0, 0x082c + head, 1);
+ BEGIN_NV04(evo, 0, 0x082c + head, 1);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0900 + head, 1);
+ BEGIN_NV04(evo, 0, 0x0900 + head, 1);
OUT_RING (evo, 0x00000311); /* makes sync channel work */
- BEGIN_RING(evo, 0, 0x08c8 + head, 1);
+ BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
- BEGIN_RING(evo, 0, 0x08d4 + head, 1);
+ BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
OUT_RING (evo, 0x00000000); /* screen position */
}
@@ -755,21 +747,25 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (!nv_crtc)
return -ENOMEM;
+ nv_crtc->index = index;
+ nv_crtc->set_dither = nv50_crtc_set_dither;
+ nv_crtc->set_scale = nv50_crtc_set_scale;
+ nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
nv_crtc->color_vibrance = 50;
nv_crtc->vibrant_hue = 0;
-
- /* Default CLUT parameters, will be activated on the hw upon
- * first mode set.
- */
+ nv_crtc->lut.depth = 0;
for (i = 0; i < 256; i++) {
nv_crtc->lut.r[i] = i << 8;
nv_crtc->lut.g[i] = i << 8;
nv_crtc->lut.b[i] = i << 8;
}
- nv_crtc->lut.depth = 0;
+
+ drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
+ drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
+ drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->lut.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -778,24 +774,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
}
- if (ret) {
- kfree(nv_crtc);
- return ret;
- }
-
- nv_crtc->index = index;
+ if (ret)
+ goto out;
- /* set function pointers */
- nv_crtc->set_dither = nv50_crtc_set_dither;
- nv_crtc->set_scale = nv50_crtc_set_scale;
- nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
-
- drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
- drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
- drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -804,6 +788,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
}
+ if (ret)
+ goto out;
+
nv50_cursor_init(nv_crtc);
- return 0;
+out:
+ if (ret)
+ nv50_crtc_destroy(&nv_crtc->base);
+ return ret;
}
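/*
 * Annotation, not patch code: the restructured nv50_crtc_create() funnels
 * every failure through its single "out:" label.  That is only valid
 * because nouveau_bo_unmap() and nouveau_bo_ref(NULL, &bo) are assumed
 * NULL-safe, letting nv50_crtc_destroy() run on a partially constructed
 * CRTC without crashing.
 */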
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index adfc9b607a50..af4ec7bf3670 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -53,15 +53,15 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
}
if (dev_priv->chipset != 0x50) {
- BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+ BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
OUT_RING(evo, NvEvoVRAM);
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
OUT_RING(evo, nv_crtc->cursor.offset >> 8);
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(evo, 0);
FIRE_RING(evo);
nv_crtc->cursor.visible = true;
@@ -86,16 +86,16 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
NV_ERROR(dev, "no space while hiding cursor\n");
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
OUT_RING(evo, 0);
if (dev_priv->chipset != 0x50) {
- BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+ BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
}
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(evo, 0);
FIRE_RING(evo);
nv_crtc->cursor.visible = false;
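The BEGIN_RING() -> BEGIN_NV04() renames throughout these display files are mechanical. The new helpers are assumed to emit the classic NV04-style command header, with BEGIN_NI04() setting the non-increment bit that the old nv50_fbcon_imageblit() open-coded as method 0x40000860 (see further below); a sketch of the assumed shape:

static inline void
BEGIN_NV04(struct nouveau_channel *chan, int subc, int mthd, int size)
{
	/* incrementing method header: data goes to mthd, mthd+4, ... */
	OUT_RING(chan, 0x00000000 | (subc << 13) | (size << 18) | mthd);
}

static inline void
BEGIN_NI04(struct nouveau_channel *chan, int subc, int mthd, int size)
{
	/* non-incrementing: every data dword goes to the same method */
	OUT_RING(chan, 0x40000000 | (subc << 13) | (size << 18) | mthd);
}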
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 55c56330be6d..eb216a446b89 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -55,9 +55,9 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
NV_ERROR(dev, "no space while disconnecting DAC\n");
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
nv_encoder->crtc = NULL;
@@ -240,7 +240,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
NV_ERROR(dev, "no space while connecting DAC\n");
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
OUT_RING(evo, mode_ctl);
OUT_RING(evo, mode_ctl2);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 8b78b9cfa383..5c41612723b4 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -32,6 +32,7 @@
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "nouveau_ramht.h"
+#include "nouveau_software.h"
#include "drm_crtc_helper.h"
static void nv50_display_isr(struct drm_device *);
@@ -140,11 +141,11 @@ nv50_display_sync(struct drm_device *dev)
ret = RING_SPACE(evo, 6);
if (ret == 0) {
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x80000000);
- BEGIN_RING(evo, 0, 0x0080, 1);
+ BEGIN_NV04(evo, 0, 0x0080, 1);
OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000000);
nv_wo32(disp->ntfy, 0x000, 0x00000000);
@@ -267,7 +268,7 @@ nv50_display_init(struct drm_device *dev)
ret = RING_SPACE(evo, 3);
if (ret)
return ret;
- BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
OUT_RING (evo, NvEvoSync);
@@ -292,7 +293,7 @@ nv50_display_fini(struct drm_device *dev)
ret = RING_SPACE(evo, 2);
if (ret == 0) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(evo, 0);
}
FIRE_RING(evo);
@@ -358,8 +359,11 @@ nv50_display_create(struct drm_device *dev)
dev_priv->engine.display.priv = priv;
/* Create CRTC objects */
- for (i = 0; i < 2; i++)
- nv50_crtc_create(dev, i);
+ for (i = 0; i < 2; i++) {
+ ret = nv50_crtc_create(dev, i);
+ if (ret)
+ return ret;
+ }
/* We setup the encoders from the BIOS table */
for (i = 0 ; i < dcb->entries; i++) {
@@ -438,13 +442,13 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
return;
}
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0094, 1);
+ BEGIN_NV04(evo, 0, 0x0094, 1);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x00c0, 1);
+ BEGIN_NV04(evo, 0, 0x00c0, 1);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0080, 1);
+ BEGIN_NV04(evo, 0, 0x0080, 1);
OUT_RING (evo, 0x00000000);
FIRE_RING (evo);
}
@@ -474,28 +478,28 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
if (dev_priv->chipset < 0xc0) {
- BEGIN_RING(chan, 0, 0x0060, 2);
+ BEGIN_NV04(chan, 0, 0x0060, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, dispc->sem.offset);
- BEGIN_RING(chan, 0, 0x006c, 1);
+ BEGIN_NV04(chan, 0, 0x006c, 1);
OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
- BEGIN_RING(chan, 0, 0x0064, 2);
+ BEGIN_NV04(chan, 0, 0x0064, 2);
OUT_RING (chan, dispc->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
- BEGIN_RING(chan, 0, 0x0060, 1);
+ BEGIN_NV04(chan, 0, 0x0060, 1);
if (dev_priv->chipset < 0x84)
OUT_RING (chan, NvSema);
else
OUT_RING (chan, chan->vram_handle);
} else {
- u64 offset = chan->dispc_vma[nv_crtc->index].offset;
+ u64 offset = nvc0_software_crtc(chan, nv_crtc->index);
offset += dispc->sem.offset;
- BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
+ BEGIN_NVC0(chan, 0, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
+ BEGIN_NVC0(chan, 0, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
@@ -508,40 +512,40 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
/* queue the flip on the crtc's "display sync" channel */
- BEGIN_RING(evo, 0, 0x0100, 1);
+ BEGIN_NV04(evo, 0, 0x0100, 1);
OUT_RING (evo, 0xfffe0000);
if (chan) {
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000100);
} else {
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000010);
/* allows gamma somehow, PDISP will bitch at you if
* you don't wait for vblank before changing this..
*/
- BEGIN_RING(evo, 0, 0x00e0, 1);
+ BEGIN_NV04(evo, 0, 0x00e0, 1);
OUT_RING (evo, 0x40000000);
}
- BEGIN_RING(evo, 0, 0x0088, 4);
+ BEGIN_NV04(evo, 0, 0x0088, 4);
OUT_RING (evo, dispc->sem.offset);
OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
OUT_RING (evo, 0x74b1e000);
OUT_RING (evo, NvEvoSync);
- BEGIN_RING(evo, 0, 0x00a0, 2);
+ BEGIN_NV04(evo, 0, 0x00a0, 2);
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x00c0, 1);
+ BEGIN_NV04(evo, 0, 0x00c0, 1);
OUT_RING (evo, nv_fb->r_dma);
- BEGIN_RING(evo, 0, 0x0110, 2);
+ BEGIN_NV04(evo, 0, 0x0110, 2);
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0800, 5);
+ BEGIN_NV04(evo, 0, 0x0800, 5);
OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
OUT_RING (evo, 0);
OUT_RING (evo, (fb->height << 16) | fb->width);
OUT_RING (evo, nv_fb->r_pitch);
OUT_RING (evo, nv_fb->r_format);
- BEGIN_RING(evo, 0, 0x0080, 1);
+ BEGIN_NV04(evo, 0, 0x0080, 1);
OUT_RING (evo, 0x00000000);
FIRE_RING (evo);
@@ -642,20 +646,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan, *tmp;
-
- list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
- nvsw.vbl_wait) {
- if (chan->nvsw.vblsem_head != crtc)
- continue;
-
- nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
- chan->nvsw.vblsem_rval);
- list_del(&chan->nvsw.vbl_wait);
- drm_vblank_put(dev, crtc);
- }
-
+ nouveau_software_vblank(dev, crtc);
drm_handle_vblank(dev, crtc);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 5d3dd14d2837..e9db9b97f041 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -33,6 +33,7 @@
#include "nouveau_dma.h"
#include "nouveau_reg.h"
#include "nouveau_crtc.h"
+#include "nouveau_software.h"
#include "nv50_evo.h"
struct nv50_display_crtc {
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 9b962e989d7c..ddcd55595824 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -117,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
evo->user_get = 4;
evo->user_put = 0;
- ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+ ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
&evo->pushbuf_bo);
if (ret == 0)
ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
@@ -333,7 +333,7 @@ nv50_evo_create(struct drm_device *dev)
goto err;
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &dispc->sem.bo);
+ 0, 0x0000, NULL, &dispc->sem.bo);
if (!ret) {
ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index bdd2afe29205..f1e4b9e07d14 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -2,6 +2,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_fifo.h"
struct nv50_fb_priv {
struct page *r100c08_page;
@@ -212,6 +213,7 @@ static struct nouveau_enum vm_fault[] = {
void
nv50_fb_vm_trap(struct drm_device *dev, int display)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
const struct nouveau_enum *en, *cl;
unsigned long flags;
@@ -236,7 +238,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display)
/* lookup channel id */
chinst = (trap[2] << 16) | trap[1];
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+ for (ch = 0; ch < pfifo->channels; ch++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
if (!chan || !chan->ramin)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index dc75a7206524..e3c8b05dcae4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -43,22 +43,22 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
return ret;
if (rect->rop != ROP_COPY) {
- BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
OUT_RING(chan, 1);
}
- BEGIN_RING(chan, NvSub2D, 0x0588, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0588, 1);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR)
OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
else
OUT_RING(chan, rect->color);
- BEGIN_RING(chan, NvSub2D, 0x0600, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x0600, 4);
OUT_RING(chan, rect->dx);
OUT_RING(chan, rect->dy);
OUT_RING(chan, rect->dx + rect->width);
OUT_RING(chan, rect->dy + rect->height);
if (rect->rop != ROP_COPY) {
- BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
OUT_RING(chan, 3);
}
FIRE_RING(chan);
@@ -78,14 +78,14 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
if (ret)
return ret;
- BEGIN_RING(chan, NvSub2D, 0x0110, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0110, 1);
OUT_RING(chan, 0);
- BEGIN_RING(chan, NvSub2D, 0x08b0, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x08b0, 4);
OUT_RING(chan, region->dx);
OUT_RING(chan, region->dy);
OUT_RING(chan, region->width);
OUT_RING(chan, region->height);
- BEGIN_RING(chan, NvSub2D, 0x08d0, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x08d0, 4);
OUT_RING(chan, 0);
OUT_RING(chan, region->sx);
OUT_RING(chan, 0);
@@ -116,7 +116,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
- BEGIN_RING(chan, NvSub2D, 0x0814, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
OUT_RING(chan, palette[image->bg_color] | mask);
@@ -125,10 +125,10 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING(chan, image->bg_color);
OUT_RING(chan, image->fg_color);
}
- BEGIN_RING(chan, NvSub2D, 0x0838, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0838, 2);
OUT_RING(chan, image->width);
OUT_RING(chan, image->height);
- BEGIN_RING(chan, NvSub2D, 0x0850, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x0850, 4);
OUT_RING(chan, 0);
OUT_RING(chan, image->dx);
OUT_RING(chan, 0);
@@ -143,7 +143,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
dwords -= push;
- BEGIN_RING(chan, NvSub2D, 0x40000860, push);
+ BEGIN_NI04(chan, NvSub2D, 0x0860, push);
OUT_RINGp(chan, data, push);
data += push;
}
@@ -199,60 +199,59 @@ nv50_fbcon_accel_init(struct fb_info *info)
return ret;
}
- BEGIN_RING(chan, NvSub2D, 0x0000, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
OUT_RING(chan, Nv2D);
- BEGIN_RING(chan, NvSub2D, 0x0180, 4);
- OUT_RING(chan, NvNotify0);
+ BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
OUT_RING(chan, chan->vram_handle);
OUT_RING(chan, chan->vram_handle);
OUT_RING(chan, chan->vram_handle);
- BEGIN_RING(chan, NvSub2D, 0x0290, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
OUT_RING(chan, 0);
- BEGIN_RING(chan, NvSub2D, 0x0888, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
OUT_RING(chan, 3);
- BEGIN_RING(chan, NvSub2D, 0x02a0, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x02a0, 1);
OUT_RING(chan, 0x55);
- BEGIN_RING(chan, NvSub2D, 0x08c0, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x08c0, 4);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0580, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0580, 2);
OUT_RING(chan, 4);
OUT_RING(chan, format);
- BEGIN_RING(chan, NvSub2D, 0x02e8, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x02e8, 2);
OUT_RING(chan, 2);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0804, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0804, 1);
OUT_RING(chan, format);
- BEGIN_RING(chan, NvSub2D, 0x0800, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0800, 1);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0808, 3);
+ BEGIN_NV04(chan, NvSub2D, 0x0808, 3);
OUT_RING(chan, 0);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x081c, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x081c, 1);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0840, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x0840, 4);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0200, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0200, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0214, 5);
+ BEGIN_NV04(chan, NvSub2D, 0x0214, 5);
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
OUT_RING(chan, upper_32_bits(fb->vma.offset));
OUT_RING(chan, lower_32_bits(fb->vma.offset));
- BEGIN_RING(chan, NvSub2D, 0x0230, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0244, 5);
+ BEGIN_NV04(chan, NvSub2D, 0x0244, 5);
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 3bc2a565c20b..55383b85db0b 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
@@ -27,480 +27,268 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
-static void
+struct nv50_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nv50_fifo_chan {
+ struct nouveau_fifo_chan base;
+};
+
+void
nv50_fifo_playlist_update(struct drm_device *dev)
{
+ struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_gpuobj *cur;
- int i, nr;
-
- NV_DEBUG(dev, "\n");
+ int i, p;
- cur = pfifo->playlist[pfifo->cur_playlist];
- pfifo->cur_playlist = !pfifo->cur_playlist;
+ cur = priv->playlist[priv->cur_playlist];
+ priv->cur_playlist = !priv->cur_playlist;
- /* We never schedule channel 0 or 127 */
- for (i = 1, nr = 0; i < 127; i++) {
- if (dev_priv->channels.ptr[i] &&
- dev_priv->channels.ptr[i]->ramfc) {
- nv_wo32(cur, (nr * 4), i);
- nr++;
- }
+ for (i = 0, p = 0; i < priv->base.channels; i++) {
+ if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
+ nv_wo32(cur, p++ * 4, i);
}
- dev_priv->engine.instmem.flush(dev);
-
- nv_wr32(dev, 0x32f4, cur->vinst >> 12);
- nv_wr32(dev, 0x32ec, nr);
- nv_wr32(dev, 0x2500, 0x101);
-}
-static void
-nv50_fifo_channel_enable(struct drm_device *dev, int channel)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
- uint32_t inst;
-
- NV_DEBUG(dev, "ch%d\n", channel);
-
- if (dev_priv->chipset == 0x50)
- inst = chan->ramfc->vinst >> 12;
- else
- inst = chan->ramfc->vinst >> 8;
+ dev_priv->engine.instmem.flush(dev);
- nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
- NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
+ nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
+ nv_wr32(dev, 0x0032ec, p);
+ nv_wr32(dev, 0x002500, 0x00000101);
}
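/*
 * Annotation: the rewritten playlist update is double-buffered.  The
 * inactive gpuobj is filled with every runnable channel id while PFIFO
 * can still read the previous list, then 0x32f4/0x32ec point the hardware
 * at the new one.  Runnability is probed straight from the context table;
 * a hypothetical helper makes the encoding explicit:
 */
static bool
nv50_fifo_chan_enabled(struct drm_device *dev, int chid)
{
	/* bit 31 of a channel's 0x002600 entry marks it runnable */
	return !!(nv_rd32(dev, 0x002600 + (chid * 4)) & 0x80000000);
}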
-static void
-nv50_fifo_channel_disable(struct drm_device *dev, int channel)
+static int
+nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
{
+ struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
+ struct nv50_fifo_chan *fctx;
+ struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t inst;
-
- NV_DEBUG(dev, "ch%d\n", channel);
+ u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
+ u64 instance = chan->ramin->vinst >> 12;
+ unsigned long flags;
+ int ret = 0, i;
- if (dev_priv->chipset == 0x50)
- inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
- else
- inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
- nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
-}
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ atomic_inc(&chan->vm->engref[engine]);
-static void
-nv50_fifo_init_reset(struct drm_device *dev)
-{
- uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
- NV_DEBUG(dev, "\n");
+ for (i = 0; i < 0x100; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+ nv_wo32(chan->ramin, 0x3c, 0x403f6078);
+ nv_wo32(chan->ramin, 0x40, 0x00000000);
+ nv_wo32(chan->ramin, 0x44, 0x01003fff);
+ nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
+ nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
+ nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
+ drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(chan->ramin, 0x60, 0x7fffffff);
+ nv_wo32(chan->ramin, 0x78, 0x00000000);
+ nv_wo32(chan->ramin, 0x7c, 0x30000001);
+ nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->cinst >> 4));
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
-}
+ dev_priv->engine.instmem.flush(dev);
-static void
-nv50_fifo_init_intr(struct drm_device *dev)
-{
- NV_DEBUG(dev, "\n");
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
+ nv50_fifo_playlist_update(dev);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
}
-static void
-nv50_fifo_init_context_table(struct drm_device *dev)
+static bool
+nv50_fifo_kickoff(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
-
- NV_DEBUG(dev, "\n");
-
- for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
- if (dev_priv->channels.ptr[i])
- nv50_fifo_channel_enable(dev, i);
- else
- nv50_fifo_channel_disable(dev, i);
+ struct drm_device *dev = chan->dev;
+ bool done = true;
+ u32 me;
+
+ /* HW bug workaround:
+ *
+ * PFIFO will hang forever if the connected engines don't report
+ * that they've processed the context switch request.
+ *
+ * In order for the kickoff to work, we need to ensure all the
+ * connected engines are in a state where they can answer.
+ *
+ * Newer chipsets don't seem to suffer from this issue, and there's
+ * also an "ignore these engines" bitmask reg we can use if we hit
+ * the issue there.
+ */
+
+ /* PME: make sure engine is enabled */
+ me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
+
+ /* do the kickoff... */
+ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+ if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
+ NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+ done = false;
}
- nv50_fifo_playlist_update(dev);
+ /* restore any engine states we changed, and exit */
+ nv_wr32(dev, 0x00b860, me);
+ return done;
}
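/*
 * Annotation: nv_wait_ne() above is assumed to poll until the masked
 * register no longer equals the given value, returning false on timeout;
 * 0xffffffff in 0x32fc means the kickoff is still pending.  A minimal
 * open-coded stand-in (hypothetical busy-wait, not the driver's
 * timer-based helper):
 */
static bool
wait_reg_ne(struct drm_device *dev, u32 reg, u32 mask, u32 data)
{
	unsigned int us = 2000000;      /* ~2s, a typical HW timeout */

	while (us--) {
		if ((nv_rd32(dev, reg) & mask) != data)
			return true;
		udelay(1);
	}
	return false;   /* timed out: engines never acked the request */
}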
static void
-nv50_fifo_init_regs__nv(struct drm_device *dev)
-{
- NV_DEBUG(dev, "\n");
-
- nv_wr32(dev, 0x250c, 0x6f3cfc34);
-}
-
-static void
-nv50_fifo_init_regs(struct drm_device *dev)
-{
- NV_DEBUG(dev, "\n");
-
- nv_wr32(dev, 0x2500, 0);
- nv_wr32(dev, 0x3250, 0);
- nv_wr32(dev, 0x3220, 0);
- nv_wr32(dev, 0x3204, 0);
- nv_wr32(dev, 0x3210, 0);
- nv_wr32(dev, 0x3270, 0);
- nv_wr32(dev, 0x2044, 0x01003fff);
-
- /* Enable dummy channels setup by nv50_instmem.c */
- nv50_fifo_channel_enable(dev, 0);
- nv50_fifo_channel_enable(dev, 127);
-}
-
-int
-nv50_fifo_init(struct drm_device *dev)
+nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
{
+ struct nv50_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- int ret;
+ unsigned long flags;
- NV_DEBUG(dev, "\n");
+ /* remove channel from playlist, will context switch if active */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
+ nv50_fifo_playlist_update(dev);
- if (pfifo->playlist[0]) {
- pfifo->cur_playlist = !pfifo->cur_playlist;
- goto just_reset;
- }
+ /* tell any engines on this channel to unload their contexts */
+ nv50_fifo_kickoff(chan);
- ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pfifo->playlist[0]);
- if (ret) {
- NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
- return ret;
- }
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pfifo->playlist[1]);
- if (ret) {
- nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
- NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
- return ret;
+ /* clean up */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
}
-just_reset:
- nv50_fifo_init_reset(dev);
- nv50_fifo_init_intr(dev);
- nv50_fifo_init_context_table(dev);
- nv50_fifo_init_regs__nv(dev);
- nv50_fifo_init_regs(dev);
- dev_priv->engine.fifo.enable(dev);
- dev_priv->engine.fifo.reassign(dev, true);
-
- return 0;
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
}
-void
-nv50_fifo_takedown(struct drm_device *dev)
+static int
+nv50_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ u32 instance;
+ int i;
- NV_DEBUG(dev, "\n");
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x00250c, 0x6f3cfc34);
+ nv_wr32(dev, 0x002044, 0x01003fff);
- if (!pfifo->playlist[0])
- return;
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
- nv_wr32(dev, 0x2140, 0x00000000);
- nouveau_irq_unregister(dev, 8);
+ for (i = 0; i < 128; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan && chan->engctx[engine])
+ instance = 0x80000000 | chan->ramin->vinst >> 12;
+ else
+ instance = 0x00000000;
+ nv_wr32(dev, 0x002600 + (i * 4), instance);
+ }
- nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
- nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
-}
+ nv50_fifo_playlist_update(dev);
-int
-nv50_fifo_channel_id(struct drm_device *dev)
-{
- return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
- NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
+ nv_wr32(dev, 0x003200, 1);
+ nv_wr32(dev, 0x003250, 1);
+ nv_wr32(dev, 0x002500, 1);
+ return 0;
}
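/*
 * Annotation: nv50_fifo_init() above rebuilds all 128 context-table
 * entries from software state, while context_new()/context_del() patch
 * individual slots.  A hypothetical helper showing the shared encoding:
 */
static void
nv50_fifo_ctx_set(struct drm_device *dev, int chid,
		  struct nouveau_gpuobj *ramin)
{
	/* bit 31 = enabled; low bits = instance address >> 12 */
	u32 inst = ramin ? 0x80000000 | ramin->vinst >> 12 : 0x00000000;

	nv_wr32(dev, 0x002600 + (chid * 4), inst);
}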
-int
-nv50_fifo_create_context(struct nouveau_channel *chan)
+static int
+nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
- struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramfc = NULL;
- uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
- unsigned long flags;
- int ret;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- if (dev_priv->chipset == 0x50) {
- ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
- chan->ramin->vinst, 0x100,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE,
- &chan->ramfc);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
- chan->ramin->vinst + 0x0400,
- 4096, 0, &chan->cache);
- if (ret)
- return ret;
- } else {
- ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
- 0, &chan->cache);
- if (ret)
- return ret;
- }
- ramfc = chan->ramfc;
+ struct nv50_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_USER(chan->id), PAGE_SIZE);
- if (!chan->user)
- return -ENOMEM;
+ /* set playlist length to zero, fifo will unload context */
+ nv_wr32(dev, 0x0032ec, 0);
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
- nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->cinst >> 4));
- nv_wo32(ramfc, 0x44, 0x01003fff);
- nv_wo32(ramfc, 0x60, 0x7fffffff);
- nv_wo32(ramfc, 0x40, 0x00000000);
- nv_wo32(ramfc, 0x7c, 0x30000001);
- nv_wo32(ramfc, 0x78, 0x00000000);
- nv_wo32(ramfc, 0x3c, 0x403f6078);
- nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
- nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
- drm_order(chan->dma.ib_max + 1) << 16);
-
- if (dev_priv->chipset != 0x50) {
- nv_wo32(chan->ramin, 0, chan->id);
- nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);
-
- nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
- nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
+ /* tell all connected engines to unload their contexts */
+ for (i = 0; i < priv->base.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan && !nv50_fifo_kickoff(chan))
+ return -EBUSY;
}
- dev_priv->engine.instmem.flush(dev);
-
- nv50_fifo_channel_enable(dev, chan->id);
- nv50_fifo_playlist_update(dev);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, 0x002140, 0);
return 0;
}
void
-nv50_fifo_destroy_context(struct nouveau_channel *chan)
+nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nouveau_gpuobj *ramfc = NULL;
- unsigned long flags;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- pfifo->reassign(dev, false);
-
- /* Unload the context if it's the currently active one */
- if (pfifo->channel_id(dev) == chan->id) {
- pfifo->disable(dev);
- pfifo->unload_context(dev);
- pfifo->enable(dev);
- }
-
- /* This will ensure the channel is seen as disabled. */
- nouveau_gpuobj_ref(chan->ramfc, &ramfc);
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
- nv50_fifo_channel_disable(dev, chan->id);
-
- /* Dummy channel, also used on ch 127 */
- if (chan->id == 0)
- nv50_fifo_channel_disable(dev, 127);
- nv50_fifo_playlist_update(dev);
-
- pfifo->reassign(dev, true);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* Free the channel resources */
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
- }
- nouveau_gpuobj_ref(NULL, &ramfc);
- nouveau_gpuobj_ref(NULL, &chan->cache);
+ nv50_vm_flush_engine(dev, 5);
}
-int
-nv50_fifo_load_context(struct nouveau_channel *chan)
+void
+nv50_fifo_destroy(struct drm_device *dev, int engine)
{
- struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramfc = chan->ramfc;
- struct nouveau_gpuobj *cache = chan->cache;
- int ptr, cnt;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
- nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
- nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
- nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
- nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
- nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
- nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
- nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
- nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
- nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
- nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
- nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
- nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
- nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
- nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
- nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
- nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
- nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
- nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
- nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
- nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
- nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
- nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
- nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
- nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
- nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
- nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
- nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
- nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
- nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
- nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
- nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
- nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));
-
- cnt = nv_ro32(ramfc, 0x84);
- for (ptr = 0; ptr < cnt; ptr++) {
- nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
- nv_ro32(cache, (ptr * 8) + 0));
- nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
- nv_ro32(cache, (ptr * 8) + 4));
- }
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
-
- /* guessing that all the 0x34xx regs aren't on NV50 */
- if (dev_priv->chipset != 0x50) {
- nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
- nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
- nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
- nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
- nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
- }
+ struct nv50_fifo_priv *priv = nv_engine(dev, engine);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
- return 0;
+ nouveau_irq_unregister(dev, 8);
+
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
}
int
-nv50_fifo_unload_context(struct drm_device *dev)
+nv50_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nouveau_gpuobj *ramfc, *cache;
- struct nouveau_channel *chan = NULL;
- int chid, get, put, ptr;
-
- NV_DEBUG(dev, "\n");
-
- chid = pfifo->channel_id(dev);
- if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
- return 0;
-
- chan = dev_priv->channels.ptr[chid];
- if (!chan) {
- NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
- return -EINVAL;
- }
- NV_DEBUG(dev, "ch%d\n", chan->id);
- ramfc = chan->ramfc;
- cache = chan->cache;
-
- nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
- nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
- nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
- nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
- nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
- nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
- nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
- nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
- nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
- nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
- nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
- nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
- nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
- nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
- nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
- nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
- nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
- nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
- nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
- nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
- nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
- nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
- nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
- nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
- nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
- nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
- nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
- nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
- nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
- nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
- nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
- nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
- nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));
-
- put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
- get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
- ptr = 0;
- while (put != get) {
- nv_wo32(cache, ptr + 0,
- nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
- nv_wo32(cache, ptr + 4,
- nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
- get = (get + 1) & 0x1ff;
- ptr += 8;
- }
-
- /* guessing that all the 0x34xx regs aren't on NV50 */
- if (dev_priv->chipset != 0x50) {
- nv_wo32(ramfc, 0x84, ptr >> 3);
- nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
- nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
- nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
- nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
- nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
- }
+ struct nv50_fifo_priv *priv;
+ int ret;
- dev_priv->engine.instmem.flush(dev);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- /*XXX: probably reload ch127 (NULL) state back too */
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
- return 0;
-}
+ priv->base.base.destroy = nv50_fifo_destroy;
+ priv->base.base.init = nv50_fifo_init;
+ priv->base.base.fini = nv50_fifo_fini;
+ priv->base.base.context_new = nv50_fifo_context_new;
+ priv->base.base.context_del = nv50_fifo_context_del;
+ priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
+ priv->base.channels = 127;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
+ if (ret)
+ goto error;
-void
-nv50_fifo_tlb_flush(struct drm_device *dev)
-{
- nv50_vm_flush_engine(dev, 5);
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
}
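
The two playlist objects allocated above exist so the channel list can be double-buffered. nv50_fifo_playlist_update() is not part of this hunk; the sketch below shows the likely shape, assuming nv50_fifo_priv mirrors the nv84_fifo_priv layout shown later. The 0x002600 enable bit and the 0x0032ec length come from code visible elsewhere in this patch; the 0x0032f4 and 0x002500 writes are assumptions:

void
nv50_fifo_playlist_update(struct drm_device *dev)
{
	struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_gpuobj *cur;
	int i, p;

	/* flip buffers so the hw never reads a half-written list */
	cur = priv->playlist[priv->cur_playlist];
	priv->cur_playlist = !priv->cur_playlist;

	/* one 32-bit channel id per enabled channel */
	for (i = 0, p = 0; i < priv->base.channels; i++) {
		if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
			nv_wo32(cur, p++ * 4, i);
	}
	/* instmem flush elided */

	nv_wr32(dev, 0x0032f4, cur->vinst >> 12);	/* assumed: list base */
	nv_wr32(dev, 0x0032ec, p);			/* list length */
	nv_wr32(dev, 0x002500, 0x00000101);		/* assumed: reschedule */
}
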
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 33d5711a918d..d9cc2f2638d6 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -27,8 +27,8 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
-#include "nouveau_grctx.h"
#include "nouveau_dma.h"
#include "nouveau_vm.h"
#include "nv50_evo.h"
@@ -40,86 +40,6 @@ struct nv50_graph_engine {
u32 grctx_size;
};
-static void
-nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
-{
- const uint32_t mask = 0x00010001;
-
- if (enabled)
- nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
- else
- nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
-}
-
-static struct nouveau_channel *
-nv50_graph_channel(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t inst;
- int i;
-
- /* Be sure we're not in the middle of a context switch or bad things
- * will happen, such as unloading the wrong pgraph context.
- */
- if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
- NV_ERROR(dev, "Ctxprog is still running\n");
-
- inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
- if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
- return NULL;
- inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-
- if (chan && chan->ramin && chan->ramin->vinst == inst)
- return chan;
- }
-
- return NULL;
-}
-
-static int
-nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
-{
- uint32_t fifo = nv_rd32(dev, 0x400500);
-
- nv_wr32(dev, 0x400500, fifo & ~1);
- nv_wr32(dev, 0x400784, inst);
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
- nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
- nv_wr32(dev, 0x400040, 0xffffffff);
- (void)nv_rd32(dev, 0x400040);
- nv_wr32(dev, 0x400040, 0x00000000);
- nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
-
- if (nouveau_wait_for_idle(dev))
- nv_wr32(dev, 0x40032c, inst | (1<<31));
- nv_wr32(dev, 0x400500, fifo);
-
- return 0;
-}
-
-static int
-nv50_graph_unload_context(struct drm_device *dev)
-{
- uint32_t inst;
-
- inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
- if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
- return 0;
- inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
-
- nouveau_wait_for_idle(dev);
- nv_wr32(dev, 0x400784, inst);
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
- nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
- nouveau_wait_for_idle(dev);
-
- nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
- return 0;
-}
-
static int
nv50_graph_init(struct drm_device *dev, int engine)
{
@@ -211,12 +131,6 @@ nv50_graph_init(struct drm_device *dev, int engine)
static int
nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
- nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
- if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
- nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
- return -EBUSY;
- }
- nv50_graph_unload_context(dev);
nv_wr32(dev, 0x40013c, 0x00000000);
return 0;
}
@@ -229,7 +143,6 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *ramin = chan->ramin;
struct nouveau_gpuobj *grctx = NULL;
struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
- struct nouveau_grctx ctx = {};
int hdr, ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -248,11 +161,7 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(ramin, hdr + 0x10, 0);
nv_wo32(ramin, hdr + 0x14, 0x00010000);
- ctx.dev = chan->dev;
- ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = grctx;
- nv50_grctx_init(&ctx);
-
+ nv50_grctx_fill(dev, grctx);
nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
dev_priv->engine.instmem.flush(dev);
@@ -268,33 +177,14 @@ nv50_graph_context_del(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *grctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
- unsigned long flags;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- if (!chan->ramin)
- return;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- pfifo->reassign(dev, false);
- nv50_graph_fifo_access(dev, false);
-
- if (nv50_graph_channel(dev) == chan)
- nv50_graph_unload_context(dev);
for (i = hdr; i < hdr + 24; i += 4)
nv_wo32(chan->ramin, i, 0);
dev_priv->engine.instmem.flush(dev);
- nv50_graph_fifo_access(dev, true);
- pfifo->reassign(dev, true);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- nouveau_gpuobj_ref(NULL, &grctx);
-
atomic_dec(&chan->vm->engref[engine]);
+ nouveau_gpuobj_ref(NULL, &grctx);
chan->engctx[engine] = NULL;
}
@@ -325,85 +215,6 @@ nv50_graph_object_new(struct nouveau_channel *chan, int engine,
}
static void
-nv50_graph_context_switch(struct drm_device *dev)
-{
- uint32_t inst;
-
- nv50_graph_unload_context(dev);
-
- inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
- inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
- nv50_graph_do_load_context(dev, inst);
-
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
- NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
-}
-
-static int
-nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct nouveau_gpuobj *gpuobj;
-
- gpuobj = nouveau_ramht_find(chan, data);
- if (!gpuobj)
- return -ENOENT;
-
- if (nouveau_notifier_offset(gpuobj, NULL))
- return -EINVAL;
-
- chan->nvsw.vblsem = gpuobj;
- chan->nvsw.vblsem_offset = ~0;
- return 0;
-}
-
-static int
-nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
- return -ERANGE;
-
- chan->nvsw.vblsem_offset = data >> 2;
- return 0;
-}
-
-static int
-nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- chan->nvsw.vblsem_rval = data;
- return 0;
-}
-
-static int
-nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
- return -EINVAL;
-
- drm_vblank_get(dev, data);
-
- chan->nvsw.vblsem_head = data;
- list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
-
- return 0;
-}
-
-static int
-nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- nouveau_finish_page_flip(chan, NULL);
- return 0;
-}
-
-
-static void
nv50_graph_tlb_flush(struct drm_device *dev, int engine)
{
nv50_vm_flush_engine(dev, 0);
@@ -514,6 +325,7 @@ struct nouveau_enum nv50_data_error_names[] = {
{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
+ { 0x00000024, "VP_ZERO_INPUTS", NULL },
{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
@@ -900,13 +712,14 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
int
nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
unsigned long flags;
int i;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->ramin)
continue;
@@ -939,15 +752,6 @@ nv50_graph_isr(struct drm_device *dev)
show &= ~0x00000010;
}
- if (stat & 0x00001000) {
- nv_wr32(dev, 0x400500, 0x00000000);
- nv_wr32(dev, 0x400100, 0x00001000);
- nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
- nv50_graph_context_switch(dev);
- stat &= ~0x00001000;
- show &= ~0x00001000;
- }
-
show = (show && nouveau_ratelimit()) ? show : 0;
if (show & 0x00100000) {
@@ -996,28 +800,21 @@ nv50_graph_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_graph_engine *pgraph;
- struct nouveau_grctx ctx = {};
int ret;
pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL);
if (!pgraph)
return -ENOMEM;
- ctx.dev = dev;
- ctx.mode = NOUVEAU_GRCTX_PROG;
- ctx.data = pgraph->ctxprog;
- ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);
-
- ret = nv50_grctx_init(&ctx);
+ ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog),
+ &pgraph->ctxprog_size,
+ &pgraph->grctx_size);
if (ret) {
NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
kfree(pgraph);
return 0;
}
- pgraph->grctx_size = ctx.ctxvals_pos * 4;
- pgraph->ctxprog_size = ctx.ctxprog_len;
-
pgraph->base.destroy = nv50_graph_destroy;
pgraph->base.init = nv50_graph_init;
pgraph->base.fini = nv50_graph_fini;
@@ -1031,14 +828,6 @@ nv50_graph_create(struct drm_device *dev)
nouveau_irq_register(dev, 12, nv50_graph_isr);
- /* NVSW really doesn't live here... */
- NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
- NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
- NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
- NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
- NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
-
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index 4b46d6968566..881e22b249fc 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -172,8 +172,8 @@ static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
/* Main function: construct the ctxprog skeleton, call the other functions. */
-int
-nv50_grctx_init(struct nouveau_grctx *ctx)
+static int
+nv50_grctx_generate(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
@@ -210,7 +210,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
cp_name(ctx, cp_check_load);
cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
- cp_bra (ctx, ALWAYS, TRUE, cp_exit);
+ cp_bra (ctx, ALWAYS, TRUE, cp_prepare_exit);
/* setup for context load */
cp_name(ctx, cp_setup_auto_load);
@@ -277,6 +277,33 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
return 0;
}
+void
+nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+{
+ nv50_grctx_generate(&(struct nouveau_grctx) {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_VALS,
+ .data = mem,
+ });
+}
+
+int
+nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt)
+{
+ struct nouveau_grctx ctx = {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_PROG,
+ .data = data,
+ .ctxprog_max = max
+ };
+ int ret;
+
+ ret = nv50_grctx_generate(&ctx);
+ *cnt = ctx.ctxvals_pos * 4;
+ *len = ctx.ctxprog_len;
+ return ret;
+}
+
/*
* Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
* registers to save/restore and the default values for them.
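
A side note on nv50_grctx_fill() above: it passes a pointer to a C99 compound literal, so the one-shot argument struct never needs a name. The same idiom in isolation (types and names here are made up for illustration):

struct args {
	int mode;
	void *data;
};

static void consume(struct args *a) { (void)a; /* ... */ }

static void demo(void *data)
{
	/* fields not mentioned are zero-initialized */
	consume(&(struct args) {
		.mode = 1,
		.data = data,
	});
}
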
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a7c12c94a5a6..0bba54f11800 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -83,7 +83,7 @@ nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
return ret;
}
- ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
+ ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size - 0x6000);
if (ret) {
nv50_channel_del(&chan);
return ret;
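
The one-line change above is a fix rather than a cleanup: drm_mm_init(mm, start, size) manages the range [start, start + size), so passing the object's full size together with the 0x6000 start offset let the heap hand out offsets beyond the end of ramin. A worked check, assuming a 0x10000-byte ramin:

/* before: manages [0x6000, 0x16000) -- 0x6000 bytes past the object */
drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
/* after: manages [0x6000, 0x10000) -- exactly the unreserved tail */
drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size - 0x6000);
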
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
index b57a2d180ad2..90e8ed22cfcb 100644
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -77,27 +77,13 @@ nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
static void
nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_gpuobj *ctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
- unsigned long flags;
- u32 inst, i;
-
- if (!chan->ramin)
- return;
-
- inst = chan->ramin->vinst >> 12;
- inst |= 0x80000000;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
- if (nv_rd32(dev, 0x00b318) == inst)
- nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ int i;
for (i = 0x00; i <= 0x14; i += 4)
nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
+
nouveau_gpuobj_ref(NULL, &ctx);
chan->engctx[engine] = NULL;
}
@@ -162,7 +148,6 @@ nv50_mpeg_init(struct drm_device *dev, int engine)
static int
nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
{
- /*XXX: context save for s/r */
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
nv_wr32(dev, 0x00b140, 0x00000000);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
new file mode 100644
index 000000000000..114d2517d4a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_software.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+#include "nouveau_software.h"
+
+#include "nv50_display.h"
+
+struct nv50_software_priv {
+ struct nouveau_software_priv base;
+};
+
+struct nv50_software_chan {
+ struct nouveau_software_chan base;
+ struct {
+ struct nouveau_gpuobj *object;
+ } vblank;
+};
+
+static int
+mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+ struct nouveau_gpuobj *gpuobj;
+
+ gpuobj = nouveau_ramht_find(chan, data);
+ if (!gpuobj)
+ return -ENOENT;
+
+ if (nouveau_notifier_offset(gpuobj, NULL))
+ return -EINVAL;
+
+ pch->vblank.object = gpuobj;
+ pch->base.vblank.offset = ~0;
+ return 0;
+}
+
+static int
+mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+
+ if (nouveau_notifier_offset(pch->vblank.object, &data))
+ return -ERANGE;
+
+ pch->base.vblank.offset = data >> 2;
+ return 0;
+}
+
+static int
+mthd_vblsem_value(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+ pch->base.vblank.value = data;
+ return 0;
+}
+
+static int
+mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
+ struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+ struct drm_device *dev = chan->dev;
+
+ if (!pch->vblank.object || pch->base.vblank.offset == ~0 || data > 1)
+ return -EINVAL;
+
+ drm_vblank_get(dev, data);
+
+ pch->base.vblank.head = data;
+ list_add(&pch->base.vblank.list, &psw->base.vblank);
+ return 0;
+}
+
+static int
+mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ nouveau_finish_page_flip(chan, NULL);
+ return 0;
+}
+
+static int
+nv50_software_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
+ struct nv50_display *pdisp = nv50_display(chan->dev);
+ struct nv50_software_chan *pch;
+ int ret = 0, i;
+
+ pch = kzalloc(sizeof(*pch), GFP_KERNEL);
+ if (!pch)
+ return -ENOMEM;
+
+ nouveau_software_context_new(&pch->base);
+ pch->base.vblank.bo = chan->notifier_bo;
+ chan->engctx[engine] = pch;
+
+ /* dma objects for display sync channel semaphore blocks */
+ for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
+ struct nv50_display_crtc *dispc = &pdisp->crtc[i];
+ struct nouveau_gpuobj *obj = NULL;
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ dispc->sem.bo->bo.offset, 0x1000,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &obj);
+ if (ret)
+ break;
+
+ ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ }
+
+ if (ret)
+ psw->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nv50_software_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv50_software_chan *pch = chan->engctx[engine];
+ chan->engctx[engine] = NULL;
+ kfree(pch);
+}
+
+static int
+nv50_software_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 0;
+ obj->class = class;
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
+static int
+nv50_software_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static int
+nv50_software_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nv50_software_destroy(struct drm_device *dev, int engine)
+{
+ struct nv50_software_priv *psw = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, SW);
+ kfree(psw);
+}
+
+int
+nv50_software_create(struct drm_device *dev)
+{
+ struct nv50_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
+ if (!psw)
+ return -ENOMEM;
+
+ psw->base.base.destroy = nv50_software_destroy;
+ psw->base.base.init = nv50_software_init;
+ psw->base.base.fini = nv50_software_fini;
+ psw->base.base.context_new = nv50_software_context_new;
+ psw->base.base.context_del = nv50_software_context_del;
+ psw->base.base.object_new = nv50_software_object_new;
+ nouveau_software_create(&psw->base);
+
+ NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x018c, mthd_dma_vblsem);
+ NVOBJ_MTHD (dev, 0x506e, 0x0400, mthd_vblsem_offset);
+ NVOBJ_MTHD (dev, 0x506e, 0x0404, mthd_vblsem_value);
+ NVOBJ_MTHD (dev, 0x506e, 0x0408, mthd_vblsem_release);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, mthd_flip);
+ return 0;
+}
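
For context, the methods above only queue work: mthd_vblsem_release takes a vblank reference and parks the channel's entry on psw->base.vblank. The consumer runs from the vblank interrupt in the shared software-engine code, which is not part of this file; a sketch of what it presumably does (nouveau_bo_wr32 usage and the elided locking are assumptions):

static void
vblank_release_sketch(struct drm_device *dev, int crtc)
{
	struct nv50_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
	struct nouveau_software_chan *pch, *tmp;

	/* locking elided; the real code must guard this list */
	list_for_each_entry_safe(pch, tmp, &psw->base.vblank, vblank.list) {
		if (pch->vblank.head != crtc)
			continue;
		/* write the release value the pushbuf is spinning on */
		nouveau_bo_wr32(pch->vblank.bo, pch->vblank.offset,
				pch->vblank.value);
		list_del(&pch->vblank.list);
		drm_vblank_put(dev, crtc);
	}
}
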
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 274640212475..a9514eaa74c1 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -242,9 +242,9 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
NV_ERROR(dev, "no space while disconnecting SOR\n");
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
nouveau_hdmi_mode_set(encoder, NULL);
@@ -430,7 +430,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nv_encoder->crtc = NULL;
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
OUT_RING(evo, mode_ctl);
}
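
The BEGIN_RING to BEGIN_NV04 rename is mechanical: both emit the classic NV04-format method header, and the new name only records which header format the channel speaks. Roughly what the helper (presumably in nouveau_dma.h) looks like; the exact definition is not in this patch:

static inline void
BEGIN_NV04(struct nouveau_channel *chan, int subc, int mthd, int size)
{
	/* method count, subchannel and method offset packed in one word */
	OUT_RING(chan, (size << 18) | (subc << 13) | mthd);
}
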
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 44fbac9c7d93..179bb42a635c 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -147,7 +147,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
{
struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
int i;
pinstmem->flush(vm->dev);
@@ -158,7 +157,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
return;
}
- pfifo->tlb_flush(vm->dev);
for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
if (atomic_read(&vm->engref[i]))
dev_priv->eng[i]->tlb_flush(vm->dev, i);
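
All of the per-engine tlb_flush hooks in this series funnel into nv50_vm_flush_engine() with an engine id (0 for PGRAPH, 5 for PFIFO, 0x0a for PCRYPT). The helper is not shown here; a sketch of the usual shape, with the 0x100c80 details being an assumption:

void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
	nv_wr32(dev, 0x100c80, (engine << 16) | 1);	/* trigger flush */
	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
}
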
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
new file mode 100644
index 000000000000..c2f889b0d340
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fifo.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv84_fence_chan {
+ struct nouveau_fence_chan base;
+};
+
+struct nv84_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_gpuobj *mem;
+};
+
+static int
+nv84_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ int ret = RING_SPACE(chan, 7);
+ if (ret == 0) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, NvSema);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(chan->id * 16));
+ OUT_RING (chan, lower_32_bits(chan->id * 16));
+ OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
+static int
+nv84_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ int ret = RING_SPACE(chan, 7);
+ if (ret == 0) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, NvSema);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(prev->id * 16));
+ OUT_RING (chan, lower_32_bits(prev->id * 16));
+ OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
+static u32
+nv84_fence_read(struct nouveau_channel *chan)
+{
+ struct nv84_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+ return nv_ro32(priv->mem, chan->id * 16);
+}
+
+static void
+nv84_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fence_chan *fctx = chan->engctx[engine];
+ nouveau_fence_context_del(&fctx->base);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv84_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fence_priv *priv = nv_engine(chan->dev, engine);
+ struct nv84_fence_chan *fctx;
+ struct nouveau_gpuobj *obj;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ nouveau_fence_context_new(&fctx->base);
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
+ priv->mem->vinst, priv->mem->size,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &obj);
+ if (ret == 0) {
+ ret = nouveau_ramht_insert(chan, NvSema, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ nv_wo32(priv->mem, chan->id * 16, 0x00000000);
+ }
+
+ if (ret)
+ nv84_fence_context_del(chan, engine);
+ return ret;
+}
+
+static int
+nv84_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static int
+nv84_fence_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static void
+nv84_fence_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fence_priv *priv = nv_engine(dev, engine);
+
+ nouveau_gpuobj_ref(NULL, &priv->mem);
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv84_fence_create(struct drm_device *dev)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fence_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.engine.destroy = nv84_fence_destroy;
+ priv->base.engine.init = nv84_fence_init;
+ priv->base.engine.fini = nv84_fence_fini;
+ priv->base.engine.context_new = nv84_fence_context_new;
+ priv->base.engine.context_del = nv84_fence_context_del;
+ priv->base.emit = nv84_fence_emit;
+ priv->base.sync = nv84_fence_sync;
+ priv->base.read = nv84_fence_read;
+ dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 16 * pfifo->channels,
+ 0x1000, 0, &priv->mem);
+ if (ret)
+ goto out;
+
+out:
+ if (ret)
+ nv84_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+ return ret;
+}
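
The moving parts above compose simply: every channel owns a 16-byte slot in priv->mem, emit() writes fence->sequence into its own slot through the NvSema DMA object, sync() makes another channel block until the slot reaches that value, and the CPU side is a plain read-and-compare. A hedged sketch of the completion test the core presumably builds on nv84_fence_read():

static bool
nv84_fence_done_sketch(struct nouveau_fence *fence)
{
	/* per-channel sequences only increase (32-bit wrap ignored) */
	return nv84_fence_read(fence->channel) >= fence->sequence;
}
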
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
new file mode 100644
index 000000000000..cc82d799fc3b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_fifo.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
+#include "nouveau_ramht.h"
+#include "nouveau_vm.h"
+
+struct nv84_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nv84_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+ struct nouveau_gpuobj *cache;
+};
+
+static int
+nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine);
+ struct nv84_fifo_chan *fctx;
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
+ u64 instance;
+ unsigned long flags;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ atomic_inc(&chan->vm->engref[engine]);
+
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ instance = fctx->ramfc->vinst >> 8;
+
+ ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
+ nv_wo32(fctx->ramfc, 0x40, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
+ nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4);
+ nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
+ nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
+ drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
+ nv_wo32(fctx->ramfc, 0x78, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
+ nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->cinst >> 4));
+ nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10);
+ nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12);
+
+ nv_wo32(chan->ramin, 0x00, chan->id);
+ nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8);
+
+ dev_priv->engine.instmem.flush(dev);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
+ nv50_fifo_playlist_update(dev);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ /* remove channel from playlist, will context switch if active */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
+ nv50_fifo_playlist_update(dev);
+
+ /* tell any engines on this channel to unload their contexts */
+ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+ if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
+ NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* clean up */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ nouveau_gpuobj_ref(NULL, &fctx->ramfc);
+ nouveau_gpuobj_ref(NULL, &fctx->cache);
+
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv84_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fifo_chan *fctx;
+ u32 instance;
+ int i;
+
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x00250c, 0x6f3cfc34);
+ nv_wr32(dev, 0x002044, 0x01003fff);
+
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
+
+ for (i = 0; i < 128; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan && (fctx = chan->engctx[engine]))
+ instance = 0x80000000 | fctx->ramfc->vinst >> 8;
+ else
+ instance = 0x00000000;
+ nv_wr32(dev, 0x002600 + (i * 4), instance);
+ }
+
+ nv50_fifo_playlist_update(dev);
+
+ nv_wr32(dev, 0x003200, 1);
+ nv_wr32(dev, 0x003250, 1);
+ nv_wr32(dev, 0x002500, 1);
+ return 0;
+}
+
+static int
+nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ /* set playlist length to zero, fifo will unload context */
+ nv_wr32(dev, 0x0032ec, 0);
+
+ /* tell all connected engines to unload their contexts */
+ for (i = 0; i < priv->base.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan)
+ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+ if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
+ NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, 0x002140, 0);
+ return 0;
+}
+
+int
+nv84_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv50_fifo_destroy;
+ priv->base.base.init = nv84_fifo_init;
+ priv->base.base.fini = nv84_fifo_fini;
+ priv->base.base.context_new = nv84_fifo_context_new;
+ priv->base.base.context_del = nv84_fifo_context_del;
+ priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
+ priv->base.channels = 127;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
+ if (ret)
+ goto error;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
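
One detail of the ramfc setup above worth unpacking: the 0x54 word stores the indirect-buffer ring size as a power-of-two order in bits 16 and up, and drm_order() computes ceil(log2()) of the entry count. A self-contained equivalent of that computation (sketch; drm_order itself lives in the drm core):

static int order_sketch(unsigned long size)
{
	int order = 0;
	unsigned long tmp;

	for (tmp = size >> 1; tmp; tmp >>= 1)
		order++;		/* floor(log2(size)) */
	if (size & (size - 1))
		order++;		/* round up if not a power of two */
	return order;
}

For example, with an ib_max of, say, 0x1fff, the field becomes order_sketch(0x2000) << 16 == 13 << 16.
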
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
index db94ff0a9fab..e25e13fb894e 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.c
@@ -23,21 +23,93 @@
*/
#include "drmP.h"
+
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include "nouveau_vm.h"
#include "nouveau_ramht.h"
-struct nv98_crypt_engine {
+#include "nv98_crypt.fuc.h"
+
+struct nv98_crypt_priv {
struct nouveau_exec_engine base;
};
+struct nv98_crypt_chan {
+ struct nouveau_gpuobj *mem;
+};
+
static int
-nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv98_crypt_priv *priv = nv_engine(dev, engine);
+ struct nv98_crypt_chan *cctx;
+ int ret;
+
+ cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
+ if (!cctx)
+ return -ENOMEM;
+
+ atomic_inc(&chan->vm->engref[engine]);
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
+ if (ret)
+ goto error;
+
+ nv_wo32(chan->ramin, 0xa0, 0x00190000);
+ nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1);
+ nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst);
+ nv_wo32(chan->ramin, 0xac, 0x00000000);
+ nv_wo32(chan->ramin, 0xb0, 0x00000000);
+ nv_wo32(chan->ramin, 0xb4, 0x00000000);
+ dev_priv->engine.instmem.flush(dev);
+
+error:
+ if (ret)
+ priv->base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv98_crypt_chan *cctx = chan->engctx[engine];
+ int i;
+
+ for (i = 0xa0; i < 0xb4; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+
+ nouveau_gpuobj_ref(NULL, &cctx->mem);
+
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+ kfree(cctx);
+}
+
+static int
+nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
{
- if (!(nv_rd32(dev, 0x000200) & 0x00004000))
- return 0;
+ struct nv98_crypt_chan *cctx = chan->engctx[engine];
+
+	/* fuc engine doesn't need an object, but our ramht code does */
+ cctx->mem->engine = 5;
+ cctx->mem->class = class;
+ return nouveau_ramht_insert(chan, handle, cctx->mem);
+}
+
+static void
+nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
+{
+ nv50_vm_flush_engine(dev, 0x0a);
+}
+
+static int
+nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+{
nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
return 0;
}
@@ -45,34 +117,100 @@ nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
static int
nv98_crypt_init(struct drm_device *dev, int engine)
{
+ int i;
+
+ /* reset! */
nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+
+ /* wait for exit interrupt to signal */
+ nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
+ nv_wr32(dev, 0x087004, 0x00000010);
+
+ /* upload microcode code and data segments */
+ nv_wr32(dev, 0x087ff8, 0x00100000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
+ nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
+
+ nv_wr32(dev, 0x087ff8, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
+ nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
+
+ /* start it running */
+ nv_wr32(dev, 0x08710c, 0x00000000);
+ nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
+ nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
return 0;
}
+static struct nouveau_enum nv98_crypt_isr_error_name[] = {
+ { 0x0000, "ILLEGAL_MTHD" },
+ { 0x0001, "INVALID_BITFIELD" },
+ { 0x0002, "INVALID_ENUM" },
+ { 0x0003, "QUERY" },
+ {}
+};
+
+static void
+nv98_crypt_isr(struct drm_device *dev)
+{
+ u32 disp = nv_rd32(dev, 0x08701c);
+ u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
+ u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
+ u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
+ u32 addr = nv_rd32(dev, 0x087040) >> 16;
+ u32 mthd = (addr & 0x07ff) << 2;
+ u32 subc = (addr & 0x3800) >> 11;
+ u32 data = nv_rd32(dev, 0x087044);
+ int chid = nv50_graph_isr_chid(dev, inst);
+
+ if (stat & 0x00000040) {
+ NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
+ nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
+ printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, mthd, data);
+ nv_wr32(dev, 0x087004, 0x00000040);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
+ nv_wr32(dev, 0x087004, stat);
+ }
+
+ nv50_fb_vm_trap(dev, 1);
+}
+
static void
nv98_crypt_destroy(struct drm_device *dev, int engine)
{
- struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
+ struct nv98_crypt_priv *priv = nv_engine(dev, engine);
+ nouveau_irq_unregister(dev, 14);
NVOBJ_ENGINE_DEL(dev, CRYPT);
-
- kfree(pcrypt);
+ kfree(priv);
}
int
nv98_crypt_create(struct drm_device *dev)
{
- struct nv98_crypt_engine *pcrypt;
+ struct nv98_crypt_priv *priv;
- pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
- if (!pcrypt)
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- pcrypt->base.destroy = nv98_crypt_destroy;
- pcrypt->base.init = nv98_crypt_init;
- pcrypt->base.fini = nv98_crypt_fini;
+ priv->base.destroy = nv98_crypt_destroy;
+ priv->base.init = nv98_crypt_init;
+ priv->base.fini = nv98_crypt_fini;
+ priv->base.context_new = nv98_crypt_context_new;
+ priv->base.context_del = nv98_crypt_context_del;
+ priv->base.object_new = nv98_crypt_object_new;
+ priv->base.tlb_flush = nv98_crypt_tlb_flush;
+
+ nouveau_irq_register(dev, 14, nv98_crypt_isr);
- NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
+ NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
+ NVOBJ_CLASS(dev, 0x88b4, CRYPT);
return 0;
}
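
The init path above streams both microcode segments through a single auto-incrementing data port. Factored out as a sketch, with the register meanings inferred from the loop itself (0x087ff8 selects the upload window, 0x087ff4 takes the words):

static void
fuc_upload_sketch(struct drm_device *dev, u32 base, const u32 *ucode, int len)
{
	int i;

	nv_wr32(dev, 0x087ff8, base);	/* 0x00100000 = code, 0 = data */
	for (i = 0; i < len; i++)
		nv_wr32(dev, 0x087ff4, ucode[i]);
}

Usage would be fuc_upload_sketch(dev, 0x00100000, nv98_pcrypt_code, ARRAY_SIZE(nv98_pcrypt_code)) followed by the data segment at base 0.
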
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc b/drivers/gpu/drm/nouveau/nv98_crypt.fuc
new file mode 100644
index 000000000000..7393813044de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.fuc
@@ -0,0 +1,698 @@
+/*
+ * fuc microcode for nv98 pcrypt engine
+ * Copyright (C) 2010 Marcin Kościelnicki
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+.section #nv98_pcrypt_data
+
+ctx_dma:
+ctx_dma_query: .b32 0
+ctx_dma_src: .b32 0
+ctx_dma_dst: .b32 0
+.equ #dma_count 3
+ctx_query_address_high: .b32 0
+ctx_query_address_low: .b32 0
+ctx_query_counter: .b32 0
+ctx_cond_address_high: .b32 0
+ctx_cond_address_low: .b32 0
+ctx_cond_off: .b32 0
+ctx_src_address_high: .b32 0
+ctx_src_address_low: .b32 0
+ctx_dst_address_high: .b32 0
+ctx_dst_address_low: .b32 0
+ctx_mode: .b32 0
+.align 16
+ctx_key: .skip 16
+ctx_iv: .skip 16
+
+.align 0x80
+swap:
+.skip 32
+
+.align 8
+common_cmd_dtable:
+.b32 #ctx_query_address_high + 0x20000 ~0xff
+.b32 #ctx_query_address_low + 0x20000 ~0xfffffff0
+.b32 #ctx_query_counter + 0x20000 ~0xffffffff
+.b32 #cmd_query_get + 0x00000 ~1
+.b32 #ctx_cond_address_high + 0x20000 ~0xff
+.b32 #ctx_cond_address_low + 0x20000 ~0xfffffff0
+.b32 #cmd_cond_mode + 0x00000 ~7
+.b32 #cmd_wrcache_flush + 0x00000 ~0
+.equ #common_cmd_max 0x88
+
+
+.align 8
+engine_cmd_dtable:
+.b32 #ctx_key + 0x0 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0x4 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0x8 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0xc + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x0 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x4 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x8 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0xc + 0x20000 ~0xffffffff
+.b32 #ctx_src_address_high + 0x20000 ~0xff
+.b32 #ctx_src_address_low + 0x20000 ~0xfffffff0
+.b32 #ctx_dst_address_high + 0x20000 ~0xff
+.b32 #ctx_dst_address_low + 0x20000 ~0xfffffff0
+.b32 #crypt_cmd_mode + 0x00000 ~0xf
+.b32 #crypt_cmd_length + 0x10000 ~0x0ffffff0
+.equ #engine_cmd_max 0xce
+
+.align 4
+crypt_dtable:
+.b16 #crypt_copy_prep #crypt_do_inout
+.b16 #crypt_store_prep #crypt_do_out
+.b16 #crypt_ecb_e_prep #crypt_do_inout
+.b16 #crypt_ecb_d_prep #crypt_do_inout
+.b16 #crypt_cbc_e_prep #crypt_do_inout
+.b16 #crypt_cbc_d_prep #crypt_do_inout
+.b16 #crypt_pcbc_e_prep #crypt_do_inout
+.b16 #crypt_pcbc_d_prep #crypt_do_inout
+.b16 #crypt_cfb_e_prep #crypt_do_inout
+.b16 #crypt_cfb_d_prep #crypt_do_inout
+.b16 #crypt_ofb_prep #crypt_do_inout
+.b16 #crypt_ctr_prep #crypt_do_inout
+.b16 #crypt_cbc_mac_prep #crypt_do_in
+.b16 #crypt_cmac_finish_complete_prep #crypt_do_in
+.b16 #crypt_cmac_finish_partial_prep #crypt_do_in
+
+.align 0x100
+
+.section #nv98_pcrypt_code
+
+ // $r0 is always set to 0 in our code - this allows some space savings.
+ clear b32 $r0
+
+ // set up the interrupt handler
+ mov $r1 #ih
+ mov $iv0 $r1
+
+ // init stack pointer
+ mov $sp $r0
+
+ // set interrupt dispatch - route timer, fifo, ctxswitch to i0, others to host
+ movw $r1 0xfff0
+ sethi $r1 0
+ mov $r2 0x400
+ iowr I[$r2 + 0x300] $r1
+
+ // enable the interrupts
+ or $r1 0xc
+ iowr I[$r2] $r1
+
+ // enable fifo access and context switching
+ mov $r1 3
+ mov $r2 0x1200
+ iowr I[$r2] $r1
+
+ // enable i0 delivery
+ bset $flags ie0
+
+	// sleep forever, waking only for interrupts.
+ bset $flags $p0
+ spin:
+ sleep $p0
+ bra #spin
+
+// i0 handler
+ih:
+ // see which interrupts we got
+ iord $r1 I[$r0 + 0x200]
+
+ and $r2 $r1 0x8
+ cmpu b32 $r2 0
+ bra e #noctx
+
+ // context switch... prepare the regs for xfer
+ mov $r2 0x7700
+ mov $xtargets $r2
+ mov $xdbase $r0
+ // 128-byte context.
+ mov $r2 0
+ sethi $r2 0x50000
+
+ // read current channel
+ mov $r3 0x1400
+ iord $r4 I[$r3]
+ // if bit 30 set, it's active, so we have to unload it first.
+ shl b32 $r5 $r4 1
+ cmps b32 $r5 0
+ bra nc #ctxload
+
+ // unload the current channel - save the context
+ xdst $r0 $r2
+ xdwait
+ // and clear bit 30, then write back
+ bclr $r4 0x1e
+ iowr I[$r3] $r4
+ // tell PFIFO we unloaded
+ mov $r4 1
+ iowr I[$r3 + 0x200] $r4
+
+ bra #noctx
+
+ ctxload:
+ // no channel loaded - perhaps we're requested to load one
+ iord $r4 I[$r3 + 0x100]
+ shl b32 $r15 $r4 1
+ cmps b32 $r15 0
+ // if bit 30 of next channel not set, probably PFIFO is just
+ // killing a context. do a faux load, without the active bit.
+ bra nc #dummyload
+
+ // ok, do a real context load.
+ xdld $r0 $r2
+ xdwait
+ mov $r5 #ctx_dma
+ mov $r6 #dma_count - 1
+ ctxload_dma_loop:
+ ld b32 $r7 D[$r5 + $r6 * 4]
+ add b32 $r8 $r6 0x180
+ shl b32 $r8 8
+ iowr I[$r8] $r7
+ sub b32 $r6 1
+ bra nc #ctxload_dma_loop
+
+ dummyload:
+ // tell PFIFO we're done
+ mov $r5 2
+ iowr I[$r3 + 0x200] $r5
+
+ noctx:
+ and $r2 $r1 0x4
+ cmpu b32 $r2 0
+ bra e #nocmd
+
+ // incoming fifo command.
+ mov $r3 0x1900
+ iord $r2 I[$r3 + 0x100]
+ iord $r3 I[$r3]
+ // extract the method
+ and $r4 $r2 0x7ff
+ // shift the addr to proper position if we need to interrupt later
+ shl b32 $r2 0x10
+
+ // mthd 0 and 0x100 [NAME, NOP]: ignore
+ and $r5 $r4 0x7bf
+ cmpu b32 $r5 0
+ bra e #cmddone
+
+ mov $r5 #engine_cmd_dtable - 0xc0 * 8
+ mov $r6 #engine_cmd_max
+ cmpu b32 $r4 0xc0
+ bra nc #dtable_cmd
+ mov $r5 #common_cmd_dtable - 0x80 * 8
+ mov $r6 #common_cmd_max
+ cmpu b32 $r4 0x80
+ bra nc #dtable_cmd
+ cmpu b32 $r4 0x60
+ bra nc #dma_cmd
+ cmpu b32 $r4 0x50
+ bra ne #illegal_mthd
+
+ // mthd 0x140: PM_TRIGGER
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x20000
+ iowr I[$r2] $r3
+ bra #cmddone
+
+ dma_cmd:
+ // mthd 0x180...: DMA_*
+ cmpu b32 $r4 0x60+#dma_count
+ bra nc #illegal_mthd
+ shl b32 $r5 $r4 2
+ add b32 $r5 (#ctx_dma - 0x60 * 4) & 0xffff
+ bset $r3 0x1e
+ st b32 D[$r5] $r3
+ add b32 $r4 0x180 - 0x60
+ shl b32 $r4 8
+ iowr I[$r4] $r3
+ bra #cmddone
+
+ dtable_cmd:
+ cmpu b32 $r4 $r6
+ bra nc #illegal_mthd
+ shl b32 $r4 3
+ add b32 $r4 $r5
+ ld b32 $r5 D[$r4 + 4]
+ and $r5 $r3
+ cmpu b32 $r5 0
+ bra ne #invalid_bitfield
+ ld b16 $r5 D[$r4]
+ ld b16 $r6 D[$r4 + 2]
+ cmpu b32 $r6 2
+ bra e #cmd_setctx
+ ld b32 $r7 D[$r0 + #ctx_cond_off]
+ and $r6 $r7
+ cmpu b32 $r6 1
+ bra e #cmddone
+ call $r5
+ bra $p1 #dispatch_error
+ bra #cmddone
+
+ cmd_setctx:
+ st b32 D[$r5] $r3
+ bra #cmddone
+
+
+ invalid_bitfield:
+ or $r2 1
+ dispatch_error:
+ illegal_mthd:
+ mov $r4 0x1000
+ iowr I[$r4] $r2
+ iowr I[$r4 + 0x100] $r3
+ mov $r4 0x40
+ iowr I[$r0] $r4
+
+ im_loop:
+ iord $r4 I[$r0 + 0x200]
+ and $r4 0x40
+ cmpu b32 $r4 0
+ bra ne #im_loop
+
+ cmddone:
+ // remove the command from FIFO
+ mov $r3 0x1d00
+ mov $r4 1
+ iowr I[$r3] $r4
+
+ nocmd:
+ // ack the processed interrupts
+ and $r1 $r1 0xc
+ iowr I[$r0 + 0x100] $r1
+iret
+
+cmd_query_get:
+ // if bit 0 of param set, trigger interrupt afterwards.
+ setp $p1 $r3
+ or $r2 3
+
+ // read PTIMER, beware of races...
+ mov $r4 0xb00
+ ptimer_retry:
+ iord $r6 I[$r4 + 0x100]
+ iord $r5 I[$r4]
+ iord $r7 I[$r4 + 0x100]
+ cmpu b32 $r6 $r7
+ bra ne #ptimer_retry
+
+ // prepare the query structure
+ ld b32 $r4 D[$r0 + #ctx_query_counter]
+ st b32 D[$r0 + #swap + 0x0] $r4
+ st b32 D[$r0 + #swap + 0x4] $r0
+ st b32 D[$r0 + #swap + 0x8] $r5
+ st b32 D[$r0 + #swap + 0xc] $r6
+
+ // will use target 0, DMA_QUERY.
+ mov $xtargets $r0
+
+ ld b32 $r4 D[$r0 + #ctx_query_address_high]
+ shl b32 $r4 0x18
+ mov $xdbase $r4
+
+ ld b32 $r4 D[$r0 + #ctx_query_address_low]
+ mov $r5 #swap
+ sethi $r5 0x20000
+ xdst $r4 $r5
+ xdwait
+
+ ret
+
+cmd_cond_mode:
+ // if >= 5, INVALID_ENUM
+ bset $flags $p1
+ or $r2 2
+ cmpu b32 $r3 5
+ bra nc #return
+
+ // otherwise, no error.
+ bclr $flags $p1
+
+ // if < 2, no QUERY object is involved
+ cmpu b32 $r3 2
+ bra nc #cmd_cond_mode_queryful
+
+ xor $r3 1
+ st b32 D[$r0 + #ctx_cond_off] $r3
+ return:
+ ret
+
+ cmd_cond_mode_queryful:
+ // ok, will need to pull a QUERY object, prepare offsets
+ ld b32 $r4 D[$r0 + #ctx_cond_address_high]
+ ld b32 $r5 D[$r0 + #ctx_cond_address_low]
+ and $r6 $r5 0xff
+ shr b32 $r5 8
+ shl b32 $r4 0x18
+ or $r4 $r5
+ mov $xdbase $r4
+ mov $xtargets $r0
+
+ // pull the first one
+ mov $r5 #swap
+ sethi $r5 0x20000
+ xdld $r6 $r5
+
+ // if == 2, only a single QUERY is involved...
+ cmpu b32 $r3 2
+ bra ne #cmd_cond_mode_double
+
+ xdwait
+ ld b32 $r4 D[$r0 + #swap + 4]
+ cmpu b32 $r4 0
+ xbit $r4 $flags z
+ st b32 D[$r0 + #ctx_cond_off] $r4
+ ret
+
+ // ok, we'll need to pull second one too
+ cmd_cond_mode_double:
+ add b32 $r6 0x10
+ add b32 $r5 0x10
+ xdld $r6 $r5
+ xdwait
+
+ // compare COUNTERs
+ ld b32 $r5 D[$r0 + #swap + 0x00]
+ ld b32 $r6 D[$r0 + #swap + 0x10]
+ cmpu b32 $r5 $r6
+ xbit $r4 $flags z
+
+ // compare RESen
+ ld b32 $r5 D[$r0 + #swap + 0x04]
+ ld b32 $r6 D[$r0 + #swap + 0x14]
+ cmpu b32 $r5 $r6
+ xbit $r5 $flags z
+ and $r4 $r5
+
+ // and negate or not, depending on mode
+ cmpu b32 $r3 3
+ xbit $r5 $flags z
+ xor $r4 $r5
+ st b32 D[$r0 + #ctx_cond_off] $r4
+ ret
+
+cmd_wrcache_flush:
+ bclr $flags $p1
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x10000
+ iowr I[$r2] $r3
+ ret
+
+crypt_cmd_mode:
+ // if >= 0xf, INVALID_ENUM
+ bset $flags $p1
+ or $r2 2
+ cmpu b32 $r3 0xf
+ bra nc #crypt_cmd_mode_return
+
+ bclr $flags $p1
+ st b32 D[$r0 + #ctx_mode] $r3
+
+ crypt_cmd_mode_return:
+ ret
+
+crypt_cmd_length:
+ // nop if length == 0
+ cmpu b32 $r3 0
+ bra e #crypt_cmd_mode_return
+
+ // init key, IV
+ cxset 3
+ mov $r4 #ctx_key
+ sethi $r4 0x70000
+ xdst $r0 $r4
+ mov $r4 #ctx_iv
+ sethi $r4 0x60000
+ xdst $r0 $r4
+ xdwait
+ ckeyreg $c7
+
+ // prepare the targets
+ mov $r4 0x2100
+ mov $xtargets $r4
+
+ // prepare src address
+ ld b32 $r4 D[$r0 + #ctx_src_address_high]
+ ld b32 $r5 D[$r0 + #ctx_src_address_low]
+ shr b32 $r8 $r5 8
+ shl b32 $r4 0x18
+ or $r4 $r8
+ and $r5 $r5 0xff
+
+ // prepare dst address
+ ld b32 $r6 D[$r0 + #ctx_dst_address_high]
+ ld b32 $r7 D[$r0 + #ctx_dst_address_low]
+ shr b32 $r8 $r7 8
+ shl b32 $r6 0x18
+ or $r6 $r8
+ and $r7 $r7 0xff
+
+ // find the proper prep & do functions
+ ld b32 $r8 D[$r0 + #ctx_mode]
+ shl b32 $r8 2
+
+ // run prep
+ ld b16 $r9 D[$r8 + #crypt_dtable]
+ call $r9
+
+ // do it
+ ld b16 $r9 D[$r8 + #crypt_dtable + 2]
+ call $r9
+ cxset 1
+ xdwait
+ cxset 0x61
+ xdwait
+ xdwait
+
+ // update src address
+ shr b32 $r8 $r4 0x18
+ shl b32 $r9 $r4 8
+ add b32 $r9 $r5
+ adc b32 $r8 0
+ st b32 D[$r0 + #ctx_src_address_high] $r8
+ st b32 D[$r0 + #ctx_src_address_low] $r9
+
+ // update dst address
+ shr b32 $r8 $r6 0x18
+ shl b32 $r9 $r6 8
+ add b32 $r9 $r7
+ adc b32 $r8 0
+ st b32 D[$r0 + #ctx_dst_address_high] $r8
+ st b32 D[$r0 + #ctx_dst_address_low] $r9
+
+ // pull updated IV
+ cxset 2
+ mov $r4 #ctx_iv
+ sethi $r4 0x60000
+ xdld $r0 $r4
+ xdwait
+
+ ret
+
+
+crypt_copy_prep:
+ cs0begin 2
+ cxsin $c0
+ cxsout $c0
+ ret
+
+crypt_store_prep:
+ cs0begin 1
+ cxsout $c6
+ ret
+
+crypt_ecb_e_prep:
+ cs0begin 3
+ cxsin $c0
+ cenc $c0 $c0
+ cxsout $c0
+ ret
+
+crypt_ecb_d_prep:
+ ckexp $c7 $c7
+ cs0begin 3
+ cxsin $c0
+ cdec $c0 $c0
+ cxsout $c0
+ ret
+
+crypt_cbc_e_prep:
+ cs0begin 4
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ cxsout $c6
+ ret
+
+crypt_cbc_d_prep:
+ ckexp $c7 $c7
+ cs0begin 5
+ cmov $c2 $c6
+ cxsin $c6
+ cdec $c0 $c6
+ cxor $c0 $c2
+ cxsout $c0
+ ret
+
+crypt_pcbc_e_prep:
+ cs0begin 5
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ cxsout $c6
+ cxor $c6 $c0
+ ret
+
+crypt_pcbc_d_prep:
+ ckexp $c7 $c7
+ cs0begin 5
+ cxsin $c0
+ cdec $c1 $c0
+ cxor $c6 $c1
+ cxsout $c6
+ cxor $c6 $c0
+ ret
+
+crypt_cfb_e_prep:
+ cs0begin 4
+ cenc $c6 $c6
+ cxsin $c0
+ cxor $c6 $c0
+ cxsout $c6
+ ret
+
+crypt_cfb_d_prep:
+ cs0begin 4
+ cenc $c0 $c6
+ cxsin $c6
+ cxor $c0 $c6
+ cxsout $c0
+ ret
+
+crypt_ofb_prep:
+ cs0begin 4
+ cenc $c6 $c6
+ cxsin $c0
+ cxor $c0 $c6
+ cxsout $c0
+ ret
+
+crypt_ctr_prep:
+ cs0begin 5
+ cenc $c1 $c6
+ cadd $c6 1
+ cxsin $c0
+ cxor $c0 $c1
+ cxsout $c0
+ ret
+
+crypt_cbc_mac_prep:
+ cs0begin 3
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+crypt_cmac_finish_complete_prep:
+ cs0begin 7
+ cxsin $c0
+ cxor $c6 $c0
+ cxor $c0 $c0
+ cenc $c0 $c0
+ cprecmac $c0 $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+crypt_cmac_finish_partial_prep:
+ cs0begin 8
+ cxsin $c0
+ cxor $c6 $c0
+ cxor $c0 $c0
+ cenc $c0 $c0
+ cprecmac $c0 $c0
+ cprecmac $c0 $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+// TODO
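+// The three transfer loops below pump 16-byte blocks between memory and
+// the crypt streams: crypt_do_in feeds input only (the MAC modes),
+// crypt_do_out drains output only (crypt state stores), and
+// crypt_do_inout does both (the cipher modes).  cxset appears to select
+// how the next DMA transfer is wired to the crypt streams.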
+crypt_do_in:
+ add b32 $r3 $r5
+ mov $xdbase $r4
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_in_loop:
+ xdld $r5 $r9
+ xdwait
+ cxset 0x22
+ xdst $r0 $r9
+ cs0exec 1
+ xdwait
+ add b32 $r5 0x10
+ cmpu b32 $r5 $r3
+ bra ne #crypt_do_in_loop
+ cxset 1
+ xdwait
+ ret
+
+crypt_do_out:
+ add b32 $r3 $r7
+ mov $xdbase $r6
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_out_loop:
+ cs0exec 1
+ cxset 0x61
+ xdld $r7 $r9
+ xdst $r7 $r9
+ cxset 1
+ xdwait
+ add b32 $r7 0x10
+ cmpu b32 $r7 $r3
+ bra ne #crypt_do_out_loop
+ ret
+
+crypt_do_inout:
+ add b32 $r3 $r5
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_inout_loop:
+ mov $xdbase $r4
+ xdld $r5 $r9
+ xdwait
+ cxset 0x21
+ xdst $r0 $r9
+ cs0exec 1
+ cxset 0x61
+ mov $xdbase $r6
+ xdld $r7 $r9
+ xdst $r7 $r9
+ cxset 1
+ xdwait
+ add b32 $r5 0x10
+ add b32 $r7 0x10
+ cmpu b32 $r5 $r3
+ bra ne #crypt_do_inout_loop
+ ret
+
+.align 0x100
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h b/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
new file mode 100644
index 000000000000..38676c74e6e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
@@ -0,0 +1,584 @@
+uint32_t nv98_pcrypt_data[] = {
+/* 0x0000: ctx_dma */
+/* 0x0000: ctx_dma_query */
+ 0x00000000,
+/* 0x0004: ctx_dma_src */
+ 0x00000000,
+/* 0x0008: ctx_dma_dst */
+ 0x00000000,
+/* 0x000c: ctx_query_address_high */
+ 0x00000000,
+/* 0x0010: ctx_query_address_low */
+ 0x00000000,
+/* 0x0014: ctx_query_counter */
+ 0x00000000,
+/* 0x0018: ctx_cond_address_high */
+ 0x00000000,
+/* 0x001c: ctx_cond_address_low */
+ 0x00000000,
+/* 0x0020: ctx_cond_off */
+ 0x00000000,
+/* 0x0024: ctx_src_address_high */
+ 0x00000000,
+/* 0x0028: ctx_src_address_low */
+ 0x00000000,
+/* 0x002c: ctx_dst_address_high */
+ 0x00000000,
+/* 0x0030: ctx_dst_address_low */
+ 0x00000000,
+/* 0x0034: ctx_mode */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0040: ctx_key */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0050: ctx_iv */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0080: swap */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x00a0: common_cmd_dtable */
+ 0x0002000c,
+ 0xffffff00,
+ 0x00020010,
+ 0x0000000f,
+ 0x00020014,
+ 0x00000000,
+ 0x00000192,
+ 0xfffffffe,
+ 0x00020018,
+ 0xffffff00,
+ 0x0002001c,
+ 0x0000000f,
+ 0x000001d7,
+ 0xfffffff8,
+ 0x00000260,
+ 0xffffffff,
+/* 0x00e0: engine_cmd_dtable */
+ 0x00020040,
+ 0x00000000,
+ 0x00020044,
+ 0x00000000,
+ 0x00020048,
+ 0x00000000,
+ 0x0002004c,
+ 0x00000000,
+ 0x00020050,
+ 0x00000000,
+ 0x00020054,
+ 0x00000000,
+ 0x00020058,
+ 0x00000000,
+ 0x0002005c,
+ 0x00000000,
+ 0x00020024,
+ 0xffffff00,
+ 0x00020028,
+ 0x0000000f,
+ 0x0002002c,
+ 0xffffff00,
+ 0x00020030,
+ 0x0000000f,
+ 0x00000271,
+ 0xfffffff0,
+ 0x00010285,
+ 0xf000000f,
+/* 0x0150: crypt_dtable */
+ 0x04db0321,
+ 0x04b1032f,
+ 0x04db0339,
+ 0x04db034b,
+ 0x04db0361,
+ 0x04db0377,
+ 0x04db0395,
+ 0x04db03af,
+ 0x04db03cd,
+ 0x04db03e3,
+ 0x04db03f9,
+ 0x04db040f,
+ 0x04830429,
+ 0x0483043b,
+ 0x0483045d,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nv98_pcrypt_code[] = {
+ 0x17f004bd,
+ 0x0010fe35,
+ 0xf10004fe,
+ 0xf0fff017,
+ 0x27f10013,
+ 0x21d00400,
+ 0x0c15f0c0,
+ 0xf00021d0,
+ 0x27f10317,
+ 0x21d01200,
+ 0x1031f400,
+/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
+/* 0x0035: ih */
+ 0x8001cffd,
+ 0xb00812c4,
+ 0x0bf40024,
+ 0x0027f167,
+ 0x002bfe77,
+ 0xf00007fe,
+ 0x23f00027,
+ 0x0037f105,
+ 0x0034cf14,
+ 0xb0014594,
+ 0x18f40055,
+ 0x0602fa17,
+ 0x4af003f8,
+ 0x0034d01e,
+ 0xd00147f0,
+ 0x0ef48034,
+/* 0x0075: ctxload */
+ 0x4034cf33,
+ 0xb0014f94,
+ 0x18f400f5,
+ 0x0502fa21,
+ 0x57f003f8,
+ 0x0267f000,
+/* 0x008c: ctxload_dma_loop */
+ 0xa07856bc,
+ 0xb6018068,
+ 0x87d00884,
+ 0x0162b600,
+/* 0x009f: dummyload */
+ 0xf0f018f4,
+ 0x35d00257,
+/* 0x00a5: noctx */
+ 0x0412c480,
+ 0xf50024b0,
+ 0xf100df0b,
+ 0xcf190037,
+ 0x33cf4032,
+ 0xff24e400,
+ 0x1024b607,
+ 0x07bf45e4,
+ 0xf50054b0,
+ 0xf100b90b,
+ 0xf1fae057,
+ 0xb000ce67,
+ 0x18f4c044,
+ 0xa057f14d,
+ 0x8867f1fc,
+ 0x8044b000,
+ 0xb03f18f4,
+ 0x18f46044,
+ 0x5044b019,
+ 0xf1741bf4,
+ 0xbd220027,
+ 0x0233f034,
+ 0xf50023d0,
+/* 0x0103: dma_cmd */
+ 0xb000810e,
+ 0x18f46344,
+ 0x0245945e,
+ 0xfe8050b7,
+ 0x801e39f0,
+ 0x40b70053,
+ 0x44b60120,
+ 0x0043d008,
+/* 0x0123: dtable_cmd */
+ 0xb8600ef4,
+ 0x18f40446,
+ 0x0344b63e,
+ 0x980045bb,
+ 0x53fd0145,
+ 0x0054b004,
+ 0x58291bf4,
+ 0x46580045,
+ 0x0264b001,
+ 0x98170bf4,
+ 0x67fd0807,
+ 0x0164b004,
+ 0xf9300bf4,
+ 0x0f01f455,
+/* 0x015b: cmd_setctx */
+ 0x80280ef4,
+ 0x0ef40053,
+/* 0x0161: invalid_bitfield */
+ 0x0125f022,
+/* 0x0164: dispatch_error */
+/* 0x0164: illegal_mthd */
+ 0x100047f1,
+ 0xd00042d0,
+ 0x47f04043,
+ 0x0004d040,
+/* 0x0174: im_loop */
+ 0xf08004cf,
+ 0x44b04044,
+ 0xf71bf400,
+/* 0x0180: cmddone */
+ 0x1d0037f1,
+ 0xd00147f0,
+/* 0x018a: nocmd */
+ 0x11c40034,
+ 0x4001d00c,
+/* 0x0192: cmd_query_get */
+ 0x38f201f8,
+ 0x0325f001,
+ 0x0b0047f1,
+/* 0x019c: ptimer_retry */
+ 0xcf4046cf,
+ 0x47cf0045,
+ 0x0467b840,
+ 0x98f41bf4,
+ 0x04800504,
+ 0x21008020,
+ 0x80220580,
+ 0x0bfe2306,
+ 0x03049800,
+ 0xfe1844b6,
+ 0x04980047,
+ 0x8057f104,
+ 0x0253f000,
+ 0xf80645fa,
+/* 0x01d7: cmd_cond_mode */
+ 0xf400f803,
+ 0x25f00131,
+ 0x0534b002,
+ 0xf41218f4,
+ 0x34b00132,
+ 0x0b18f402,
+ 0x800136f0,
+/* 0x01f2: return */
+ 0x00f80803,
+/* 0x01f4: cmd_cond_mode_queryful */
+ 0x98060498,
+ 0x56c40705,
+ 0x0855b6ff,
+ 0xfd1844b6,
+ 0x47fe0545,
+ 0x000bfe00,
+ 0x008057f1,
+ 0xfa0253f0,
+ 0x34b00565,
+ 0x131bf402,
+ 0x049803f8,
+ 0x0044b021,
+ 0x800b4cf0,
+ 0x00f80804,
+/* 0x022c: cmd_cond_mode_double */
+ 0xb61060b6,
+ 0x65fa1050,
+ 0x9803f805,
+ 0x06982005,
+ 0x0456b824,
+ 0x980b4cf0,
+ 0x06982105,
+ 0x0456b825,
+ 0xfd0b5cf0,
+ 0x34b00445,
+ 0x0b5cf003,
+ 0x800645fd,
+ 0x00f80804,
+/* 0x0260: cmd_wrcache_flush */
+ 0xf10132f4,
+ 0xbd220027,
+ 0x0133f034,
+ 0xf80023d0,
+/* 0x0271: crypt_cmd_mode */
+ 0x0131f400,
+ 0xb00225f0,
+ 0x18f40f34,
+ 0x0132f409,
+/* 0x0283: crypt_cmd_mode_return */
+ 0xf80d0380,
+/* 0x0285: crypt_cmd_length */
+ 0x0034b000,
+ 0xf4fb0bf4,
+ 0x47f0033c,
+ 0x0743f040,
+ 0xf00604fa,
+ 0x43f05047,
+ 0x0604fa06,
+ 0x3cf503f8,
+ 0x47f1c407,
+ 0x4bfe2100,
+ 0x09049800,
+ 0x950a0598,
+ 0x44b60858,
+ 0x0548fd18,
+ 0x98ff55c4,
+ 0x07980b06,
+ 0x0878950c,
+ 0xfd1864b6,
+ 0x77c40568,
+ 0x0d0898ff,
+ 0x580284b6,
+ 0x95f9a889,
+ 0xf9a98958,
+ 0x013cf495,
+ 0x3cf403f8,
+ 0xf803f861,
+ 0x18489503,
+ 0xbb084994,
+ 0x81b60095,
+ 0x09088000,
+ 0x950a0980,
+ 0x69941868,
+ 0x0097bb08,
+ 0x800081b6,
+ 0x09800b08,
+ 0x023cf40c,
+ 0xf05047f0,
+ 0x04fa0643,
+ 0xf803f805,
+/* 0x0321: crypt_copy_prep */
+ 0x203cf500,
+ 0x003cf594,
+ 0x003cf588,
+/* 0x032f: crypt_store_prep */
+ 0xf500f88c,
+ 0xf594103c,
+ 0xf88c063c,
+/* 0x0339: crypt_ecb_e_prep */
+ 0x303cf500,
+ 0x003cf594,
+ 0x003cf588,
+ 0x003cf5d0,
+/* 0x034b: crypt_ecb_d_prep */
+ 0xf500f88c,
+ 0xf5c8773c,
+ 0xf594303c,
+ 0xf588003c,
+ 0xf5d4003c,
+ 0xf88c003c,
+/* 0x0361: crypt_cbc_e_prep */
+ 0x403cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+ 0x063cf5d0,
+/* 0x0377: crypt_cbc_d_prep */
+ 0xf500f88c,
+ 0xf5c8773c,
+ 0xf594503c,
+ 0xf584623c,
+ 0xf588063c,
+ 0xf5d4603c,
+ 0xf5ac203c,
+ 0xf88c003c,
+/* 0x0395: crypt_pcbc_e_prep */
+ 0x503cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+ 0x063cf5d0,
+ 0x063cf58c,
+/* 0x03af: crypt_pcbc_d_prep */
+ 0xf500f8ac,
+ 0xf5c8773c,
+ 0xf594503c,
+ 0xf588003c,
+ 0xf5d4013c,
+ 0xf5ac163c,
+ 0xf58c063c,
+ 0xf8ac063c,
+/* 0x03cd: crypt_cfb_e_prep */
+ 0x403cf500,
+ 0x663cf594,
+ 0x003cf5d0,
+ 0x063cf588,
+ 0x063cf5ac,
+/* 0x03e3: crypt_cfb_d_prep */
+ 0xf500f88c,
+ 0xf594403c,
+ 0xf5d0603c,
+ 0xf588063c,
+ 0xf5ac603c,
+ 0xf88c003c,
+/* 0x03f9: crypt_ofb_prep */
+ 0x403cf500,
+ 0x663cf594,
+ 0x003cf5d0,
+ 0x603cf588,
+ 0x003cf5ac,
+/* 0x040f: crypt_ctr_prep */
+ 0xf500f88c,
+ 0xf594503c,
+ 0xf5d0613c,
+ 0xf5b0163c,
+ 0xf588003c,
+ 0xf5ac103c,
+ 0xf88c003c,
+/* 0x0429: crypt_cbc_mac_prep */
+ 0x303cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+/* 0x043b: crypt_cmac_finish_complete_prep */
+ 0xf500f8d0,
+ 0xf594703c,
+ 0xf588003c,
+ 0xf5ac063c,
+ 0xf5ac003c,
+ 0xf5d0003c,
+ 0xf5bc003c,
+ 0xf5ac063c,
+ 0xf8d0663c,
+/* 0x045d: crypt_cmac_finish_partial_prep */
+ 0x803cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x003cf5ac,
+ 0x003cf5ac,
+ 0x003cf5d0,
+ 0x003cf5bc,
+ 0x063cf5bc,
+ 0x663cf5ac,
+/* 0x0483: crypt_do_in */
+ 0xbb00f8d0,
+ 0x47fe0035,
+ 0x8097f100,
+ 0x0293f000,
+/* 0x0490: crypt_do_in_loop */
+ 0xf80559fa,
+ 0x223cf403,
+ 0xf50609fa,
+ 0xf898103c,
+ 0x1050b603,
+ 0xf40453b8,
+ 0x3cf4e91b,
+ 0xf803f801,
+/* 0x04b1: crypt_do_out */
+ 0x0037bb00,
+ 0xf10067fe,
+ 0xf0008097,
+/* 0x04be: crypt_do_out_loop */
+ 0x3cf50293,
+ 0x3cf49810,
+ 0x0579fa61,
+ 0xf40679fa,
+ 0x03f8013c,
+ 0xb81070b6,
+ 0x1bf40473,
+/* 0x04db: crypt_do_inout */
+ 0xbb00f8e8,
+ 0x97f10035,
+ 0x93f00080,
+/* 0x04e5: crypt_do_inout_loop */
+ 0x0047fe02,
+ 0xf80559fa,
+ 0x213cf403,
+ 0xf50609fa,
+ 0xf498103c,
+ 0x67fe613c,
+ 0x0579fa00,
+ 0xf40679fa,
+ 0x03f8013c,
+ 0xb61050b6,
+ 0x53b81070,
+ 0xd41bf404,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
index 8f356d58e409..0387dc7f4f42 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -79,29 +79,13 @@ static void
nva3_copy_context_del(struct nouveau_channel *chan, int engine)
{
struct nouveau_gpuobj *ctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- u32 inst;
-
- inst = (chan->ramin->vinst >> 12);
- inst |= 0x40000000;
-
- /* disable fifo access */
- nv_wr32(dev, 0x104048, 0x00000000);
- /* mark channel as unloaded if it's currently active */
- if (nv_rd32(dev, 0x104050) == inst)
- nv_mask(dev, 0x104050, 0x40000000, 0x00000000);
- /* mark next channel as invalid if it's about to be loaded */
- if (nv_rd32(dev, 0x104054) == inst)
- nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
- /* restore fifo access */
- nv_wr32(dev, 0x104048, 0x00000003);
+ int i;
- for (inst = 0xc0; inst <= 0xd4; inst += 4)
- nv_wo32(chan->ramin, inst, 0x00000000);
-
- nouveau_gpuobj_ref(NULL, &ctx);
+ for (i = 0xc0; i <= 0xd4; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
atomic_dec(&chan->vm->engref[engine]);
+ nouveau_gpuobj_ref(NULL, &ctx);
chan->engctx[engine] = ctx;
}
@@ -143,13 +127,6 @@ static int
nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
-
- /* trigger fuc context unload */
- nv_wait(dev, 0x104008, 0x0000000c, 0x00000000);
- nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
- nv_wr32(dev, 0x104000, 0x00000008);
- nv_wait(dev, 0x104008, 0x00000008, 0x00000000);
-
nv_wr32(dev, 0x104014, 0xffffffff);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 9e636e6ef6d7..798829353fb6 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -98,7 +98,9 @@ read_pll(struct drm_device *dev, int clk, u32 pll)
sclk = read_clk(dev, 0x10 + clk, false);
}
- return sclk * N / (M * P);
+ if (M * P)
+ return sclk * N / (M * P);
+ return 0;
}
struct creg {
@@ -182,23 +184,26 @@ prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
const u32 src1 = 0x004160 + (clk * 4);
const u32 ctrl = pll + 0;
const u32 coef = pll + 4;
- u32 cntl;
if (!reg->clk && !reg->pll) {
NV_DEBUG(dev, "no clock for %02x\n", clk);
return;
}
- cntl = nv_rd32(dev, ctrl) & 0xfffffff2;
if (reg->pll) {
nv_mask(dev, src0, 0x00000101, 0x00000101);
nv_wr32(dev, coef, reg->pll);
- nv_wr32(dev, ctrl, cntl | 0x00000015);
+ nv_mask(dev, ctrl, 0x00000015, 0x00000015);
+ nv_mask(dev, ctrl, 0x00000010, 0x00000000);
+ nv_wait(dev, ctrl, 0x00020000, 0x00020000);
+ nv_mask(dev, ctrl, 0x00000010, 0x00000010);
+ nv_mask(dev, ctrl, 0x00000008, 0x00000000);
nv_mask(dev, src1, 0x00000100, 0x00000000);
nv_mask(dev, src1, 0x00000001, 0x00000000);
} else {
nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
- nv_wr32(dev, ctrl, cntl | 0x0000001d);
+ nv_mask(dev, ctrl, 0x00000018, 0x00000018);
+ udelay(20);
nv_mask(dev, ctrl, 0x00000001, 0x00000000);
nv_mask(dev, src0, 0x00000100, 0x00000000);
nv_mask(dev, src0, 0x00000001, 0x00000000);
@@ -230,17 +235,28 @@ nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
}
struct nva3_pm_state {
+ struct nouveau_pm_level *perflvl;
+
struct creg nclk;
struct creg sclk;
- struct creg mclk;
struct creg vdec;
struct creg unka0;
+
+ struct creg mclk;
+ u8 *rammap;
+ u8 rammap_ver;
+ u8 rammap_len;
+ u8 *ramcfg;
+ u8 ramcfg_len;
+ u32 r004018;
+ u32 r100760;
};
void *
nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
struct nva3_pm_state *info;
+ u8 ramcfg_cnt;
int ret;
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -267,6 +283,20 @@ nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
if (ret < 0)
goto out;
+ info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
+ &info->rammap_ver,
+ &info->rammap_len,
+ &ramcfg_cnt, &info->ramcfg_len);
+ if (info->rammap_ver != 0x10 || info->rammap_len < 5)
+ info->rammap = NULL;
+
+ info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
+ &info->rammap_ver,
+ &info->ramcfg_len);
+ if (info->rammap_ver != 0x10)
+ info->ramcfg = NULL;
+
+ info->perflvl = perflvl;
out:
if (ret < 0) {
kfree(info);
@@ -287,6 +317,240 @@ nva3_pm_grcp_idle(void *data)
return false;
}
+static void
+mclk_precharge(struct nouveau_mem_exec_func *exec)
+{
+ nv_wr32(exec->dev, 0x1002d4, 0x00000001);
+}
+
+static void
+mclk_refresh(struct nouveau_mem_exec_func *exec)
+{
+ nv_wr32(exec->dev, 0x1002d0, 0x00000001);
+}
+
+static void
+mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
+{
+ nv_wr32(exec->dev, 0x100210, enable ? 0x80000000 : 0x00000000);
+}
+
+static void
+mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
+{
+ nv_wr32(exec->dev, 0x1002dc, enable ? 0x00000001 : 0x00000000);
+}
+
+static void
+mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
+{
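+ /* dummy read to post any outstanding MMIO writes before the delay */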
+ volatile u32 post = nv_rd32(exec->dev, 0); (void)post;
+ udelay((nsec + 500) / 1000);
+}
+
+static u32
+mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
+{
+ if (mr <= 1)
+ return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
+ if (mr <= 3)
+ return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
+ return 0;
+}
+
+static void
+mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
+
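+ /* mirror mode-register writes to the second rank's registers when
+ * a rank B is present */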
+ if (mr <= 1) {
+ if (dev_priv->vram_rank_B)
+ nv_wr32(exec->dev, 0x1002c8 + ((mr - 0) * 4), data);
+ nv_wr32(exec->dev, 0x1002c0 + ((mr - 0) * 4), data);
+ } else
+ if (mr <= 3) {
+ if (dev_priv->vram_rank_B)
+ nv_wr32(exec->dev, 0x1002e8 + ((mr - 2) * 4), data);
+ nv_wr32(exec->dev, 0x1002e0 + ((mr - 2) * 4), data);
+ }
+}
+
+static void
+mclk_clock_set(struct nouveau_mem_exec_func *exec)
+{
+ struct drm_device *dev = exec->dev;
+ struct nva3_pm_state *info = exec->priv;
+ u32 ctrl;
+
+ ctrl = nv_rd32(dev, 0x004000);
+ if (!(ctrl & 0x00000008) && info->mclk.pll) {
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
+ nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
+ nv_wr32(dev, 0x004018, 0x00001000);
+ nv_wr32(dev, 0x004000, (ctrl &= ~0x00000001));
+ nv_wr32(dev, 0x004004, info->mclk.pll);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
+ udelay(64);
+ nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
+ udelay(20);
+ } else
+ if (!info->mclk.pll) {
+ nv_mask(dev, 0x004168, 0x003f3040, info->mclk.clk);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
+ nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
+ nv_wr32(dev, 0x004018, 0x0000d000 | info->r004018);
+ }
+
+ if (info->rammap) {
+ if (info->ramcfg && (info->rammap[4] & 0x08)) {
+ u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
+ info->ramcfg[5];
+ u32 unk5a4 = ROM16(info->ramcfg[7]);
+ u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
+ (info->ramcfg[3] & 0x0f) << 16 |
+ (info->ramcfg[9] & 0x0f) |
+ 0x80000000;
+ nv_wr32(dev, 0x1005a0, unk5a0);
+ nv_wr32(dev, 0x1005a4, unk5a4);
+ nv_wr32(dev, 0x10f804, unk804);
+ nv_mask(dev, 0x10053c, 0x00001000, 0x00000000);
+ } else {
+ nv_mask(dev, 0x10053c, 0x00001000, 0x00001000);
+ nv_mask(dev, 0x10f804, 0x80000000, 0x00000000);
+ nv_mask(dev, 0x100760, 0x22222222, info->r100760);
+ nv_mask(dev, 0x1007a0, 0x22222222, info->r100760);
+ nv_mask(dev, 0x1007e0, 0x22222222, info->r100760);
+ }
+ }
+
+ if (info->mclk.pll) {
+ nv_mask(dev, 0x1110e0, 0x00088000, 0x00011000);
+ nv_wr32(dev, 0x004000, (ctrl &= ~0x00000008));
+ }
+}
+
+static void
+mclk_timing_set(struct nouveau_mem_exec_func *exec)
+{
+ struct drm_device *dev = exec->dev;
+ struct nva3_pm_state *info = exec->priv;
+ struct nouveau_pm_level *perflvl = info->perflvl;
+ int i;
+
+ for (i = 0; i < 9; i++)
+ nv_wr32(dev, 0x100220 + (i * 4), perflvl->timing.reg[i]);
+
+ if (info->ramcfg) {
+ u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
+ nv_mask(dev, 0x100200, 0x00001000, data);
+ }
+
+ if (info->ramcfg) {
+ u32 unk714 = nv_rd32(dev, 0x100714) & ~0xf0000010;
+ u32 unk718 = nv_rd32(dev, 0x100718) & ~0x00000100;
+ u32 unk71c = nv_rd32(dev, 0x10071c) & ~0x00000100;
+ if ( (info->ramcfg[2] & 0x20))
+ unk714 |= 0xf0000000;
+ if (!(info->ramcfg[2] & 0x04))
+ unk714 |= 0x00000010;
+ nv_wr32(dev, 0x100714, unk714);
+
+ if (info->ramcfg[2] & 0x01)
+ unk71c |= 0x00000100;
+ nv_wr32(dev, 0x10071c, unk71c);
+
+ if (info->ramcfg[2] & 0x02)
+ unk718 |= 0x00000100;
+ nv_wr32(dev, 0x100718, unk718);
+
+ if (info->ramcfg[2] & 0x10)
+ nv_wr32(dev, 0x111100, 0x48000000); /*XXX*/
+ }
+}
+
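+/*
+ * Glue the mclk_*() helpers above into the common memory reclocking
+ * engine: nouveau_mem_exec() sequences the precharge/refresh/MRS/clock
+ * changes and calls back here for the chipset-specific register work.
+ */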
+static void
+prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
+{
+ struct nouveau_mem_exec_func exec = {
+ .dev = dev,
+ .precharge = mclk_precharge,
+ .refresh = mclk_refresh,
+ .refresh_auto = mclk_refresh_auto,
+ .refresh_self = mclk_refresh_self,
+ .wait = mclk_wait,
+ .mrg = mclk_mrg,
+ .mrs = mclk_mrs,
+ .clock_set = mclk_clock_set,
+ .timing_set = mclk_timing_set,
+ .priv = info
+ };
+ u32 ctrl;
+
+ /* XXX: where does the 750MHz threshold come from? */
+ if (info->perflvl->memory <= 750000) {
+ info->r004018 = 0x10000000;
+ info->r100760 = 0x22222222;
+ }
+
+ ctrl = nv_rd32(dev, 0x004000);
+ if (ctrl & 0x00000008) {
+ if (info->mclk.pll) {
+ nv_mask(dev, 0x004128, 0x00000101, 0x00000101);
+ nv_wr32(dev, 0x004004, info->mclk.pll);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
+ nv_wr32(dev, 0x004000, (ctrl &= 0xffffffef));
+ nv_wait(dev, 0x004000, 0x00020000, 0x00020000);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000010));
+ nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000004));
+ }
+ } else {
+ u32 ssel = 0x00000101;
+ if (info->mclk.clk)
+ ssel |= info->mclk.clk;
+ else
+ ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
+ nv_mask(dev, 0x004168, 0x003f3141, ssel);
+ }
+
+ if (info->ramcfg) {
+ if (info->ramcfg[2] & 0x10) {
+ nv_mask(dev, 0x111104, 0x00000600, 0x00000000);
+ } else {
+ nv_mask(dev, 0x111100, 0x40000000, 0x40000000);
+ nv_mask(dev, 0x111104, 0x00000180, 0x00000000);
+ }
+ }
+ if (info->rammap && !(info->rammap[4] & 0x02))
+ nv_mask(dev, 0x100200, 0x00000800, 0x00000000);
+ nv_wr32(dev, 0x611200, 0x00003300);
+ if (info->ramcfg && !(info->ramcfg[2] & 0x10))
+ nv_wr32(dev, 0x111100, 0x4c020000); /*XXX*/
+
+ nouveau_mem_exec(&exec, info->perflvl);
+
+ nv_wr32(dev, 0x611200, 0x00003330);
+ if (info->rammap && (info->rammap[4] & 0x02))
+ nv_mask(dev, 0x100200, 0x00000800, 0x00000800);
+ if (info->ramcfg) {
+ if (info->ramcfg[2] & 0x10) {
+ nv_mask(dev, 0x111104, 0x00000180, 0x00000180);
+ nv_mask(dev, 0x111100, 0x40000000, 0x00000000);
+ } else {
+ nv_mask(dev, 0x111104, 0x00000600, 0x00000600);
+ }
+ }
+
+ if (info->mclk.pll) {
+ nv_mask(dev, 0x004168, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x004168, 0x00000100, 0x00000000);
+ } else {
+ nv_mask(dev, 0x004000, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x004128, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x004128, 0x00000100, 0x00000000);
+ }
+}
+
int
nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
@@ -316,18 +580,8 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
prog_clk(dev, 0x20, &info->unka0);
prog_clk(dev, 0x21, &info->vdec);
- if (info->mclk.clk || info->mclk.pll) {
- nv_wr32(dev, 0x100210, 0);
- nv_wr32(dev, 0x1002dc, 1);
- nv_wr32(dev, 0x004018, 0x00001000);
- prog_pll(dev, 0x02, 0x004000, &info->mclk);
- if (nv_rd32(dev, 0x4000) & 0x00000008)
- nv_wr32(dev, 0x004018, 0x1000d000);
- else
- nv_wr32(dev, 0x004018, 0x10005000);
- nv_wr32(dev, 0x1002dc, 0);
- nv_wr32(dev, 0x100210, 0x80000000);
- }
+ if (info->mclk.clk || info->mclk.pll)
+ prog_mem(dev, info);
ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index a495e48197ca..797159e7b7a6 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -43,22 +43,22 @@ nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
return ret;
if (rect->rop != ROP_COPY) {
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
OUT_RING (chan, 1);
}
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0588, 1);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR)
OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
else
OUT_RING (chan, rect->color);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x0600, 4);
OUT_RING (chan, rect->dx);
OUT_RING (chan, rect->dy);
OUT_RING (chan, rect->dx + rect->width);
OUT_RING (chan, rect->dy + rect->height);
if (rect->rop != ROP_COPY) {
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
OUT_RING (chan, 3);
}
FIRE_RING(chan);
@@ -78,14 +78,14 @@ nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
if (ret)
return ret;
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0110, 1);
OUT_RING (chan, 0);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x08b0, 4);
OUT_RING (chan, region->dx);
OUT_RING (chan, region->dy);
OUT_RING (chan, region->width);
OUT_RING (chan, region->height);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x08d0, 4);
OUT_RING (chan, 0);
OUT_RING (chan, region->sx);
OUT_RING (chan, 0);
@@ -116,7 +116,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
OUT_RING (chan, palette[image->bg_color] | mask);
@@ -125,10 +125,10 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING (chan, image->bg_color);
OUT_RING (chan, image->fg_color);
}
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x0838, 2);
OUT_RING (chan, image->width);
OUT_RING (chan, image->height);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x0850, 4);
OUT_RING (chan, 0);
OUT_RING (chan, image->dx);
OUT_RING (chan, 0);
@@ -143,7 +143,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
dwords -= push;
- BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push);
+ BEGIN_NIC0(chan, NvSub2D, 0x0860, push);
OUT_RINGp(chan, data, push);
data += push;
}
@@ -200,47 +200,47 @@ nvc0_fbcon_accel_init(struct fb_info *info)
return ret;
}
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
OUT_RING (chan, 0x0000902d);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x0104, 2);
OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
OUT_RING (chan, 0);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
OUT_RING (chan, 3);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x02a0, 1);
OUT_RING (chan, 0x55);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x08c0, 4);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x0580, 2);
OUT_RING (chan, 4);
OUT_RING (chan, format);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x02e8, 2);
OUT_RING (chan, 2);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0804, 1);
OUT_RING (chan, format);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0800, 1);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3);
+ BEGIN_NVC0(chan, NvSub2D, 0x0808, 3);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x081c, 1);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x0840, 4);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10);
+ BEGIN_NVC0(chan, NvSub2D, 0x0200, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
OUT_RING (chan, 0);
@@ -251,7 +251,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->var.yres_virtual);
OUT_RING (chan, upper_32_bits(fb->vma.offset));
OUT_RING (chan, lower_32_bits(fb->vma.offset));
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
+ BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
OUT_RING (chan, 0);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
new file mode 100644
index 000000000000..47ab388a606e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fifo.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nvc0_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_bo *bo;
+};
+
+struct nvc0_fence_chan {
+ struct nouveau_fence_chan base;
+ struct nouveau_vma vma;
+};
+
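+/*
+ * Fence scheme: each channel owns a 16-byte slot in a shared VRAM buffer
+ * (priv->bo) that is also mapped into every channel's VM (fctx->vma).
+ * emit() writes the sequence to the emitting channel's slot with a
+ * semaphore WRITE_LONG; sync() issues ACQUIRE_GEQUAL on the signalling
+ * channel's slot, stalling until the fence value lands.
+ */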
+static int
+nvc0_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ u64 addr = fctx->vma.offset + chan->id * 16;
+ int ret;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr));
+ OUT_RING (chan, lower_32_bits(addr));
+ OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ FIRE_RING (chan);
+ }
+
+ return ret;
+}
+
+static int
+nvc0_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ u64 addr = fctx->vma.offset + prev->id * 16;
+ int ret;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr));
+ OUT_RING (chan, lower_32_bits(addr));
+ OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
+ NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+ FIRE_RING (chan);
+ }
+
+ return ret;
+}
+
+static u32
+nvc0_fence_read(struct nouveau_channel *chan)
+{
+ struct nvc0_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+ return nouveau_bo_rd32(priv->bo, chan->id * 16/4);
+}
+
+static void
+nvc0_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
+ struct nvc0_fence_chan *fctx = chan->engctx[engine];
+
+ nouveau_bo_vma_del(priv->bo, &fctx->vma);
+ nouveau_fence_context_del(&fctx->base);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nvc0_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
+ struct nvc0_fence_chan *fctx;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ nouveau_fence_context_new(&fctx->base);
+
+ ret = nouveau_bo_vma_add(priv->bo, chan->vm, &fctx->vma);
+ if (ret)
+ nvc0_fence_context_del(chan, engine);
+
+ nouveau_bo_wr32(priv->bo, chan->id * 16/4, 0x00000000);
+ return ret;
+}
+
+static int
+nvc0_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static int
+nvc0_fence_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static void
+nvc0_fence_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fence_priv *priv = nv_engine(dev, engine);
+
+ nouveau_bo_unmap(priv->bo);
+ nouveau_bo_ref(NULL, &priv->bo);
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nvc0_fence_create(struct drm_device *dev)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fence_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.engine.destroy = nvc0_fence_destroy;
+ priv->base.engine.init = nvc0_fence_init;
+ priv->base.engine.fini = nvc0_fence_fini;
+ priv->base.engine.context_new = nvc0_fence_context_new;
+ priv->base.engine.context_del = nvc0_fence_context_del;
+ priv->base.emit = nvc0_fence_emit;
+ priv->base.sync = nvc0_fence_sync;
+ priv->base.read = nvc0_fence_read;
+ dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+
+ ret = nouveau_bo_new(dev, 16 * pfifo->channels, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, NULL, &priv->bo);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (ret == 0)
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
+ if (ret)
+ nvc0_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 50d68a7a1379..7d85553d518c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -26,10 +26,12 @@
#include "nouveau_drv.h"
#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
static void nvc0_fifo_isr(struct drm_device *);
struct nvc0_fifo_priv {
+ struct nouveau_fifo_priv base;
struct nouveau_gpuobj *playlist[2];
int cur_playlist;
struct nouveau_vma user_vma;
@@ -37,8 +39,8 @@ struct nvc0_fifo_priv {
};
struct nvc0_fifo_chan {
+ struct nouveau_fifo_chan base;
struct nouveau_gpuobj *user;
- struct nouveau_gpuobj *ramfc;
};
static void
@@ -46,8 +48,7 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nvc0_fifo_priv *priv = pfifo->priv;
+ struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_gpuobj *cur;
int i, p;
@@ -69,59 +70,20 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
NV_ERROR(dev, "PFIFO - playlist update failed\n");
}
-void
-nvc0_fifo_disable(struct drm_device *dev)
-{
-}
-
-void
-nvc0_fifo_enable(struct drm_device *dev)
-{
-}
-
-bool
-nvc0_fifo_reassign(struct drm_device *dev, bool enable)
-{
- return false;
-}
-
-bool
-nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
-{
- return false;
-}
-
-int
-nvc0_fifo_channel_id(struct drm_device *dev)
-{
- return 127;
-}
-
-int
-nvc0_fifo_create_context(struct nouveau_channel *chan)
+static int
+nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nvc0_fifo_priv *priv = pfifo->priv;
- struct nvc0_fifo_chan *fifoch;
+ struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nvc0_fifo_chan *fctx;
u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
- int ret;
+ int ret, i;
- chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
- if (!chan->fifo_priv)
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
return -ENOMEM;
- fifoch = chan->fifo_priv;
-
- /* allocate vram for control regs, map into polling area */
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &fifoch->user);
- if (ret)
- goto error;
-
- nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
- *(struct nouveau_mem **)fifoch->user->node);
chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
priv->user_vma.offset + (chan->id * 0x1000),
@@ -131,176 +93,77 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
goto error;
}
- /* ramfc */
- ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
- chan->ramin->vinst, 0x100,
- NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
+ /* allocate vram for control regs, map into polling area */
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &fctx->user);
if (ret)
goto error;
- nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(fifoch->user->vinst));
- nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(fifoch->user->vinst));
- nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
- nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
- nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
- nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+ nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
+ *(struct nouveau_mem **)fctx->user->node);
+
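+ /* the ramfc is now built directly at the start of the channel's ramin
+ * rather than in a separate fake gpuobj; clear it before filling it */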
+ for (i = 0; i < 0x100; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+ nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst));
+ nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst));
+ nv_wo32(chan->ramin, 0x10, 0x0000face);
+ nv_wo32(chan->ramin, 0x30, 0xfffff902);
+ nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
upper_32_bits(ib_virt));
- nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
- nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
- nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
- nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
- nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
- nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
- nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
- nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
- nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
- nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
+ nv_wo32(chan->ramin, 0x54, 0x00000002);
+ nv_wo32(chan->ramin, 0x84, 0x20400000);
+ nv_wo32(chan->ramin, 0x94, 0x30000001);
+ nv_wo32(chan->ramin, 0x9c, 0x00000100);
+ nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
+ nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
+ nv_wo32(chan->ramin, 0xac, 0x0000001f);
+ nv_wo32(chan->ramin, 0xb8, 0xf8000000);
+ nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
pinstmem->flush(dev);
nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
(chan->ramin->vinst >> 12));
nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
nvc0_fifo_playlist_update(dev);
- return 0;
error:
- pfifo->destroy_context(chan);
+ if (ret)
+ priv->base.base.context_del(chan, engine);
return ret;
}
-void
-nvc0_fifo_destroy_context(struct nouveau_channel *chan)
+static void
+nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
{
+ struct nvc0_fifo_chan *fctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
- struct nvc0_fifo_chan *fifoch;
nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
nv_wr32(dev, 0x002634, chan->id);
if (!nv_wait(dev, 0x002634, 0xffffffff, chan->id))
NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
-
nvc0_fifo_playlist_update(dev);
-
nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
+ nouveau_gpuobj_ref(NULL, &fctx->user);
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
- fifoch = chan->fifo_priv;
- chan->fifo_priv = NULL;
- if (!fifoch)
- return;
-
- nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
- nouveau_gpuobj_ref(NULL, &fifoch->user);
- kfree(fifoch);
-}
-
-int
-nvc0_fifo_load_context(struct nouveau_channel *chan)
-{
- return 0;
-}
-
-int
-nvc0_fifo_unload_context(struct drm_device *dev)
-{
- int i;
-
- for (i = 0; i < 128; i++) {
- if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
- continue;
-
- nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
- nv_wr32(dev, 0x002634, i);
- if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
- NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
- i, nv_rd32(dev, 0x002634));
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
-static void
-nvc0_fifo_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nvc0_fifo_priv *priv;
-
- priv = pfifo->priv;
- if (!priv)
- return;
-
- nouveau_vm_put(&priv->user_vma);
- nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
- nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
- kfree(priv);
-}
-
-void
-nvc0_fifo_takedown(struct drm_device *dev)
-{
- nv_wr32(dev, 0x002140, 0x00000000);
- nvc0_fifo_destroy(dev);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
}
static int
-nvc0_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nvc0_fifo_priv *priv;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- pfifo->priv = priv;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
- &priv->playlist[0]);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
- &priv->playlist[1]);
- if (ret)
- goto error;
-
- ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
- 12, NV_MEM_ACCESS_RW, &priv->user_vma);
- if (ret)
- goto error;
-
- nouveau_irq_register(dev, 8, nvc0_fifo_isr);
- NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
- return 0;
-
-error:
- nvc0_fifo_destroy(dev);
- return ret;
-}
-
-int
-nvc0_fifo_init(struct drm_device *dev)
+nvc0_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
struct nouveau_channel *chan;
- struct nvc0_fifo_priv *priv;
- int ret, i;
-
- if (!pfifo->priv) {
- ret = nvc0_fifo_create(dev);
- if (ret)
- return ret;
- }
- priv = pfifo->priv;
+ int i;
/* reset PFIFO, enable all available PSUBFIFO areas */
nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
@@ -338,7 +201,7 @@ nvc0_fifo_init(struct drm_device *dev)
/* restore PFIFO context table */
for (i = 0; i < 128; i++) {
chan = dev_priv->channels.ptr[i];
- if (!chan || !chan->fifo_priv)
+ if (!chan || !chan->engctx[engine])
continue;
nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
@@ -350,6 +213,29 @@ nvc0_fifo_init(struct drm_device *dev)
return 0;
}
+static int
+nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ int i;
+
+ for (i = 0; i < 128; i++) {
+ if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
+ continue;
+
+ nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x002634, i);
+ if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
+ NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
+ i, nv_rd32(dev, 0x002634));
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, 0x002140, 0x00000000);
+ return 0;
+}
+
struct nouveau_enum nvc0_fifo_fault_unit[] = {
{ 0x00, "PGRAPH" },
{ 0x03, "PEEPHOLE" },
@@ -439,13 +325,14 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
static int
nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
{
+ struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) {
+ if (likely(chid >= 0 && chid < priv->base.channels)) {
chan = dev_priv->channels.ptr[chid];
if (likely(chan))
ret = nouveau_finish_page_flip(chan, NULL);
@@ -534,3 +421,56 @@ nvc0_fifo_isr(struct drm_device *dev)
nv_wr32(dev, 0x002140, 0);
}
}
+
+static void
+nvc0_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_vm_put(&priv->user_vma);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nvc0_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nvc0_fifo_destroy;
+ priv->base.base.init = nvc0_fifo_init;
+ priv->base.base.fini = nvc0_fifo_fini;
+ priv->base.base.context_new = nvc0_fifo_context_new;
+ priv->base.base.context_del = nvc0_fifo_context_del;
+ priv->base.channels = 128;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000,
+ 12, NV_MEM_ACCESS_RW, &priv->user_vma);
+ if (ret)
+ goto error;
+
+ nouveau_irq_register(dev, 8, nvc0_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 9066102d1159..2a01e6e47724 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -29,6 +29,7 @@
#include "nouveau_drv.h"
#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
#include "nvc0_graph.h"
#include "nvc0_grhub.fuc.h"
@@ -620,13 +621,14 @@ nvc0_graph_init(struct drm_device *dev, int engine)
int
nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
unsigned long flags;
int i;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->ramin)
continue;
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index ce65f81bb871..7c95c44e2887 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -164,7 +164,9 @@ struct nvc0_pm_clock {
};
struct nvc0_pm_state {
+ struct nouveau_pm_level *perflvl;
struct nvc0_pm_clock eng[16];
+ struct nvc0_pm_clock mem;
};
static u32
@@ -303,6 +305,48 @@ calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
return 0;
}
+static int
+calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
+{
+ struct pll_lims pll;
+ int N, M, P, ret;
+ u32 ctrl;
+
+ /* mclk pll input freq comes from another pll, make sure it's on */
+ ctrl = nv_rd32(dev, 0x132020);
+ if (!(ctrl & 0x00000001)) {
+ /* if not, program it to 567MHz.  it's unclear where this value comes
+ * from - it looks like it's in the pll limits table for
+ * 132000 but the binary driver ignores all my attempts to
+ * change this value.
+ */
+ nv_wr32(dev, 0x137320, 0x00000103);
+ nv_wr32(dev, 0x137330, 0x81200606);
+ nv_wait(dev, 0x132020, 0x00010000, 0x00010000);
+ nv_wr32(dev, 0x132024, 0x0001150f);
+ nv_mask(dev, 0x132020, 0x00000001, 0x00000001);
+ nv_wait(dev, 0x137390, 0x00020000, 0x00020000);
+ nv_mask(dev, 0x132020, 0x00000004, 0x00000004);
+ }
+
+ /* for the moment, until the clock tree is better understood, use
+ * pll mode for all clock frequencies
+ */
+ ret = get_pll_limits(dev, 0x132000, &pll);
+ if (ret == 0) {
+ pll.refclk = read_pll(dev, 0x132020);
+ if (pll.refclk) {
+ ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P);
+ if (ret > 0) {
+ info->coef = (P << 16) | (N << 8) | M;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
void *
nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
@@ -335,6 +379,15 @@ nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
return ERR_PTR(ret);
}
+ if (perflvl->memory) {
+ ret = calc_mem(dev, &info->mem, perflvl->memory);
+ if (ret) {
+ kfree(info);
+ return ERR_PTR(ret);
+ }
+ }
+
+ info->perflvl = perflvl;
return info;
}
@@ -375,12 +428,148 @@ prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
}
+static void
+mclk_precharge(struct nouveau_mem_exec_func *exec)
+{
+}
+
+static void
+mclk_refresh(struct nouveau_mem_exec_func *exec)
+{
+}
+
+static void
+mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
+{
+ nv_wr32(exec->dev, 0x10f210, enable ? 0x80000000 : 0x00000000);
+}
+
+static void
+mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
+{
+}
+
+static void
+mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
+{
+ udelay((nsec + 500) / 1000);
+}
+
+static u32
+mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
+{
+ struct drm_device *dev = exec->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
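+ /* non-GDDR5 parts keep MR0/1 at 0x10f300+ and MR2/3 at 0x10f320+;
+ * GDDR5 keeps MR0 at 0x10f300, MR1-7 at 0x10f32c+ and MR15 at
+ * 0x10f34c (layout inferred from the accesses below) */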
+ if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
+ if (mr <= 1)
+ return nv_rd32(dev, 0x10f300 + ((mr - 0) * 4));
+ return nv_rd32(dev, 0x10f320 + ((mr - 2) * 4));
+ } else {
+ if (mr == 0)
+ return nv_rd32(dev, 0x10f300 + (mr * 4));
+ else
+ if (mr <= 7)
+ return nv_rd32(dev, 0x10f32c + (mr * 4));
+ return nv_rd32(dev, 0x10f34c);
+ }
+}
+
+static void
+mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
+{
+ struct drm_device *dev = exec->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
+ if (mr <= 1) {
+ nv_wr32(dev, 0x10f300 + ((mr - 0) * 4), data);
+ if (dev_priv->vram_rank_B)
+ nv_wr32(dev, 0x10f308 + ((mr - 0) * 4), data);
+ } else
+ if (mr <= 3) {
+ nv_wr32(dev, 0x10f320 + ((mr - 2) * 4), data);
+ if (dev_priv->vram_rank_B)
+ nv_wr32(dev, 0x10f328 + ((mr - 2) * 4), data);
+ }
+ } else {
+ if (mr == 0)
+ nv_wr32(dev, 0x10f300 + (mr * 4), data);
+ else
+ if (mr <= 7)
+ nv_wr32(dev, 0x10f32c + (mr * 4), data);
+ else
+ if (mr == 15)
+ nv_wr32(dev, 0x10f34c, data);
+ }
+}
+
+static void
+mclk_clock_set(struct nouveau_mem_exec_func *exec)
+{
+ struct nvc0_pm_state *info = exec->priv;
+ struct drm_device *dev = exec->dev;
+ u32 ctrl = nv_rd32(dev, 0x132000);
+
+ nv_wr32(dev, 0x137360, 0x00000001);
+ nv_wr32(dev, 0x137370, 0x00000000);
+ nv_wr32(dev, 0x137380, 0x00000000);
+ if (ctrl & 0x00000001)
+ nv_wr32(dev, 0x132000, (ctrl &= ~0x00000001));
+
+ nv_wr32(dev, 0x132004, info->mem.coef);
+ nv_wr32(dev, 0x132000, (ctrl |= 0x00000001));
+ nv_wait(dev, 0x137390, 0x00000002, 0x00000002);
+ nv_wr32(dev, 0x132018, 0x00005000);
+
+ nv_wr32(dev, 0x137370, 0x00000001);
+ nv_wr32(dev, 0x137380, 0x00000001);
+ nv_wr32(dev, 0x137360, 0x00000000);
+}
+
+static void
+mclk_timing_set(struct nouveau_mem_exec_func *exec)
+{
+ struct nvc0_pm_state *info = exec->priv;
+ struct nouveau_pm_level *perflvl = info->perflvl;
+ int i;
+
+ for (i = 0; i < 5; i++)
+ nv_wr32(exec->dev, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
+}
+
+static void
+prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_mem_exec_func exec = {
+ .dev = dev,
+ .precharge = mclk_precharge,
+ .refresh = mclk_refresh,
+ .refresh_auto = mclk_refresh_auto,
+ .refresh_self = mclk_refresh_self,
+ .wait = mclk_wait,
+ .mrg = mclk_mrg,
+ .mrs = mclk_mrs,
+ .clock_set = mclk_clock_set,
+ .timing_set = mclk_timing_set,
+ .priv = info
+ };
+
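+ /* quiesce the display engine around the reclock; the register moved
+ * on nvd9+ (0x62c000 rather than 0x611200).  the values written here
+ * follow observed behaviour rather than documentation. */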
+ if (dev_priv->chipset < 0xd0)
+ nv_wr32(dev, 0x611200, 0x00003300);
+ else
+ nv_wr32(dev, 0x62c000, 0x03030000);
+
+ nouveau_mem_exec(&exec, info->perflvl);
+
+ if (dev_priv->chipset < 0xd0)
+ nv_wr32(dev, 0x611200, 0x00003300);
+ else
+ nv_wr32(dev, 0x62c000, 0x03030300);
+}
int
nvc0_pm_clocks_set(struct drm_device *dev, void *data)
{
struct nvc0_pm_state *info = data;
int i;
+ if (info->mem.coef)
+ prog_mem(dev, info);
+
for (i = 0; i < 16; i++) {
if (!info->eng[i].freq)
continue;
diff --git a/drivers/gpu/drm/nouveau/nvc0_software.c b/drivers/gpu/drm/nouveau/nvc0_software.c
new file mode 100644
index 000000000000..93e8c164fec6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_software.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+#include "nouveau_software.h"
+
+#include "nv50_display.h"
+
+struct nvc0_software_priv {
+ struct nouveau_software_priv base;
+};
+
+struct nvc0_software_chan {
+ struct nouveau_software_chan base;
+ struct nouveau_vma dispc_vma[4];
+};
+
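+/* return the channel-local VM offset of a CRTC's display semaphore
+ * buffer; these are mapped into dispc_vma[] at context_new time */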
+u64
+nvc0_software_crtc(struct nouveau_channel *chan, int crtc)
+{
+ struct nvc0_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+ return pch->dispc_vma[crtc].offset;
+}
+
+static int
+nvc0_software_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
+ struct nvc0_software_chan *pch;
+ int ret = 0, i;
+
+ pch = kzalloc(sizeof(*pch), GFP_KERNEL);
+ if (!pch)
+ return -ENOMEM;
+
+ nouveau_software_context_new(&pch->base);
+ chan->engctx[engine] = pch;
+
+ /* map display semaphore buffers into channel's vm */
+ for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo;
+ if (dev_priv->card_type >= NV_D0)
+ bo = nvd0_display_crtc_sema(dev, i);
+ else
+ bo = nv50_display(dev)->crtc[i].sem.bo;
+
+ ret = nouveau_bo_vma_add(bo, chan->vm, &pch->dispc_vma[i]);
+ }
+
+ if (ret)
+ psw->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nvc0_software_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_software_chan *pch = chan->engctx[engine];
+ int i;
+
+ if (dev_priv->card_type >= NV_D0) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &pch->dispc_vma[i]);
+ }
+ } else
+ if (dev_priv->card_type >= NV_50) {
+ struct nv50_display *disp = nv50_display(dev);
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nv50_display_crtc *dispc = &disp->crtc[i];
+ nouveau_bo_vma_del(dispc->sem.bo, &pch->dispc_vma[i]);
+ }
+ }
+
+ chan->engctx[engine] = NULL;
+ kfree(pch);
+}
+
+static int
+nvc0_software_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ return 0;
+}
+
+static int
+nvc0_software_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static int
+nvc0_software_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nvc0_software_destroy(struct drm_device *dev, int engine)
+{
+ struct nvc0_software_priv *psw = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, SW);
+ kfree(psw);
+}
+
+int
+nvc0_software_create(struct drm_device *dev)
+{
+ struct nvc0_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
+ if (!psw)
+ return -ENOMEM;
+
+ psw->base.base.destroy = nvc0_software_destroy;
+ psw->base.base.init = nvc0_software_init;
+ psw->base.base.fini = nvc0_software_fini;
+ psw->base.base.context_new = nvc0_software_context_new;
+ psw->base.base.context_del = nvc0_software_context_del;
+ psw->base.base.object_new = nvc0_software_object_new;
+ nouveau_software_create(&psw->base);
+
+ NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
+ NVOBJ_CLASS(dev, 0x906e, SW);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 0247250939e8..c486d3ce3c2c 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -33,6 +33,7 @@
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
#include "nouveau_fb.h"
+#include "nouveau_software.h"
#include "nv50_display.h"
#define EVO_DMA_NR 9
@@ -284,8 +285,6 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
u32 *push;
int ret;

- evo_sync(crtc->dev, EVO_MASTER);
-
swap_interval <<= 4;
if (swap_interval == 0)
swap_interval |= 0x100;
@@ -300,15 +299,16 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (ret)
return ret;
- offset = chan->dispc_vma[nv_crtc->index].offset;
+
+ offset = nvc0_software_crtc(chan, nv_crtc->index);
offset += evo->sem.offset;

- BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | evo->sem.value);
OUT_RING (chan, 0x1002);

- BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
@@ -882,7 +882,7 @@ nvd0_crtc_create(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(crtc, 256);

ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -895,7 +895,7 @@ nvd0_crtc_create(struct drm_device *dev, int index)
goto out;

ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->lut.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -2030,7 +2030,7 @@ nvd0_display_create(struct drm_device *dev)

/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &disp->sync);
+ 0, 0x0000, NULL, &disp->sync);
if (!ret) {
ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
new file mode 100644
index 000000000000..1855ecbd843b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
+
+#define NVE0_FIFO_ENGINE_NUM 32
+
+static void nve0_fifo_isr(struct drm_device *);
+
+struct nve0_fifo_engine {
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nve0_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
+ struct {
+ struct nouveau_gpuobj *mem;
+ struct nouveau_vma bar;
+ } user;
+ int spoon_nr;
+};
+
+struct nve0_fifo_chan {
+ struct nouveau_fifo_chan base;
+ u32 engine;
+};
+
+static void
+nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct nve0_fifo_engine *peng = &priv->engine[engine];
+ struct nouveau_gpuobj *cur;
+ u32 match = (engine << 16) | 0x00000001;
+ int ret, i, p;
+
+ cur = peng->playlist[peng->cur_playlist];
+ if (unlikely(cur == NULL)) {
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
+ if (ret) {
+ NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
+ return;
+ }
+
+ peng->playlist[peng->cur_playlist] = cur;
+ }
+
+ peng->cur_playlist = !peng->cur_playlist;
+
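+ /* add every channel currently bound to this engine to the new playlist */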
+ for (i = 0, p = 0; i < priv->base.channels; i++) {
+ u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
+ if (ctrl != match)
+ continue;
+ nv_wo32(cur, p + 0, i);
+ nv_wo32(cur, p + 4, 0x00000000);
+ p += 8;
+ }
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x002270, cur->vinst >> 12);
+ nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
+ if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+ NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
+}
+
+static int
+nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nve0_fifo_chan *fctx;
+ u64 usermem = priv->user.mem->vinst + chan->id * 512;
+ u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
+ int ret = 0, i;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ fctx->engine = 0; /* PGRAPH */
+
+ /* allocate vram for control regs, map into polling area */
+ chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
+ priv->user.bar.offset + (chan->id * 512), 512);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
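+ /* initialise the channel's instance block: usermem address, pushbuf IB, magic */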
+ for (i = 0; i < 0x100; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+ nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
+ nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
+ nv_wo32(chan->ramin, 0x10, 0x0000face);
+ nv_wo32(chan->ramin, 0x30, 0xfffff902);
+ nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+ upper_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x84, 0x20400000);
+ nv_wo32(chan->ramin, 0x94, 0x30000001);
+ nv_wo32(chan->ramin, 0x9c, 0x00000100);
+ nv_wo32(chan->ramin, 0xac, 0x0000001f);
+ nv_wo32(chan->ramin, 0xe4, 0x00000000);
+ nv_wo32(chan->ramin, 0xe8, chan->id);
+ nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
+ (chan->ramin->vinst >> 12));
+ nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
+ nve0_fifo_playlist_update(dev, fctx->engine);
+ nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nve0_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+
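+ /* disable the channel, kick it off the hardware, drop it from the playlist */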
+ nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
+ nv_wr32(dev, 0x002634, chan->id);
+ if (!nv_wait(dev, 0x002634, 0xffffffff, chan->id))
+ NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
+ nve0_fifo_playlist_update(dev, fctx->engine);
+ nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
+
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nve0_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nve0_fifo_chan *fctx;
+ int i;
+
+ /* reset PFIFO, enable all available PSUBFIFO areas */
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x000204, 0xffffffff);
+
+ priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
+ NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+
+ /* PSUBFIFO[n] */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
+ }
+
+ nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+ nv_wr32(dev, 0x002a00, 0xffffffff);
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xbfffffff);
+
+ /* restore PFIFO context table */
+ for (i = 0; i < priv->base.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (!chan || !(fctx = chan->engctx[engine]))
+ continue;
+
+ nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
+ (chan->ramin->vinst >> 12));
+ nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
+ nve0_fifo_playlist_update(dev, fctx->engine);
+ nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
+ }
+
+ return 0;
+}
+
+static int
+nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
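+ /* kick all active channels off the hardware, then disable PFIFO interrupts */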
+ for (i = 0; i < priv->base.channels; i++) {
+ if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
+ continue;
+
+ nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
+ nv_wr32(dev, 0x002634, i);
+ if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
+ NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
+ i, nv_rd32(dev, 0x002634));
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, 0x002140, 0x00000000);
+ return 0;
+}
+
+struct nouveau_enum nve0_fifo_fault_unit[] = {
+ {}
+};
+
+struct nouveau_enum nve0_fifo_fault_reason[] = {
+ { 0x00, "PT_NOT_PRESENT" },
+ { 0x01, "PT_TOO_SHORT" },
+ { 0x02, "PAGE_NOT_PRESENT" },
+ { 0x03, "VM_LIMIT_EXCEEDED" },
+ { 0x04, "NO_CHANNEL" },
+ { 0x05, "PAGE_SYSTEM_ONLY" },
+ { 0x06, "PAGE_READ_ONLY" },
+ { 0x0a, "COMPRESSED_SYSRAM" },
+ { 0x0c, "INVALID_STORAGE_TYPE" },
+ {}
+};
+
+struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+ {}
+};
+
+struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+ {}
+};
+
+struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
+
+static void
+nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+{
+ u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
+ u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
+ u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
+ u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+ u32 client = (stat & 0x00001f00) >> 8;
+
+ NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
+ (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+ nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
+ printk("] from ");
+ nouveau_enum_print(nve0_fifo_fault_unit, unit);
+ if (stat & 0x00000040) {
+ printk("/");
+ nouveau_enum_print(nve0_fifo_fault_hubclient, client);
+ } else {
+ printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
+ }
+ printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static void
+nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+{
+ u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
+ u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
+ u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+
+ NV_INFO(dev, "PSUBFIFO %d:", unit);
+ nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat);
+ NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid, subc, mthd, data);
+
+ nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nve0_fifo_isr(struct drm_device *dev)
+{
+ u32 stat = nv_rd32(dev, 0x002100);
+
+ if (stat & 0x00000100) {
+ NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
+ nv_wr32(dev, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x10000000) {
+ u32 units = nv_rd32(dev, 0x00259c);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nve0_fifo_isr_vm_fault(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x00259c, units);
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 units = nv_rd32(dev, 0x0025a0);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nve0_fifo_isr_subfifo_intr(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x0025a0, units);
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
+ nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+ stat &= ~0x40000000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
+ nv_wr32(dev, 0x002100, stat);
+ nv_wr32(dev, 0x002140, 0);
+ }
+}
+
+static void
+nve0_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ nouveau_irq_unregister(dev, 8); /* registered in nve0_fifo_create() */
+ nouveau_vm_put(&priv->user.bar);
+ nouveau_gpuobj_ref(NULL, &priv->user.mem);
+
+ for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
+ }
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nve0_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nve0_fifo_destroy;
+ priv->base.base.init = nve0_fifo_init;
+ priv->base.base.fini = nve0_fifo_fini;
+ priv->base.base.context_new = nve0_fifo_context_new;
+ priv->base.base.context_del = nve0_fifo_context_del;
+ priv->base.channels = 4096;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
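+ /* per-channel control registers, mapped through BAR1 so they can be polled */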
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
+ 12, NV_MEM_ACCESS_RW, &priv->user.bar);
+ if (ret)
+ goto error;
+
+ nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);
+
+ nouveau_irq_register(dev, 8, nve0_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.c b/drivers/gpu/drm/nouveau/nve0_graph.c
new file mode 100644
index 000000000000..8a8051b68f10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_graph.c
@@ -0,0 +1,835 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
+
+#include "nve0_graph.h"
+
+static void
+nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
+{
+ NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
+ nv_rd32(dev, base + 0x400));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
+ nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
+ nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
+}
+
+static void
+nve0_graph_ctxctl_debug(struct drm_device *dev)
+{
+ u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
+ u32 gpc;
+
+ nve0_graph_ctxctl_debug_unit(dev, 0x409000);
+ for (gpc = 0; gpc < gpcnr; gpc++)
+ nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
+}
+
+static int
+nve0_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, 0x409840, 0x00000030);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000003);
+ if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
+ NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
+
+ return 0;
+}
+
+static int
+nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
+{
+ nv_wr32(dev, 0x409840, 0x00000003);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
+ nv_wr32(dev, 0x409504, 0x00000009);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nve0_graph_construct_context(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ int ret, i;
+ u32 *ctx;
+
+ ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
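+ /* generate the initial ("golden") context image once; it's copied into
+ * the grctx of every subsequently created channel */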
+ nve0_graph_load_context(chan);
+
+ nv_wo32(grch->grctx, 0x1c, 1);
+ nv_wo32(grch->grctx, 0x20, 0);
+ nv_wo32(grch->grctx, 0x28, 0);
+ nv_wo32(grch->grctx, 0x2c, 0);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nve0_grctx_generate(chan);
+ if (ret)
+ goto err;
+
+ ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ ctx[i / 4] = nv_ro32(grch->grctx, i);
+
+ priv->grctx_vals = ctx;
+ return 0;
+
+err:
+ kfree(ctx);
+ return ret;
+}
+
+static int
+nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
+{
+ struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ u32 magic[GPC_MAX][2];
+ u16 offset = 0x0000;
+ int gpc;
+ int ret;
+
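+ /* allocate the per-channel buffers, then build the list of mmio writes
+ * that are replayed when this channel's context is loaded */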
+ ret = nouveau_gpuobj_new(dev, chan, 0x3000, 256, NVOBJ_FLAG_VM,
+ &grch->unk408004);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
+ &grch->unk40800c);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
+ &grch->unk418810);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
+ &grch->mmio);
+ if (ret)
+ return ret;
+
+#define mmio(r,v) do { \
+ nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 0, (r)); \
+ nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 4, (v)); \
+ grch->mmio_nr++; \
+} while (0)
+ mmio(0x40800c, grch->unk40800c->linst >> 8);
+ mmio(0x408010, 0x80000000);
+ mmio(0x419004, grch->unk40800c->linst >> 8);
+ mmio(0x419008, 0x00000000);
+ mmio(0x4064cc, 0x80000000);
+ mmio(0x408004, grch->unk408004->linst >> 8);
+ mmio(0x408008, 0x80000030);
+ mmio(0x418808, grch->unk408004->linst >> 8);
+ mmio(0x41880c, 0x80000030);
+ mmio(0x4064c8, 0x01800600);
+ mmio(0x418810, 0x80000000 | grch->unk418810->linst >> 12);
+ mmio(0x419848, 0x10000000 | grch->unk418810->linst >> 12);
+ mmio(0x405830, 0x02180648);
+ mmio(0x4064c4, 0x0192ffff);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
+ u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
+ magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
+ magic[gpc][1] = 0x00000000 | (magic1 << 16);
+ offset += 0x0324 * priv->tpc_nr[gpc];
+ }
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ mmio(GPC_UNIT(gpc, 0x30c0), magic[gpc][0]);
+ mmio(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset);
+ offset += 0x07ff * priv->tpc_nr[gpc];
+ }
+
+ mmio(0x17e91c, 0x06060609);
+ mmio(0x17e920, 0x00090a05);
+#undef mmio
+ return 0;
+}
+
+static int
+nve0_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nve0_graph_priv *priv = nv_engine(dev, engine);
+ struct nve0_graph_chan *grch;
+ struct nouveau_gpuobj *grctx;
+ int ret, i;
+
+ grch = kzalloc(sizeof(*grch), GFP_KERNEL);
+ if (!grch)
+ return -ENOMEM;
+ chan->engctx[NVOBJ_ENGINE_GR] = grch;
+
+ ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
+ &grch->grctx);
+ if (ret)
+ goto error;
+ grctx = grch->grctx;
+
+ ret = nve0_graph_create_context_mmio_list(chan);
+ if (ret)
+ goto error;
+
+ nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
+ nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
+ pinstmem->flush(dev);
+
+ if (!priv->grctx_vals) {
+ ret = nve0_graph_construct_context(chan);
+ if (ret)
+ goto error;
+ }
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
+ nv_wo32(grctx, 0xf4, 0);
+ nv_wo32(grctx, 0xf8, 0);
+ nv_wo32(grctx, 0x10, grch->mmio_nr);
+ nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x1c, 1);
+ nv_wo32(grctx, 0x20, 0);
+ nv_wo32(grctx, 0x28, 0);
+ nv_wo32(grctx, 0x2c, 0);
+
+ pinstmem->flush(dev);
+ return 0;
+
+error:
+ priv->base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nve0_graph_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nve0_graph_chan *grch = chan->engctx[engine];
+
+ nouveau_gpuobj_ref(NULL, &grch->mmio);
+ nouveau_gpuobj_ref(NULL, &grch->unk418810);
+ nouveau_gpuobj_ref(NULL, &grch->unk40800c);
+ nouveau_gpuobj_ref(NULL, &grch->unk408004);
+ nouveau_gpuobj_ref(NULL, &grch->grctx);
+ chan->engctx[engine] = NULL;
+}
+
+static int
+nve0_graph_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ return 0;
+}
+
+static int
+nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nve0_graph_init_obj418880(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int i;
+
+ nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
+ nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
+}
+
+static void
+nve0_graph_init_regs(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x400080, 0x003083c2);
+ nv_wr32(dev, 0x400088, 0x0001ffe7);
+ nv_wr32(dev, 0x40008c, 0x00000000);
+ nv_wr32(dev, 0x400090, 0x00000030);
+ nv_wr32(dev, 0x40013c, 0x003901f7);
+ nv_wr32(dev, 0x400140, 0x00000100);
+ nv_wr32(dev, 0x400144, 0x00000000);
+ nv_wr32(dev, 0x400148, 0x00000110);
+ nv_wr32(dev, 0x400138, 0x00000000);
+ nv_wr32(dev, 0x400130, 0x00000000);
+ nv_wr32(dev, 0x400134, 0x00000000);
+ nv_wr32(dev, 0x400124, 0x00000002);
+}
+
+static void
+nve0_graph_init_units(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x409ffc, 0x00000000);
+ nv_wr32(dev, 0x409c14, 0x00003e3e);
+ nv_wr32(dev, 0x409c24, 0x000f0000);
+
+ nv_wr32(dev, 0x404000, 0xc0000000);
+ nv_wr32(dev, 0x404600, 0xc0000000);
+ nv_wr32(dev, 0x408030, 0xc0000000);
+ nv_wr32(dev, 0x404490, 0xc0000000);
+ nv_wr32(dev, 0x406018, 0xc0000000);
+ nv_wr32(dev, 0x407020, 0xc0000000);
+ nv_wr32(dev, 0x405840, 0xc0000000);
+ nv_wr32(dev, 0x405844, 0x00ffffff);
+
+ nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
+ nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nve0_graph_init_gpc_0(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+ u32 data[TPC_MAX / 8];
+ u8 tpcnr[GPC_MAX];
+ int i, gpc, tpc;
+
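+ /* distribute TPCs across the GPCs round-robin and broadcast the layout */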
+ nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);
+
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
+ nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
+ nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
+ nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+ priv->tpc_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
+ nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
+}
+
+static void
+nve0_graph_init_gpc_1(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int gpc, tpc;
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+ }
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+}
+
+static void
+nve0_graph_init_rop(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int rop;
+
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+}
+
+static void
+nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
+ struct nve0_graph_fuc *code, struct nve0_graph_fuc *data)
+{
+ int i;
+
+ nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
+ for (i = 0; i < data->size / 4; i++)
+ nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
+
+ nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
+ for (i = 0; i < code->size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, fuc_base + 0x0188, i >> 6);
+ nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
+ }
+}
+
+static int
+nve0_graph_init_ctxctl(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ u32 r000260;
+
+ /* load fuc microcode */
+ r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
+ nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
+ nv_wr32(dev, 0x000260, r000260);
+
+ /* start both of them running */
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x41a10c, 0x00000000);
+ nv_wr32(dev, 0x40910c, 0x00000000);
+ nv_wr32(dev, 0x41a100, 0x00000002);
+ nv_wr32(dev, 0x409100, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
+ NV_INFO(dev, "0x409800 wait failed\n");
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x7fffffff);
+ nv_wr32(dev, 0x409504, 0x00000021);
+
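+ /* request the required size of the context image from the ucode */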
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000010);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
+ return -EBUSY;
+ }
+ priv->grctx_size = nv_rd32(dev, 0x409800);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000016);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000025);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409800, 0x00000000);
+ nv_wr32(dev, 0x409500, 0x00000001);
+ nv_wr32(dev, 0x409504, 0x00000030);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409810, 0xb00095c8);
+ nv_wr32(dev, 0x409800, 0x00000000);
+ nv_wr32(dev, 0x409500, 0x00000001);
+ nv_wr32(dev, 0x409504, 0x00000031);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409810, 0x00080420);
+ nv_wr32(dev, 0x409800, 0x00000000);
+ nv_wr32(dev, 0x409500, 0x00000001);
+ nv_wr32(dev, 0x409504, 0x00000032);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409614, 0x00000070);
+ nv_wr32(dev, 0x409614, 0x00000770);
+ nv_wr32(dev, 0x40802c, 0x00000001);
+ return 0;
+}
+
+static int
+nve0_graph_init(struct drm_device *dev, int engine)
+{
+ int ret;
+
+ nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
+
+ nve0_graph_init_obj418880(dev);
+ nve0_graph_init_regs(dev);
+ nve0_graph_init_gpc_0(dev);
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+ nv_wr32(dev, 0x400100, 0xffffffff);
+ nv_wr32(dev, 0x40013c, 0xffffffff);
+
+ nve0_graph_init_units(dev);
+ nve0_graph_init_gpc_1(dev);
+ nve0_graph_init_rop(dev);
+
+ nv_wr32(dev, 0x400108, 0xffffffff);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, 0x400118, 0xffffffff);
+ nv_wr32(dev, 0x400130, 0xffffffff);
+ nv_wr32(dev, 0x40011c, 0xffffffff);
+ nv_wr32(dev, 0x400134, 0xffffffff);
+ nv_wr32(dev, 0x400054, 0x34ce3464);
+
+ ret = nve0_graph_init_ctxctl(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < pfifo->channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nve0_graph_ctxctl_isr(struct drm_device *dev)
+{
+ u32 ustat = nv_rd32(dev, 0x409c18);
+
+ if (ustat & 0x00000001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
+ if (ustat & 0x00080000)
+ NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
+ if (ustat & ~0x00080001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
+
+ nve0_graph_ctxctl_debug(dev);
+ nv_wr32(dev, 0x409c20, ustat);
+}
+
+static void
+nve0_graph_trap_isr(struct drm_device *dev, int chid)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ u32 trap = nv_rd32(dev, 0x400108);
+ int rop;
+
+ if (trap & 0x00000001) {
+ u32 stat = nv_rd32(dev, 0x404000);
+ NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
+ nv_wr32(dev, 0x404000, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x00000001);
+ trap &= ~0x00000001;
+ }
+
+ if (trap & 0x00000010) {
+ u32 stat = nv_rd32(dev, 0x405840);
+ NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
+ nv_wr32(dev, 0x405840, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x00000010);
+ trap &= ~0x00000010;
+ }
+
+ if (trap & 0x02000000) {
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
+ u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
+ NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
+ rop, chid, statz, statc);
+ nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+ }
+ nv_wr32(dev, 0x400108, 0x02000000);
+ trap &= ~0x02000000;
+ }
+
+ if (trap) {
+ NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
+ nv_wr32(dev, 0x400108, trap);
+ }
+}
+
+static void
+nve0_graph_isr(struct drm_device *dev)
+{
+ u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+ u32 chid = nve0_graph_isr_chid(dev, inst);
+ u32 stat = nv_rd32(dev, 0x400100);
+ u32 addr = nv_rd32(dev, 0x400704);
+ u32 mthd = (addr & 0x00003ffc);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 data = nv_rd32(dev, 0x400708);
+ u32 code = nv_rd32(dev, 0x400110);
+ u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
+
+ if (stat & 0x00000010) {
+ if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
+ nv_wr32(dev, 0x400100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
+ if (stat & 0x00000020) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00000020);
+ stat &= ~0x00000020;
+ }
+
+ if (stat & 0x00100000) {
+ NV_INFO(dev, "PGRAPH: DATA_ERROR [");
+ nouveau_enum_print(nv50_data_error_names, code);
+ printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00100000);
+ stat &= ~0x00100000;
+ }
+
+ if (stat & 0x00200000) {
+ nve0_graph_trap_isr(dev, chid);
+ nv_wr32(dev, 0x400100, 0x00200000);
+ stat &= ~0x00200000;
+ }
+
+ if (stat & 0x00080000) {
+ nve0_graph_ctxctl_isr(dev);
+ nv_wr32(dev, 0x400100, 0x00080000);
+ stat &= ~0x00080000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
+ nv_wr32(dev, 0x400100, stat);
+ }
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+}
+
+static int
+nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
+ struct nve0_graph_fuc *fuc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const struct firmware *fw;
+ char f[32];
+ int ret;
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
+ ret = request_firmware(&fw, f, &dev->pdev->dev);
+ if (ret)
+ return ret;
+
+ fuc->size = fw->size;
+ fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (fuc->data != NULL) ? 0 : -ENOMEM;
+}
+
+static void
+nve0_graph_destroy_fw(struct nve0_graph_fuc *fuc)
+{
+ if (fuc->data) {
+ kfree(fuc->data);
+ fuc->data = NULL;
+ }
+}
+
+static void
+nve0_graph_destroy(struct drm_device *dev, int engine)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, engine);
+
+ nve0_graph_destroy_fw(&priv->fuc409c);
+ nve0_graph_destroy_fw(&priv->fuc409d);
+ nve0_graph_destroy_fw(&priv->fuc41ac);
+ nve0_graph_destroy_fw(&priv->fuc41ad);
+
+ nouveau_irq_unregister(dev, 12);
+
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+ kfree(priv->grctx_vals);
+
+ NVOBJ_ENGINE_DEL(dev, GR);
+ kfree(priv);
+}
+
+int
+nve0_graph_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_graph_priv *priv;
+ int ret, gpc, i;
+ u32 kepler;
+
+ kepler = nve0_graph_class(dev);
+ if (!kepler) {
+ NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
+ return 0;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.destroy = nve0_graph_destroy;
+ priv->base.init = nve0_graph_init;
+ priv->base.fini = nve0_graph_fini;
+ priv->base.context_new = nve0_graph_context_new;
+ priv->base.context_del = nve0_graph_context_del;
+ priv->base.object_new = nve0_graph_object_new;
+
+ NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
+ nouveau_irq_register(dev, 12, nve0_graph_isr);
+
+ NV_INFO(dev, "PGRAPH: using external firmware\n");
+ if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
+ nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
+ nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
+ nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
+ ret = 0;
+ goto error;
+ }
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+ if (ret)
+ goto error;
+
+ for (i = 0; i < 0x1000; i += 4) {
+ nv_wo32(priv->unk4188b4, i, 0x00000010);
+ nv_wo32(priv->unk4188b8, i, 0x00000010);
+ }
+
+ priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
+ priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
+ priv->tpc_total += priv->tpc_nr[gpc];
+ }
+
+ switch (dev_priv->chipset) {
+ case 0xe4:
+ if (priv->tpc_total == 8)
+ priv->magic_not_rop_nr = 3;
+ else
+ if (priv->tpc_total == 7)
+ priv->magic_not_rop_nr = 1;
+ break;
+ case 0xe7:
+ priv->magic_not_rop_nr = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (!priv->magic_not_rop_nr) {
+ NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
+ priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
+ priv->tpc_nr[3], priv->rop_nr);
+ priv->magic_not_rop_nr = 0x00;
+ }
+
+ NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
+ NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
+ NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
+ NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
+ NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
+ return 0;
+
+error:
+ nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.h b/drivers/gpu/drm/nouveau/nve0_graph.h
new file mode 100644
index 000000000000..2ba70449ba01
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_graph.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NVE0_GRAPH_H__
+#define __NVE0_GRAPH_H__
+
+#define GPC_MAX 4
+#define TPC_MAX 32
+
+#define ROP_BCAST(r) (0x408800 + (r))
+#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r) (0x418000 + (r))
+#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
+#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
+struct nve0_graph_fuc {
+ u32 *data;
+ u32 size;
+};
+
+struct nve0_graph_priv {
+ struct nouveau_exec_engine base;
+
+ struct nve0_graph_fuc fuc409c;
+ struct nve0_graph_fuc fuc409d;
+ struct nve0_graph_fuc fuc41ac;
+ struct nve0_graph_fuc fuc41ad;
+
+ u8 gpc_nr;
+ u8 rop_nr;
+ u8 tpc_nr[GPC_MAX];
+ u8 tpc_total;
+
+ u32 grctx_size;
+ u32 *grctx_vals;
+ struct nouveau_gpuobj *unk4188b4;
+ struct nouveau_gpuobj *unk4188b8;
+
+ u8 magic_not_rop_nr;
+};
+
+struct nve0_graph_chan {
+ struct nouveau_gpuobj *grctx;
+ struct nouveau_gpuobj *unk408004; /* 0x418810 too */
+ struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
+ struct nouveau_gpuobj *unk418810; /* 0x419848 too */
+ struct nouveau_gpuobj *mmio;
+ int mmio_nr;
+};
+
+int nve0_grctx_generate(struct nouveau_channel *);
+
+/* nve0_graph.c uses this also to determine supported chipsets */
+static inline u32
+nve0_graph_class(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0xe4:
+ case 0xe7:
+ return 0xa097;
+ default:
+ return 0;
+ }
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nve0_grctx.c b/drivers/gpu/drm/nouveau/nve0_grctx.c
new file mode 100644
index 000000000000..d8cb360e92c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_grctx.c
@@ -0,0 +1,2779 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nve0_graph.h"
+
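+/* submit one init-command to PGRAPH: data first, then method, then wait for idle */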
+static void
+nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
+{
+ nv_wr32(dev, 0x400204, data);
+ nv_wr32(dev, 0x400200, icmd);
+ while (nv_rd32(dev, 0x400700) & 0x00000002) {}
+}
+
+static void
+nve0_grctx_generate_icmd(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x400208, 0x80000000);
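+ /* default state for the graphics classes, one nv_icmd() per method */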
+ nv_icmd(dev, 0x001000, 0x00000004);
+ nv_icmd(dev, 0x000039, 0x00000000);
+ nv_icmd(dev, 0x00003a, 0x00000000);
+ nv_icmd(dev, 0x00003b, 0x00000000);
+ nv_icmd(dev, 0x0000a9, 0x0000ffff);
+ nv_icmd(dev, 0x000038, 0x0fac6881);
+ nv_icmd(dev, 0x00003d, 0x00000001);
+ nv_icmd(dev, 0x0000e8, 0x00000400);
+ nv_icmd(dev, 0x0000e9, 0x00000400);
+ nv_icmd(dev, 0x0000ea, 0x00000400);
+ nv_icmd(dev, 0x0000eb, 0x00000400);
+ nv_icmd(dev, 0x0000ec, 0x00000400);
+ nv_icmd(dev, 0x0000ed, 0x00000400);
+ nv_icmd(dev, 0x0000ee, 0x00000400);
+ nv_icmd(dev, 0x0000ef, 0x00000400);
+ nv_icmd(dev, 0x000078, 0x00000300);
+ nv_icmd(dev, 0x000079, 0x00000300);
+ nv_icmd(dev, 0x00007a, 0x00000300);
+ nv_icmd(dev, 0x00007b, 0x00000300);
+ nv_icmd(dev, 0x00007c, 0x00000300);
+ nv_icmd(dev, 0x00007d, 0x00000300);
+ nv_icmd(dev, 0x00007e, 0x00000300);
+ nv_icmd(dev, 0x00007f, 0x00000300);
+ nv_icmd(dev, 0x000050, 0x00000011);
+ nv_icmd(dev, 0x000058, 0x00000008);
+ nv_icmd(dev, 0x000059, 0x00000008);
+ nv_icmd(dev, 0x00005a, 0x00000008);
+ nv_icmd(dev, 0x00005b, 0x00000008);
+ nv_icmd(dev, 0x00005c, 0x00000008);
+ nv_icmd(dev, 0x00005d, 0x00000008);
+ nv_icmd(dev, 0x00005e, 0x00000008);
+ nv_icmd(dev, 0x00005f, 0x00000008);
+ nv_icmd(dev, 0x000208, 0x00000001);
+ nv_icmd(dev, 0x000209, 0x00000001);
+ nv_icmd(dev, 0x00020a, 0x00000001);
+ nv_icmd(dev, 0x00020b, 0x00000001);
+ nv_icmd(dev, 0x00020c, 0x00000001);
+ nv_icmd(dev, 0x00020d, 0x00000001);
+ nv_icmd(dev, 0x00020e, 0x00000001);
+ nv_icmd(dev, 0x00020f, 0x00000001);
+ nv_icmd(dev, 0x000081, 0x00000001);
+ nv_icmd(dev, 0x000085, 0x00000004);
+ nv_icmd(dev, 0x000088, 0x00000400);
+ nv_icmd(dev, 0x000090, 0x00000300);
+ nv_icmd(dev, 0x000098, 0x00001001);
+ nv_icmd(dev, 0x0000e3, 0x00000001);
+ nv_icmd(dev, 0x0000da, 0x00000001);
+ nv_icmd(dev, 0x0000f8, 0x00000003);
+ nv_icmd(dev, 0x0000fa, 0x00000001);
+ nv_icmd(dev, 0x00009f, 0x0000ffff);
+ nv_icmd(dev, 0x0000a0, 0x0000ffff);
+ nv_icmd(dev, 0x0000a1, 0x0000ffff);
+ nv_icmd(dev, 0x0000a2, 0x0000ffff);
+ nv_icmd(dev, 0x0000b1, 0x00000001);
+ nv_icmd(dev, 0x0000ad, 0x0000013e);
+ nv_icmd(dev, 0x0000e1, 0x00000010);
+ nv_icmd(dev, 0x000290, 0x00000000);
+ nv_icmd(dev, 0x000291, 0x00000000);
+ nv_icmd(dev, 0x000292, 0x00000000);
+ nv_icmd(dev, 0x000293, 0x00000000);
+ nv_icmd(dev, 0x000294, 0x00000000);
+ nv_icmd(dev, 0x000295, 0x00000000);
+ nv_icmd(dev, 0x000296, 0x00000000);
+ nv_icmd(dev, 0x000297, 0x00000000);
+ nv_icmd(dev, 0x000298, 0x00000000);
+ nv_icmd(dev, 0x000299, 0x00000000);
+ nv_icmd(dev, 0x00029a, 0x00000000);
+ nv_icmd(dev, 0x00029b, 0x00000000);
+ nv_icmd(dev, 0x00029c, 0x00000000);
+ nv_icmd(dev, 0x00029d, 0x00000000);
+ nv_icmd(dev, 0x00029e, 0x00000000);
+ nv_icmd(dev, 0x00029f, 0x00000000);
+ nv_icmd(dev, 0x0003b0, 0x00000000);
+ nv_icmd(dev, 0x0003b1, 0x00000000);
+ nv_icmd(dev, 0x0003b2, 0x00000000);
+ nv_icmd(dev, 0x0003b3, 0x00000000);
+ nv_icmd(dev, 0x0003b4, 0x00000000);
+ nv_icmd(dev, 0x0003b5, 0x00000000);
+ nv_icmd(dev, 0x0003b6, 0x00000000);
+ nv_icmd(dev, 0x0003b7, 0x00000000);
+ nv_icmd(dev, 0x0003b8, 0x00000000);
+ nv_icmd(dev, 0x0003b9, 0x00000000);
+ nv_icmd(dev, 0x0003ba, 0x00000000);
+ nv_icmd(dev, 0x0003bb, 0x00000000);
+ nv_icmd(dev, 0x0003bc, 0x00000000);
+ nv_icmd(dev, 0x0003bd, 0x00000000);
+ nv_icmd(dev, 0x0003be, 0x00000000);
+ nv_icmd(dev, 0x0003bf, 0x00000000);
+ nv_icmd(dev, 0x0002a0, 0x00000000);
+ nv_icmd(dev, 0x0002a1, 0x00000000);
+ nv_icmd(dev, 0x0002a2, 0x00000000);
+ nv_icmd(dev, 0x0002a3, 0x00000000);
+ nv_icmd(dev, 0x0002a4, 0x00000000);
+ nv_icmd(dev, 0x0002a5, 0x00000000);
+ nv_icmd(dev, 0x0002a6, 0x00000000);
+ nv_icmd(dev, 0x0002a7, 0x00000000);
+ nv_icmd(dev, 0x0002a8, 0x00000000);
+ nv_icmd(dev, 0x0002a9, 0x00000000);
+ nv_icmd(dev, 0x0002aa, 0x00000000);
+ nv_icmd(dev, 0x0002ab, 0x00000000);
+ nv_icmd(dev, 0x0002ac, 0x00000000);
+ nv_icmd(dev, 0x0002ad, 0x00000000);
+ nv_icmd(dev, 0x0002ae, 0x00000000);
+ nv_icmd(dev, 0x0002af, 0x00000000);
+ nv_icmd(dev, 0x000420, 0x00000000);
+ nv_icmd(dev, 0x000421, 0x00000000);
+ nv_icmd(dev, 0x000422, 0x00000000);
+ nv_icmd(dev, 0x000423, 0x00000000);
+ nv_icmd(dev, 0x000424, 0x00000000);
+ nv_icmd(dev, 0x000425, 0x00000000);
+ nv_icmd(dev, 0x000426, 0x00000000);
+ nv_icmd(dev, 0x000427, 0x00000000);
+ nv_icmd(dev, 0x000428, 0x00000000);
+ nv_icmd(dev, 0x000429, 0x00000000);
+ nv_icmd(dev, 0x00042a, 0x00000000);
+ nv_icmd(dev, 0x00042b, 0x00000000);
+ nv_icmd(dev, 0x00042c, 0x00000000);
+ nv_icmd(dev, 0x00042d, 0x00000000);
+ nv_icmd(dev, 0x00042e, 0x00000000);
+ nv_icmd(dev, 0x00042f, 0x00000000);
+ nv_icmd(dev, 0x0002b0, 0x00000000);
+ nv_icmd(dev, 0x0002b1, 0x00000000);
+ nv_icmd(dev, 0x0002b2, 0x00000000);
+ nv_icmd(dev, 0x0002b3, 0x00000000);
+ nv_icmd(dev, 0x0002b4, 0x00000000);
+ nv_icmd(dev, 0x0002b5, 0x00000000);
+ nv_icmd(dev, 0x0002b6, 0x00000000);
+ nv_icmd(dev, 0x0002b7, 0x00000000);
+ nv_icmd(dev, 0x0002b8, 0x00000000);
+ nv_icmd(dev, 0x0002b9, 0x00000000);
+ nv_icmd(dev, 0x0002ba, 0x00000000);
+ nv_icmd(dev, 0x0002bb, 0x00000000);
+ nv_icmd(dev, 0x0002bc, 0x00000000);
+ nv_icmd(dev, 0x0002bd, 0x00000000);
+ nv_icmd(dev, 0x0002be, 0x00000000);
+ nv_icmd(dev, 0x0002bf, 0x00000000);
+ nv_icmd(dev, 0x000430, 0x00000000);
+ nv_icmd(dev, 0x000431, 0x00000000);
+ nv_icmd(dev, 0x000432, 0x00000000);
+ nv_icmd(dev, 0x000433, 0x00000000);
+ nv_icmd(dev, 0x000434, 0x00000000);
+ nv_icmd(dev, 0x000435, 0x00000000);
+ nv_icmd(dev, 0x000436, 0x00000000);
+ nv_icmd(dev, 0x000437, 0x00000000);
+ nv_icmd(dev, 0x000438, 0x00000000);
+ nv_icmd(dev, 0x000439, 0x00000000);
+ nv_icmd(dev, 0x00043a, 0x00000000);
+ nv_icmd(dev, 0x00043b, 0x00000000);
+ nv_icmd(dev, 0x00043c, 0x00000000);
+ nv_icmd(dev, 0x00043d, 0x00000000);
+ nv_icmd(dev, 0x00043e, 0x00000000);
+ nv_icmd(dev, 0x00043f, 0x00000000);
+ nv_icmd(dev, 0x0002c0, 0x00000000);
+ nv_icmd(dev, 0x0002c1, 0x00000000);
+ nv_icmd(dev, 0x0002c2, 0x00000000);
+ nv_icmd(dev, 0x0002c3, 0x00000000);
+ nv_icmd(dev, 0x0002c4, 0x00000000);
+ nv_icmd(dev, 0x0002c5, 0x00000000);
+ nv_icmd(dev, 0x0002c6, 0x00000000);
+ nv_icmd(dev, 0x0002c7, 0x00000000);
+ nv_icmd(dev, 0x0002c8, 0x00000000);
+ nv_icmd(dev, 0x0002c9, 0x00000000);
+ nv_icmd(dev, 0x0002ca, 0x00000000);
+ nv_icmd(dev, 0x0002cb, 0x00000000);
+ nv_icmd(dev, 0x0002cc, 0x00000000);
+ nv_icmd(dev, 0x0002cd, 0x00000000);
+ nv_icmd(dev, 0x0002ce, 0x00000000);
+ nv_icmd(dev, 0x0002cf, 0x00000000);
+ nv_icmd(dev, 0x0004d0, 0x00000000);
+ nv_icmd(dev, 0x0004d1, 0x00000000);
+ nv_icmd(dev, 0x0004d2, 0x00000000);
+ nv_icmd(dev, 0x0004d3, 0x00000000);
+ nv_icmd(dev, 0x0004d4, 0x00000000);
+ nv_icmd(dev, 0x0004d5, 0x00000000);
+ nv_icmd(dev, 0x0004d6, 0x00000000);
+ nv_icmd(dev, 0x0004d7, 0x00000000);
+ nv_icmd(dev, 0x0004d8, 0x00000000);
+ nv_icmd(dev, 0x0004d9, 0x00000000);
+ nv_icmd(dev, 0x0004da, 0x00000000);
+ nv_icmd(dev, 0x0004db, 0x00000000);
+ nv_icmd(dev, 0x0004dc, 0x00000000);
+ nv_icmd(dev, 0x0004dd, 0x00000000);
+ nv_icmd(dev, 0x0004de, 0x00000000);
+ nv_icmd(dev, 0x0004df, 0x00000000);
+ nv_icmd(dev, 0x000720, 0x00000000);
+ nv_icmd(dev, 0x000721, 0x00000000);
+ nv_icmd(dev, 0x000722, 0x00000000);
+ nv_icmd(dev, 0x000723, 0x00000000);
+ nv_icmd(dev, 0x000724, 0x00000000);
+ nv_icmd(dev, 0x000725, 0x00000000);
+ nv_icmd(dev, 0x000726, 0x00000000);
+ nv_icmd(dev, 0x000727, 0x00000000);
+ nv_icmd(dev, 0x000728, 0x00000000);
+ nv_icmd(dev, 0x000729, 0x00000000);
+ nv_icmd(dev, 0x00072a, 0x00000000);
+ nv_icmd(dev, 0x00072b, 0x00000000);
+ nv_icmd(dev, 0x00072c, 0x00000000);
+ nv_icmd(dev, 0x00072d, 0x00000000);
+ nv_icmd(dev, 0x00072e, 0x00000000);
+ nv_icmd(dev, 0x00072f, 0x00000000);
+ nv_icmd(dev, 0x0008c0, 0x00000000);
+ nv_icmd(dev, 0x0008c1, 0x00000000);
+ nv_icmd(dev, 0x0008c2, 0x00000000);
+ nv_icmd(dev, 0x0008c3, 0x00000000);
+ nv_icmd(dev, 0x0008c4, 0x00000000);
+ nv_icmd(dev, 0x0008c5, 0x00000000);
+ nv_icmd(dev, 0x0008c6, 0x00000000);
+ nv_icmd(dev, 0x0008c7, 0x00000000);
+ nv_icmd(dev, 0x0008c8, 0x00000000);
+ nv_icmd(dev, 0x0008c9, 0x00000000);
+ nv_icmd(dev, 0x0008ca, 0x00000000);
+ nv_icmd(dev, 0x0008cb, 0x00000000);
+ nv_icmd(dev, 0x0008cc, 0x00000000);
+ nv_icmd(dev, 0x0008cd, 0x00000000);
+ nv_icmd(dev, 0x0008ce, 0x00000000);
+ nv_icmd(dev, 0x0008cf, 0x00000000);
+ nv_icmd(dev, 0x000890, 0x00000000);
+ nv_icmd(dev, 0x000891, 0x00000000);
+ nv_icmd(dev, 0x000892, 0x00000000);
+ nv_icmd(dev, 0x000893, 0x00000000);
+ nv_icmd(dev, 0x000894, 0x00000000);
+ nv_icmd(dev, 0x000895, 0x00000000);
+ nv_icmd(dev, 0x000896, 0x00000000);
+ nv_icmd(dev, 0x000897, 0x00000000);
+ nv_icmd(dev, 0x000898, 0x00000000);
+ nv_icmd(dev, 0x000899, 0x00000000);
+ nv_icmd(dev, 0x00089a, 0x00000000);
+ nv_icmd(dev, 0x00089b, 0x00000000);
+ nv_icmd(dev, 0x00089c, 0x00000000);
+ nv_icmd(dev, 0x00089d, 0x00000000);
+ nv_icmd(dev, 0x00089e, 0x00000000);
+ nv_icmd(dev, 0x00089f, 0x00000000);
+ nv_icmd(dev, 0x0008e0, 0x00000000);
+ nv_icmd(dev, 0x0008e1, 0x00000000);
+ nv_icmd(dev, 0x0008e2, 0x00000000);
+ nv_icmd(dev, 0x0008e3, 0x00000000);
+ nv_icmd(dev, 0x0008e4, 0x00000000);
+ nv_icmd(dev, 0x0008e5, 0x00000000);
+ nv_icmd(dev, 0x0008e6, 0x00000000);
+ nv_icmd(dev, 0x0008e7, 0x00000000);
+ nv_icmd(dev, 0x0008e8, 0x00000000);
+ nv_icmd(dev, 0x0008e9, 0x00000000);
+ nv_icmd(dev, 0x0008ea, 0x00000000);
+ nv_icmd(dev, 0x0008eb, 0x00000000);
+ nv_icmd(dev, 0x0008ec, 0x00000000);
+ nv_icmd(dev, 0x0008ed, 0x00000000);
+ nv_icmd(dev, 0x0008ee, 0x00000000);
+ nv_icmd(dev, 0x0008ef, 0x00000000);
+ nv_icmd(dev, 0x0008a0, 0x00000000);
+ nv_icmd(dev, 0x0008a1, 0x00000000);
+ nv_icmd(dev, 0x0008a2, 0x00000000);
+ nv_icmd(dev, 0x0008a3, 0x00000000);
+ nv_icmd(dev, 0x0008a4, 0x00000000);
+ nv_icmd(dev, 0x0008a5, 0x00000000);
+ nv_icmd(dev, 0x0008a6, 0x00000000);
+ nv_icmd(dev, 0x0008a7, 0x00000000);
+ nv_icmd(dev, 0x0008a8, 0x00000000);
+ nv_icmd(dev, 0x0008a9, 0x00000000);
+ nv_icmd(dev, 0x0008aa, 0x00000000);
+ nv_icmd(dev, 0x0008ab, 0x00000000);
+ nv_icmd(dev, 0x0008ac, 0x00000000);
+ nv_icmd(dev, 0x0008ad, 0x00000000);
+ nv_icmd(dev, 0x0008ae, 0x00000000);
+ nv_icmd(dev, 0x0008af, 0x00000000);
+ nv_icmd(dev, 0x0008f0, 0x00000000);
+ nv_icmd(dev, 0x0008f1, 0x00000000);
+ nv_icmd(dev, 0x0008f2, 0x00000000);
+ nv_icmd(dev, 0x0008f3, 0x00000000);
+ nv_icmd(dev, 0x0008f4, 0x00000000);
+ nv_icmd(dev, 0x0008f5, 0x00000000);
+ nv_icmd(dev, 0x0008f6, 0x00000000);
+ nv_icmd(dev, 0x0008f7, 0x00000000);
+ nv_icmd(dev, 0x0008f8, 0x00000000);
+ nv_icmd(dev, 0x0008f9, 0x00000000);
+ nv_icmd(dev, 0x0008fa, 0x00000000);
+ nv_icmd(dev, 0x0008fb, 0x00000000);
+ nv_icmd(dev, 0x0008fc, 0x00000000);
+ nv_icmd(dev, 0x0008fd, 0x00000000);
+ nv_icmd(dev, 0x0008fe, 0x00000000);
+ nv_icmd(dev, 0x0008ff, 0x00000000);
+ nv_icmd(dev, 0x00094c, 0x000000ff);
+ nv_icmd(dev, 0x00094d, 0xffffffff);
+ nv_icmd(dev, 0x00094e, 0x00000002);
+ nv_icmd(dev, 0x0002ec, 0x00000001);
+ nv_icmd(dev, 0x000303, 0x00000001);
+ nv_icmd(dev, 0x0002e6, 0x00000001);
+ nv_icmd(dev, 0x000466, 0x00000052);
+ nv_icmd(dev, 0x000301, 0x3f800000);
+ nv_icmd(dev, 0x000304, 0x30201000);
+ nv_icmd(dev, 0x000305, 0x70605040);
+ nv_icmd(dev, 0x000306, 0xb8a89888);
+ nv_icmd(dev, 0x000307, 0xf8e8d8c8);
+ nv_icmd(dev, 0x00030a, 0x00ffff00);
+ nv_icmd(dev, 0x00030b, 0x0000001a);
+ nv_icmd(dev, 0x00030c, 0x00000001);
+ nv_icmd(dev, 0x000318, 0x00000001);
+ nv_icmd(dev, 0x000340, 0x00000000);
+ nv_icmd(dev, 0x000375, 0x00000001);
+ nv_icmd(dev, 0x00037d, 0x00000006);
+ nv_icmd(dev, 0x0003a0, 0x00000002);
+ nv_icmd(dev, 0x0003aa, 0x00000001);
+ nv_icmd(dev, 0x0003a9, 0x00000001);
+ nv_icmd(dev, 0x000380, 0x00000001);
+ nv_icmd(dev, 0x000383, 0x00000011);
+ nv_icmd(dev, 0x000360, 0x00000040);
+ nv_icmd(dev, 0x000366, 0x00000000);
+ nv_icmd(dev, 0x000367, 0x00000000);
+ nv_icmd(dev, 0x000368, 0x00000fff);
+ nv_icmd(dev, 0x000370, 0x00000000);
+ nv_icmd(dev, 0x000371, 0x00000000);
+ nv_icmd(dev, 0x000372, 0x000fffff);
+ nv_icmd(dev, 0x00037a, 0x00000012);
+ nv_icmd(dev, 0x000619, 0x00000003);
+ nv_icmd(dev, 0x000811, 0x00000003);
+ nv_icmd(dev, 0x000812, 0x00000004);
+ nv_icmd(dev, 0x000813, 0x00000006);
+ nv_icmd(dev, 0x000814, 0x00000008);
+ nv_icmd(dev, 0x000815, 0x0000000b);
+ nv_icmd(dev, 0x000800, 0x00000001);
+ nv_icmd(dev, 0x000801, 0x00000001);
+ nv_icmd(dev, 0x000802, 0x00000001);
+ nv_icmd(dev, 0x000803, 0x00000001);
+ nv_icmd(dev, 0x000804, 0x00000001);
+ nv_icmd(dev, 0x000805, 0x00000001);
+ nv_icmd(dev, 0x000632, 0x00000001);
+ nv_icmd(dev, 0x000633, 0x00000002);
+ nv_icmd(dev, 0x000634, 0x00000003);
+ nv_icmd(dev, 0x000635, 0x00000004);
+ nv_icmd(dev, 0x000654, 0x3f800000);
+ nv_icmd(dev, 0x000657, 0x3f800000);
+ nv_icmd(dev, 0x000655, 0x3f800000);
+ nv_icmd(dev, 0x000656, 0x3f800000);
+ nv_icmd(dev, 0x0006cd, 0x3f800000);
+ nv_icmd(dev, 0x0007f5, 0x3f800000);
+ nv_icmd(dev, 0x0007dc, 0x39291909);
+ nv_icmd(dev, 0x0007dd, 0x79695949);
+ nv_icmd(dev, 0x0007de, 0xb9a99989);
+ nv_icmd(dev, 0x0007df, 0xf9e9d9c9);
+ nv_icmd(dev, 0x0007e8, 0x00003210);
+ nv_icmd(dev, 0x0007e9, 0x00007654);
+ nv_icmd(dev, 0x0007ea, 0x00000098);
+ nv_icmd(dev, 0x0007ec, 0x39291909);
+ nv_icmd(dev, 0x0007ed, 0x79695949);
+ nv_icmd(dev, 0x0007ee, 0xb9a99989);
+ nv_icmd(dev, 0x0007ef, 0xf9e9d9c9);
+ nv_icmd(dev, 0x0007f0, 0x00003210);
+ nv_icmd(dev, 0x0007f1, 0x00007654);
+ nv_icmd(dev, 0x0007f2, 0x00000098);
+ nv_icmd(dev, 0x0005a5, 0x00000001);
+ nv_icmd(dev, 0x000980, 0x00000000);
+ nv_icmd(dev, 0x000981, 0x00000000);
+ nv_icmd(dev, 0x000982, 0x00000000);
+ nv_icmd(dev, 0x000983, 0x00000000);
+ nv_icmd(dev, 0x000984, 0x00000000);
+ nv_icmd(dev, 0x000985, 0x00000000);
+ nv_icmd(dev, 0x000986, 0x00000000);
+ nv_icmd(dev, 0x000987, 0x00000000);
+ nv_icmd(dev, 0x000988, 0x00000000);
+ nv_icmd(dev, 0x000989, 0x00000000);
+ nv_icmd(dev, 0x00098a, 0x00000000);
+ nv_icmd(dev, 0x00098b, 0x00000000);
+ nv_icmd(dev, 0x00098c, 0x00000000);
+ nv_icmd(dev, 0x00098d, 0x00000000);
+ nv_icmd(dev, 0x00098e, 0x00000000);
+ nv_icmd(dev, 0x00098f, 0x00000000);
+ nv_icmd(dev, 0x000990, 0x00000000);
+ nv_icmd(dev, 0x000991, 0x00000000);
+ nv_icmd(dev, 0x000992, 0x00000000);
+ nv_icmd(dev, 0x000993, 0x00000000);
+ nv_icmd(dev, 0x000994, 0x00000000);
+ nv_icmd(dev, 0x000995, 0x00000000);
+ nv_icmd(dev, 0x000996, 0x00000000);
+ nv_icmd(dev, 0x000997, 0x00000000);
+ nv_icmd(dev, 0x000998, 0x00000000);
+ nv_icmd(dev, 0x000999, 0x00000000);
+ nv_icmd(dev, 0x00099a, 0x00000000);
+ nv_icmd(dev, 0x00099b, 0x00000000);
+ nv_icmd(dev, 0x00099c, 0x00000000);
+ nv_icmd(dev, 0x00099d, 0x00000000);
+ nv_icmd(dev, 0x00099e, 0x00000000);
+ nv_icmd(dev, 0x00099f, 0x00000000);
+ nv_icmd(dev, 0x0009a0, 0x00000000);
+ nv_icmd(dev, 0x0009a1, 0x00000000);
+ nv_icmd(dev, 0x0009a2, 0x00000000);
+ nv_icmd(dev, 0x0009a3, 0x00000000);
+ nv_icmd(dev, 0x0009a4, 0x00000000);
+ nv_icmd(dev, 0x0009a5, 0x00000000);
+ nv_icmd(dev, 0x0009a6, 0x00000000);
+ nv_icmd(dev, 0x0009a7, 0x00000000);
+ nv_icmd(dev, 0x0009a8, 0x00000000);
+ nv_icmd(dev, 0x0009a9, 0x00000000);
+ nv_icmd(dev, 0x0009aa, 0x00000000);
+ nv_icmd(dev, 0x0009ab, 0x00000000);
+ nv_icmd(dev, 0x0009ac, 0x00000000);
+ nv_icmd(dev, 0x0009ad, 0x00000000);
+ nv_icmd(dev, 0x0009ae, 0x00000000);
+ nv_icmd(dev, 0x0009af, 0x00000000);
+ nv_icmd(dev, 0x0009b0, 0x00000000);
+ nv_icmd(dev, 0x0009b1, 0x00000000);
+ nv_icmd(dev, 0x0009b2, 0x00000000);
+ nv_icmd(dev, 0x0009b3, 0x00000000);
+ nv_icmd(dev, 0x0009b4, 0x00000000);
+ nv_icmd(dev, 0x0009b5, 0x00000000);
+ nv_icmd(dev, 0x0009b6, 0x00000000);
+ nv_icmd(dev, 0x0009b7, 0x00000000);
+ nv_icmd(dev, 0x0009b8, 0x00000000);
+ nv_icmd(dev, 0x0009b9, 0x00000000);
+ nv_icmd(dev, 0x0009ba, 0x00000000);
+ nv_icmd(dev, 0x0009bb, 0x00000000);
+ nv_icmd(dev, 0x0009bc, 0x00000000);
+ nv_icmd(dev, 0x0009bd, 0x00000000);
+ nv_icmd(dev, 0x0009be, 0x00000000);
+ nv_icmd(dev, 0x0009bf, 0x00000000);
+ nv_icmd(dev, 0x0009c0, 0x00000000);
+ nv_icmd(dev, 0x0009c1, 0x00000000);
+ nv_icmd(dev, 0x0009c2, 0x00000000);
+ nv_icmd(dev, 0x0009c3, 0x00000000);
+ nv_icmd(dev, 0x0009c4, 0x00000000);
+ nv_icmd(dev, 0x0009c5, 0x00000000);
+ nv_icmd(dev, 0x0009c6, 0x00000000);
+ nv_icmd(dev, 0x0009c7, 0x00000000);
+ nv_icmd(dev, 0x0009c8, 0x00000000);
+ nv_icmd(dev, 0x0009c9, 0x00000000);
+ nv_icmd(dev, 0x0009ca, 0x00000000);
+ nv_icmd(dev, 0x0009cb, 0x00000000);
+ nv_icmd(dev, 0x0009cc, 0x00000000);
+ nv_icmd(dev, 0x0009cd, 0x00000000);
+ nv_icmd(dev, 0x0009ce, 0x00000000);
+ nv_icmd(dev, 0x0009cf, 0x00000000);
+ nv_icmd(dev, 0x0009d0, 0x00000000);
+ nv_icmd(dev, 0x0009d1, 0x00000000);
+ nv_icmd(dev, 0x0009d2, 0x00000000);
+ nv_icmd(dev, 0x0009d3, 0x00000000);
+ nv_icmd(dev, 0x0009d4, 0x00000000);
+ nv_icmd(dev, 0x0009d5, 0x00000000);
+ nv_icmd(dev, 0x0009d6, 0x00000000);
+ nv_icmd(dev, 0x0009d7, 0x00000000);
+ nv_icmd(dev, 0x0009d8, 0x00000000);
+ nv_icmd(dev, 0x0009d9, 0x00000000);
+ nv_icmd(dev, 0x0009da, 0x00000000);
+ nv_icmd(dev, 0x0009db, 0x00000000);
+ nv_icmd(dev, 0x0009dc, 0x00000000);
+ nv_icmd(dev, 0x0009dd, 0x00000000);
+ nv_icmd(dev, 0x0009de, 0x00000000);
+ nv_icmd(dev, 0x0009df, 0x00000000);
+ nv_icmd(dev, 0x0009e0, 0x00000000);
+ nv_icmd(dev, 0x0009e1, 0x00000000);
+ nv_icmd(dev, 0x0009e2, 0x00000000);
+ nv_icmd(dev, 0x0009e3, 0x00000000);
+ nv_icmd(dev, 0x0009e4, 0x00000000);
+ nv_icmd(dev, 0x0009e5, 0x00000000);
+ nv_icmd(dev, 0x0009e6, 0x00000000);
+ nv_icmd(dev, 0x0009e7, 0x00000000);
+ nv_icmd(dev, 0x0009e8, 0x00000000);
+ nv_icmd(dev, 0x0009e9, 0x00000000);
+ nv_icmd(dev, 0x0009ea, 0x00000000);
+ nv_icmd(dev, 0x0009eb, 0x00000000);
+ nv_icmd(dev, 0x0009ec, 0x00000000);
+ nv_icmd(dev, 0x0009ed, 0x00000000);
+ nv_icmd(dev, 0x0009ee, 0x00000000);
+ nv_icmd(dev, 0x0009ef, 0x00000000);
+ nv_icmd(dev, 0x0009f0, 0x00000000);
+ nv_icmd(dev, 0x0009f1, 0x00000000);
+ nv_icmd(dev, 0x0009f2, 0x00000000);
+ nv_icmd(dev, 0x0009f3, 0x00000000);
+ nv_icmd(dev, 0x0009f4, 0x00000000);
+ nv_icmd(dev, 0x0009f5, 0x00000000);
+ nv_icmd(dev, 0x0009f6, 0x00000000);
+ nv_icmd(dev, 0x0009f7, 0x00000000);
+ nv_icmd(dev, 0x0009f8, 0x00000000);
+ nv_icmd(dev, 0x0009f9, 0x00000000);
+ nv_icmd(dev, 0x0009fa, 0x00000000);
+ nv_icmd(dev, 0x0009fb, 0x00000000);
+ nv_icmd(dev, 0x0009fc, 0x00000000);
+ nv_icmd(dev, 0x0009fd, 0x00000000);
+ nv_icmd(dev, 0x0009fe, 0x00000000);
+ nv_icmd(dev, 0x0009ff, 0x00000000);
+ nv_icmd(dev, 0x000468, 0x00000004);
+ nv_icmd(dev, 0x00046c, 0x00000001);
+ nv_icmd(dev, 0x000470, 0x00000000);
+ nv_icmd(dev, 0x000471, 0x00000000);
+ nv_icmd(dev, 0x000472, 0x00000000);
+ nv_icmd(dev, 0x000473, 0x00000000);
+ nv_icmd(dev, 0x000474, 0x00000000);
+ nv_icmd(dev, 0x000475, 0x00000000);
+ nv_icmd(dev, 0x000476, 0x00000000);
+ nv_icmd(dev, 0x000477, 0x00000000);
+ nv_icmd(dev, 0x000478, 0x00000000);
+ nv_icmd(dev, 0x000479, 0x00000000);
+ nv_icmd(dev, 0x00047a, 0x00000000);
+ nv_icmd(dev, 0x00047b, 0x00000000);
+ nv_icmd(dev, 0x00047c, 0x00000000);
+ nv_icmd(dev, 0x00047d, 0x00000000);
+ nv_icmd(dev, 0x00047e, 0x00000000);
+ nv_icmd(dev, 0x00047f, 0x00000000);
+ nv_icmd(dev, 0x000480, 0x00000000);
+ nv_icmd(dev, 0x000481, 0x00000000);
+ nv_icmd(dev, 0x000482, 0x00000000);
+ nv_icmd(dev, 0x000483, 0x00000000);
+ nv_icmd(dev, 0x000484, 0x00000000);
+ nv_icmd(dev, 0x000485, 0x00000000);
+ nv_icmd(dev, 0x000486, 0x00000000);
+ nv_icmd(dev, 0x000487, 0x00000000);
+ nv_icmd(dev, 0x000488, 0x00000000);
+ nv_icmd(dev, 0x000489, 0x00000000);
+ nv_icmd(dev, 0x00048a, 0x00000000);
+ nv_icmd(dev, 0x00048b, 0x00000000);
+ nv_icmd(dev, 0x00048c, 0x00000000);
+ nv_icmd(dev, 0x00048d, 0x00000000);
+ nv_icmd(dev, 0x00048e, 0x00000000);
+ nv_icmd(dev, 0x00048f, 0x00000000);
+ nv_icmd(dev, 0x000490, 0x00000000);
+ nv_icmd(dev, 0x000491, 0x00000000);
+ nv_icmd(dev, 0x000492, 0x00000000);
+ nv_icmd(dev, 0x000493, 0x00000000);
+ nv_icmd(dev, 0x000494, 0x00000000);
+ nv_icmd(dev, 0x000495, 0x00000000);
+ nv_icmd(dev, 0x000496, 0x00000000);
+ nv_icmd(dev, 0x000497, 0x00000000);
+ nv_icmd(dev, 0x000498, 0x00000000);
+ nv_icmd(dev, 0x000499, 0x00000000);
+ nv_icmd(dev, 0x00049a, 0x00000000);
+ nv_icmd(dev, 0x00049b, 0x00000000);
+ nv_icmd(dev, 0x00049c, 0x00000000);
+ nv_icmd(dev, 0x00049d, 0x00000000);
+ nv_icmd(dev, 0x00049e, 0x00000000);
+ nv_icmd(dev, 0x00049f, 0x00000000);
+ nv_icmd(dev, 0x0004a0, 0x00000000);
+ nv_icmd(dev, 0x0004a1, 0x00000000);
+ nv_icmd(dev, 0x0004a2, 0x00000000);
+ nv_icmd(dev, 0x0004a3, 0x00000000);
+ nv_icmd(dev, 0x0004a4, 0x00000000);
+ nv_icmd(dev, 0x0004a5, 0x00000000);
+ nv_icmd(dev, 0x0004a6, 0x00000000);
+ nv_icmd(dev, 0x0004a7, 0x00000000);
+ nv_icmd(dev, 0x0004a8, 0x00000000);
+ nv_icmd(dev, 0x0004a9, 0x00000000);
+ nv_icmd(dev, 0x0004aa, 0x00000000);
+ nv_icmd(dev, 0x0004ab, 0x00000000);
+ nv_icmd(dev, 0x0004ac, 0x00000000);
+ nv_icmd(dev, 0x0004ad, 0x00000000);
+ nv_icmd(dev, 0x0004ae, 0x00000000);
+ nv_icmd(dev, 0x0004af, 0x00000000);
+ nv_icmd(dev, 0x0004b0, 0x00000000);
+ nv_icmd(dev, 0x0004b1, 0x00000000);
+ nv_icmd(dev, 0x0004b2, 0x00000000);
+ nv_icmd(dev, 0x0004b3, 0x00000000);
+ nv_icmd(dev, 0x0004b4, 0x00000000);
+ nv_icmd(dev, 0x0004b5, 0x00000000);
+ nv_icmd(dev, 0x0004b6, 0x00000000);
+ nv_icmd(dev, 0x0004b7, 0x00000000);
+ nv_icmd(dev, 0x0004b8, 0x00000000);
+ nv_icmd(dev, 0x0004b9, 0x00000000);
+ nv_icmd(dev, 0x0004ba, 0x00000000);
+ nv_icmd(dev, 0x0004bb, 0x00000000);
+ nv_icmd(dev, 0x0004bc, 0x00000000);
+ nv_icmd(dev, 0x0004bd, 0x00000000);
+ nv_icmd(dev, 0x0004be, 0x00000000);
+ nv_icmd(dev, 0x0004bf, 0x00000000);
+ nv_icmd(dev, 0x0004c0, 0x00000000);
+ nv_icmd(dev, 0x0004c1, 0x00000000);
+ nv_icmd(dev, 0x0004c2, 0x00000000);
+ nv_icmd(dev, 0x0004c3, 0x00000000);
+ nv_icmd(dev, 0x0004c4, 0x00000000);
+ nv_icmd(dev, 0x0004c5, 0x00000000);
+ nv_icmd(dev, 0x0004c6, 0x00000000);
+ nv_icmd(dev, 0x0004c7, 0x00000000);
+ nv_icmd(dev, 0x0004c8, 0x00000000);
+ nv_icmd(dev, 0x0004c9, 0x00000000);
+ nv_icmd(dev, 0x0004ca, 0x00000000);
+ nv_icmd(dev, 0x0004cb, 0x00000000);
+ nv_icmd(dev, 0x0004cc, 0x00000000);
+ nv_icmd(dev, 0x0004cd, 0x00000000);
+ nv_icmd(dev, 0x0004ce, 0x00000000);
+ nv_icmd(dev, 0x0004cf, 0x00000000);
+ nv_icmd(dev, 0x000510, 0x3f800000);
+ nv_icmd(dev, 0x000511, 0x3f800000);
+ nv_icmd(dev, 0x000512, 0x3f800000);
+ nv_icmd(dev, 0x000513, 0x3f800000);
+ nv_icmd(dev, 0x000514, 0x3f800000);
+ nv_icmd(dev, 0x000515, 0x3f800000);
+ nv_icmd(dev, 0x000516, 0x3f800000);
+ nv_icmd(dev, 0x000517, 0x3f800000);
+ nv_icmd(dev, 0x000518, 0x3f800000);
+ nv_icmd(dev, 0x000519, 0x3f800000);
+ nv_icmd(dev, 0x00051a, 0x3f800000);
+ nv_icmd(dev, 0x00051b, 0x3f800000);
+ nv_icmd(dev, 0x00051c, 0x3f800000);
+ nv_icmd(dev, 0x00051d, 0x3f800000);
+ nv_icmd(dev, 0x00051e, 0x3f800000);
+ nv_icmd(dev, 0x00051f, 0x3f800000);
+ nv_icmd(dev, 0x000520, 0x000002b6);
+ nv_icmd(dev, 0x000529, 0x00000001);
+ nv_icmd(dev, 0x000530, 0xffff0000);
+ nv_icmd(dev, 0x000531, 0xffff0000);
+ nv_icmd(dev, 0x000532, 0xffff0000);
+ nv_icmd(dev, 0x000533, 0xffff0000);
+ nv_icmd(dev, 0x000534, 0xffff0000);
+ nv_icmd(dev, 0x000535, 0xffff0000);
+ nv_icmd(dev, 0x000536, 0xffff0000);
+ nv_icmd(dev, 0x000537, 0xffff0000);
+ nv_icmd(dev, 0x000538, 0xffff0000);
+ nv_icmd(dev, 0x000539, 0xffff0000);
+ nv_icmd(dev, 0x00053a, 0xffff0000);
+ nv_icmd(dev, 0x00053b, 0xffff0000);
+ nv_icmd(dev, 0x00053c, 0xffff0000);
+ nv_icmd(dev, 0x00053d, 0xffff0000);
+ nv_icmd(dev, 0x00053e, 0xffff0000);
+ nv_icmd(dev, 0x00053f, 0xffff0000);
+ nv_icmd(dev, 0x000585, 0x0000003f);
+ nv_icmd(dev, 0x000576, 0x00000003);
+ nv_icmd(dev, 0x00057b, 0x00000059);
+ nv_icmd(dev, 0x000586, 0x00000040);
+ nv_icmd(dev, 0x000582, 0x00000080);
+ nv_icmd(dev, 0x000583, 0x00000080);
+ nv_icmd(dev, 0x0005c2, 0x00000001);
+ nv_icmd(dev, 0x000638, 0x00000001);
+ nv_icmd(dev, 0x000639, 0x00000001);
+ nv_icmd(dev, 0x00063a, 0x00000002);
+ nv_icmd(dev, 0x00063b, 0x00000001);
+ nv_icmd(dev, 0x00063c, 0x00000001);
+ nv_icmd(dev, 0x00063d, 0x00000002);
+ nv_icmd(dev, 0x00063e, 0x00000001);
+ nv_icmd(dev, 0x0008b8, 0x00000001);
+ nv_icmd(dev, 0x0008b9, 0x00000001);
+ nv_icmd(dev, 0x0008ba, 0x00000001);
+ nv_icmd(dev, 0x0008bb, 0x00000001);
+ nv_icmd(dev, 0x0008bc, 0x00000001);
+ nv_icmd(dev, 0x0008bd, 0x00000001);
+ nv_icmd(dev, 0x0008be, 0x00000001);
+ nv_icmd(dev, 0x0008bf, 0x00000001);
+ nv_icmd(dev, 0x000900, 0x00000001);
+ nv_icmd(dev, 0x000901, 0x00000001);
+ nv_icmd(dev, 0x000902, 0x00000001);
+ nv_icmd(dev, 0x000903, 0x00000001);
+ nv_icmd(dev, 0x000904, 0x00000001);
+ nv_icmd(dev, 0x000905, 0x00000001);
+ nv_icmd(dev, 0x000906, 0x00000001);
+ nv_icmd(dev, 0x000907, 0x00000001);
+ nv_icmd(dev, 0x000908, 0x00000002);
+ nv_icmd(dev, 0x000909, 0x00000002);
+ nv_icmd(dev, 0x00090a, 0x00000002);
+ nv_icmd(dev, 0x00090b, 0x00000002);
+ nv_icmd(dev, 0x00090c, 0x00000002);
+ nv_icmd(dev, 0x00090d, 0x00000002);
+ nv_icmd(dev, 0x00090e, 0x00000002);
+ nv_icmd(dev, 0x00090f, 0x00000002);
+ nv_icmd(dev, 0x000910, 0x00000001);
+ nv_icmd(dev, 0x000911, 0x00000001);
+ nv_icmd(dev, 0x000912, 0x00000001);
+ nv_icmd(dev, 0x000913, 0x00000001);
+ nv_icmd(dev, 0x000914, 0x00000001);
+ nv_icmd(dev, 0x000915, 0x00000001);
+ nv_icmd(dev, 0x000916, 0x00000001);
+ nv_icmd(dev, 0x000917, 0x00000001);
+ nv_icmd(dev, 0x000918, 0x00000001);
+ nv_icmd(dev, 0x000919, 0x00000001);
+ nv_icmd(dev, 0x00091a, 0x00000001);
+ nv_icmd(dev, 0x00091b, 0x00000001);
+ nv_icmd(dev, 0x00091c, 0x00000001);
+ nv_icmd(dev, 0x00091d, 0x00000001);
+ nv_icmd(dev, 0x00091e, 0x00000001);
+ nv_icmd(dev, 0x00091f, 0x00000001);
+ nv_icmd(dev, 0x000920, 0x00000002);
+ nv_icmd(dev, 0x000921, 0x00000002);
+ nv_icmd(dev, 0x000922, 0x00000002);
+ nv_icmd(dev, 0x000923, 0x00000002);
+ nv_icmd(dev, 0x000924, 0x00000002);
+ nv_icmd(dev, 0x000925, 0x00000002);
+ nv_icmd(dev, 0x000926, 0x00000002);
+ nv_icmd(dev, 0x000927, 0x00000002);
+ nv_icmd(dev, 0x000928, 0x00000001);
+ nv_icmd(dev, 0x000929, 0x00000001);
+ nv_icmd(dev, 0x00092a, 0x00000001);
+ nv_icmd(dev, 0x00092b, 0x00000001);
+ nv_icmd(dev, 0x00092c, 0x00000001);
+ nv_icmd(dev, 0x00092d, 0x00000001);
+ nv_icmd(dev, 0x00092e, 0x00000001);
+ nv_icmd(dev, 0x00092f, 0x00000001);
+ nv_icmd(dev, 0x000648, 0x00000001);
+ nv_icmd(dev, 0x000649, 0x00000001);
+ nv_icmd(dev, 0x00064a, 0x00000001);
+ nv_icmd(dev, 0x00064b, 0x00000001);
+ nv_icmd(dev, 0x00064c, 0x00000001);
+ nv_icmd(dev, 0x00064d, 0x00000001);
+ nv_icmd(dev, 0x00064e, 0x00000001);
+ nv_icmd(dev, 0x00064f, 0x00000001);
+ nv_icmd(dev, 0x000650, 0x00000001);
+ nv_icmd(dev, 0x000658, 0x0000000f);
+ nv_icmd(dev, 0x0007ff, 0x0000000a);
+ nv_icmd(dev, 0x00066a, 0x40000000);
+ nv_icmd(dev, 0x00066b, 0x10000000);
+ nv_icmd(dev, 0x00066c, 0xffff0000);
+ nv_icmd(dev, 0x00066d, 0xffff0000);
+ nv_icmd(dev, 0x0007af, 0x00000008);
+ nv_icmd(dev, 0x0007b0, 0x00000008);
+ nv_icmd(dev, 0x0007f6, 0x00000001);
+ nv_icmd(dev, 0x0006b2, 0x00000055);
+ nv_icmd(dev, 0x0007ad, 0x00000003);
+ nv_icmd(dev, 0x000937, 0x00000001);
+ nv_icmd(dev, 0x000971, 0x00000008);
+ nv_icmd(dev, 0x000972, 0x00000040);
+ nv_icmd(dev, 0x000973, 0x0000012c);
+ nv_icmd(dev, 0x00097c, 0x00000040);
+ nv_icmd(dev, 0x000979, 0x00000003);
+ nv_icmd(dev, 0x000975, 0x00000020);
+ nv_icmd(dev, 0x000976, 0x00000001);
+ nv_icmd(dev, 0x000977, 0x00000020);
+ nv_icmd(dev, 0x000978, 0x00000001);
+ nv_icmd(dev, 0x000957, 0x00000003);
+ nv_icmd(dev, 0x00095e, 0x20164010);
+ nv_icmd(dev, 0x00095f, 0x00000020);
+ nv_icmd(dev, 0x00097d, 0x00000020);
+ nv_icmd(dev, 0x000683, 0x00000006);
+ nv_icmd(dev, 0x000685, 0x003fffff);
+ nv_icmd(dev, 0x000687, 0x003fffff);
+ nv_icmd(dev, 0x0006a0, 0x00000005);
+ nv_icmd(dev, 0x000840, 0x00400008);
+ nv_icmd(dev, 0x000841, 0x08000080);
+ nv_icmd(dev, 0x000842, 0x00400008);
+ nv_icmd(dev, 0x000843, 0x08000080);
+ nv_icmd(dev, 0x000818, 0x00000000);
+ nv_icmd(dev, 0x000819, 0x00000000);
+ nv_icmd(dev, 0x00081a, 0x00000000);
+ nv_icmd(dev, 0x00081b, 0x00000000);
+ nv_icmd(dev, 0x00081c, 0x00000000);
+ nv_icmd(dev, 0x00081d, 0x00000000);
+ nv_icmd(dev, 0x00081e, 0x00000000);
+ nv_icmd(dev, 0x00081f, 0x00000000);
+ nv_icmd(dev, 0x000848, 0x00000000);
+ nv_icmd(dev, 0x000849, 0x00000000);
+ nv_icmd(dev, 0x00084a, 0x00000000);
+ nv_icmd(dev, 0x00084b, 0x00000000);
+ nv_icmd(dev, 0x00084c, 0x00000000);
+ nv_icmd(dev, 0x00084d, 0x00000000);
+ nv_icmd(dev, 0x00084e, 0x00000000);
+ nv_icmd(dev, 0x00084f, 0x00000000);
+ nv_icmd(dev, 0x000850, 0x00000000);
+ nv_icmd(dev, 0x000851, 0x00000000);
+ nv_icmd(dev, 0x000852, 0x00000000);
+ nv_icmd(dev, 0x000853, 0x00000000);
+ nv_icmd(dev, 0x000854, 0x00000000);
+ nv_icmd(dev, 0x000855, 0x00000000);
+ nv_icmd(dev, 0x000856, 0x00000000);
+ nv_icmd(dev, 0x000857, 0x00000000);
+ nv_icmd(dev, 0x000738, 0x00000000);
+ nv_icmd(dev, 0x0006aa, 0x00000001);
+ nv_icmd(dev, 0x0006ab, 0x00000002);
+ nv_icmd(dev, 0x0006ac, 0x00000080);
+ nv_icmd(dev, 0x0006ad, 0x00000100);
+ nv_icmd(dev, 0x0006ae, 0x00000100);
+ nv_icmd(dev, 0x0006b1, 0x00000011);
+ nv_icmd(dev, 0x0006bb, 0x000000cf);
+ nv_icmd(dev, 0x0006ce, 0x2a712488);
+ nv_icmd(dev, 0x000739, 0x4085c000);
+ nv_icmd(dev, 0x00073a, 0x00000080);
+ nv_icmd(dev, 0x000786, 0x80000100);
+ nv_icmd(dev, 0x00073c, 0x00010100);
+ nv_icmd(dev, 0x00073d, 0x02800000);
+ nv_icmd(dev, 0x000787, 0x000000cf);
+ nv_icmd(dev, 0x00078c, 0x00000008);
+ nv_icmd(dev, 0x000792, 0x00000001);
+ nv_icmd(dev, 0x000794, 0x00000001);
+ nv_icmd(dev, 0x000795, 0x00000001);
+ nv_icmd(dev, 0x000796, 0x00000001);
+ nv_icmd(dev, 0x000797, 0x000000cf);
+ nv_icmd(dev, 0x000836, 0x00000001);
+ nv_icmd(dev, 0x00079a, 0x00000002);
+ nv_icmd(dev, 0x000833, 0x04444480);
+ nv_icmd(dev, 0x0007a1, 0x00000001);
+ nv_icmd(dev, 0x0007a3, 0x00000001);
+ nv_icmd(dev, 0x0007a4, 0x00000001);
+ nv_icmd(dev, 0x0007a5, 0x00000001);
+ nv_icmd(dev, 0x000831, 0x00000004);
+ nv_icmd(dev, 0x000b07, 0x00000002);
+ nv_icmd(dev, 0x000b08, 0x00000100);
+ nv_icmd(dev, 0x000b09, 0x00000100);
+ nv_icmd(dev, 0x000b0a, 0x00000001);
+ nv_icmd(dev, 0x000a04, 0x000000ff);
+ nv_icmd(dev, 0x000a0b, 0x00000040);
+ nv_icmd(dev, 0x00097f, 0x00000100);
+ nv_icmd(dev, 0x000a02, 0x00000001);
+ nv_icmd(dev, 0x000809, 0x00000007);
+ nv_icmd(dev, 0x00c221, 0x00000040);
+ nv_icmd(dev, 0x00c1b0, 0x0000000f);
+ nv_icmd(dev, 0x00c1b1, 0x0000000f);
+ nv_icmd(dev, 0x00c1b2, 0x0000000f);
+ nv_icmd(dev, 0x00c1b3, 0x0000000f);
+ nv_icmd(dev, 0x00c1b4, 0x0000000f);
+ nv_icmd(dev, 0x00c1b5, 0x0000000f);
+ nv_icmd(dev, 0x00c1b6, 0x0000000f);
+ nv_icmd(dev, 0x00c1b7, 0x0000000f);
+ nv_icmd(dev, 0x00c1b8, 0x0fac6881);
+ nv_icmd(dev, 0x00c1b9, 0x00fac688);
+ nv_icmd(dev, 0x00c401, 0x00000001);
+ nv_icmd(dev, 0x00c402, 0x00010001);
+ nv_icmd(dev, 0x00c403, 0x00000001);
+ nv_icmd(dev, 0x00c404, 0x00000001);
+ nv_icmd(dev, 0x00c40e, 0x00000020);
+ nv_icmd(dev, 0x00c500, 0x00000003);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_icmd(dev, 0x001000, 0x00000002);
+ nv_icmd(dev, 0x0006aa, 0x00000001);
+ nv_icmd(dev, 0x0006ad, 0x00000100);
+ nv_icmd(dev, 0x0006ae, 0x00000100);
+ nv_icmd(dev, 0x0006b1, 0x00000011);
+ nv_icmd(dev, 0x00078c, 0x00000008);
+ nv_icmd(dev, 0x000792, 0x00000001);
+ nv_icmd(dev, 0x000794, 0x00000001);
+ nv_icmd(dev, 0x000795, 0x00000001);
+ nv_icmd(dev, 0x000796, 0x00000001);
+ nv_icmd(dev, 0x000797, 0x000000cf);
+ nv_icmd(dev, 0x00079a, 0x00000002);
+ nv_icmd(dev, 0x000833, 0x04444480);
+ nv_icmd(dev, 0x0007a1, 0x00000001);
+ nv_icmd(dev, 0x0007a3, 0x00000001);
+ nv_icmd(dev, 0x0007a4, 0x00000001);
+ nv_icmd(dev, 0x0007a5, 0x00000001);
+ nv_icmd(dev, 0x000831, 0x00000004);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_icmd(dev, 0x001000, 0x00000008);
+ nv_icmd(dev, 0x000039, 0x00000000);
+ nv_icmd(dev, 0x00003a, 0x00000000);
+ nv_icmd(dev, 0x00003b, 0x00000000);
+ nv_icmd(dev, 0x000380, 0x00000001);
+ nv_icmd(dev, 0x000366, 0x00000000);
+ nv_icmd(dev, 0x000367, 0x00000000);
+ nv_icmd(dev, 0x000368, 0x00000fff);
+ nv_icmd(dev, 0x000370, 0x00000000);
+ nv_icmd(dev, 0x000371, 0x00000000);
+ nv_icmd(dev, 0x000372, 0x000fffff);
+ nv_icmd(dev, 0x000813, 0x00000006);
+ nv_icmd(dev, 0x000814, 0x00000008);
+ nv_icmd(dev, 0x000957, 0x00000003);
+ nv_icmd(dev, 0x000818, 0x00000000);
+ nv_icmd(dev, 0x000819, 0x00000000);
+ nv_icmd(dev, 0x00081a, 0x00000000);
+ nv_icmd(dev, 0x00081b, 0x00000000);
+ nv_icmd(dev, 0x00081c, 0x00000000);
+ nv_icmd(dev, 0x00081d, 0x00000000);
+ nv_icmd(dev, 0x00081e, 0x00000000);
+ nv_icmd(dev, 0x00081f, 0x00000000);
+ nv_icmd(dev, 0x000848, 0x00000000);
+ nv_icmd(dev, 0x000849, 0x00000000);
+ nv_icmd(dev, 0x00084a, 0x00000000);
+ nv_icmd(dev, 0x00084b, 0x00000000);
+ nv_icmd(dev, 0x00084c, 0x00000000);
+ nv_icmd(dev, 0x00084d, 0x00000000);
+ nv_icmd(dev, 0x00084e, 0x00000000);
+ nv_icmd(dev, 0x00084f, 0x00000000);
+ nv_icmd(dev, 0x000850, 0x00000000);
+ nv_icmd(dev, 0x000851, 0x00000000);
+ nv_icmd(dev, 0x000852, 0x00000000);
+ nv_icmd(dev, 0x000853, 0x00000000);
+ nv_icmd(dev, 0x000854, 0x00000000);
+ nv_icmd(dev, 0x000855, 0x00000000);
+ nv_icmd(dev, 0x000856, 0x00000000);
+ nv_icmd(dev, 0x000857, 0x00000000);
+ nv_icmd(dev, 0x000738, 0x00000000);
+ nv_icmd(dev, 0x000b07, 0x00000002);
+ nv_icmd(dev, 0x000b08, 0x00000100);
+ nv_icmd(dev, 0x000b09, 0x00000100);
+ nv_icmd(dev, 0x000b0a, 0x00000001);
+ nv_icmd(dev, 0x000a04, 0x000000ff);
+ nv_icmd(dev, 0x00097f, 0x00000100);
+ nv_icmd(dev, 0x000a02, 0x00000001);
+ nv_icmd(dev, 0x000809, 0x00000007);
+ nv_icmd(dev, 0x00c221, 0x00000040);
+ nv_icmd(dev, 0x00c401, 0x00000001);
+ nv_icmd(dev, 0x00c402, 0x00010001);
+ nv_icmd(dev, 0x00c403, 0x00000001);
+ nv_icmd(dev, 0x00c404, 0x00000001);
+ nv_icmd(dev, 0x00c40e, 0x00000020);
+ nv_icmd(dev, 0x00c500, 0x00000003);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_icmd(dev, 0x001000, 0x00000001);
+ nv_icmd(dev, 0x000b07, 0x00000002);
+ nv_icmd(dev, 0x000b08, 0x00000100);
+ nv_icmd(dev, 0x000b09, 0x00000100);
+ nv_icmd(dev, 0x000b0a, 0x00000001);
+ nv_icmd(dev, 0x01e100, 0x00000001);
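+	/*
+	 * End of the icmd upload: clearing 0x400208 drops the submit/enable
+	 * bit that is presumably set when the sequence is armed earlier in
+	 * this function.
+	 */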
+ nv_wr32(dev, 0x400208, 0x00000000);
+}
+
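+/*
+ * Submit a single method to a graphics object class through PGRAPH's
+ * method-injection registers: the data word is staged in 0x40448c, and
+ * the write to 0x404488 (trigger bit 31 | method << 14 | class) kicks
+ * off execution.
+ */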
+static void
+nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
+{
+ nv_wr32(dev, 0x40448c, data);
+ nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
+}
+
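+/*
+ * Replay the default method state for the Kepler 3D class (0xa097).
+ * The values appear to be captured verbatim (presumably from traces of
+ * the proprietary driver) rather than derived, so they are written back
+ * as-is via nv_mthd().
+ */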
+static void
+nve0_grctx_generate_a097(struct drm_device *dev)
+{
+ nv_mthd(dev, 0xa097, 0x0800, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0840, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0880, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0900, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0940, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0980, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0804, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0844, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0884, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0904, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0944, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0984, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0808, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0848, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0888, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x08c8, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0908, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0948, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0988, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x09c8, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x080c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x084c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x088c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x08cc, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x090c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x094c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x098c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x09cc, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x0810, 0x000000cf);
+ nv_mthd(dev, 0xa097, 0x0850, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0890, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0910, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0950, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0990, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0814, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0854, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0894, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x08d4, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0914, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0954, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0994, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x09d4, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0818, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0858, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0898, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x08d8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0918, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0958, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0998, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x09d8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x081c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x085c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x089c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x091c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x095c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x099c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0820, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0860, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0920, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0960, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ca0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ce0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cf0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ca4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ce4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cf4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ca8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cb8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ce8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cf8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c3c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cbc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ccc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cdc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cfc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1da0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1db0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1de0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1df0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1da4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1db4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1de4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1df4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1da8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1db8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1de8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1df8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d3c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dbc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ddc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dfc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f3c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fa0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fa8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fb8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fe0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fe8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ff0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ff8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fa4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fbc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fdc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fe4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ff4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ffc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2000, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2040, 0x00000011);
+ nv_mthd(dev, 0xa097, 0x2080, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x20c0, 0x00000030);
+ nv_mthd(dev, 0xa097, 0x2100, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x2140, 0x00000051);
+ nv_mthd(dev, 0xa097, 0x200c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x204c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x208c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x20cc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x210c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x214c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x2010, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2050, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2090, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x20d0, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x2110, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x2150, 0x00000004);
+ nv_mthd(dev, 0xa097, 0x0380, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0384, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0388, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x038c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0700, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0710, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0720, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0730, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0704, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0714, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0724, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0734, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0708, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0718, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0728, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0738, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2800, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2804, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2808, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x280c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2810, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2814, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2818, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x281c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2820, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2824, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2828, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x282c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2830, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2834, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2838, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x283c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2840, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2844, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2848, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x284c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2850, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2854, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2858, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x285c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2860, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2864, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2868, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x286c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2870, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2874, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2878, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x287c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2880, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2884, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2888, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x288c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2890, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2894, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2898, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x289c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2900, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2904, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2908, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x290c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2910, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2914, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2918, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x291c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2920, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2924, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2928, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x292c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2930, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2934, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2938, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x293c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2940, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2944, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2948, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x294c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2950, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2954, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2958, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x295c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2960, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2964, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2968, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x296c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2970, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2974, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2978, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x297c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2980, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2984, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2988, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x298c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2990, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2994, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2998, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x299c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aa0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ac0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ae0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ba0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0be0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aa4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ac4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ae4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ba4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0be4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aa8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ac8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ae8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ba8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0be8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0acc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ab0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ad0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0af0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bf0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ab4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ad4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0af4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bf4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ca0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ce0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cf0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ca4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ce4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cf4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ca8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cb8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ce8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cf8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c0c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c1c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c2c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c3c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c4c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c5c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c6c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c7c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c8c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c9c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cac, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cbc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0ccc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cdc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cec, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cfc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0d00, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d08, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d10, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d18, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d20, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d28, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d30, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d38, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d04, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d0c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d14, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d1c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d24, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d2c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d34, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d3c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ea0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0eb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ec0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ed0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ee0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ef0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e04, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e14, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e24, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e34, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e44, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e54, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e64, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e74, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e84, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e94, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ea4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0eb4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ec4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ed4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ee4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ef4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e08, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e18, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e28, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e38, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e48, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e58, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e68, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e78, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e88, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e98, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ea8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0eb8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ec8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ed8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ee8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ef8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1e00, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e20, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e40, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e60, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e80, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ea0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ec0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ee0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e04, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e24, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e44, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e64, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e84, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ea4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ec4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ee4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e08, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e28, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e48, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e68, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e88, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ea8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ec8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ee8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e0c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e2c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e4c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e6c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e8c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eac, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ecc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eec, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e10, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e30, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e50, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e70, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e90, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eb0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ed0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ef0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e14, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e34, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e54, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e74, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e94, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1eb4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ed4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ef4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e18, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e38, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e58, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e78, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e98, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eb8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ed8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ef8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x3400, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3404, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3408, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x340c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3410, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3414, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3418, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x341c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3420, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3424, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3428, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x342c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3430, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3434, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3438, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x343c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3440, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3444, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3448, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x344c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3450, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3454, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3458, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x345c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3460, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3464, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3468, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x346c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3470, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3474, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3478, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x347c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3480, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3484, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3488, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x348c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3490, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3494, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3498, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x349c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3500, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3504, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3508, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x350c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3510, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3514, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3518, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x351c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3520, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3524, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3528, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x352c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3530, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3534, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3538, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x353c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3540, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3544, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3548, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x354c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3550, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3554, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3558, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x355c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3560, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3564, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3568, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x356c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3570, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3574, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3578, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x357c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3580, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3584, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3588, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x358c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3590, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3594, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3598, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x359c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x030c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1944, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1514, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d68, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x121c, 0x0fac6881);
+ nv_mthd(dev, 0xa097, 0x0fac, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1538, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0fe0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fe4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fe8, 0x00000014);
+ nv_mthd(dev, 0xa097, 0x0fec, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0ff0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x179c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1228, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x122c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x1230, 0x00010001);
+ nv_mthd(dev, 0xa097, 0x07f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15b4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1534, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x153c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x16b4, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x0fbc, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0fc0, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0fc4, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0fc8, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0df8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dfc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1948, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1970, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x161c, 0x000009f0);
+ nv_mthd(dev, 0xa097, 0x0dcc, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x163c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1160, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1164, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1168, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x116c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1170, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1174, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1178, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x117c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1180, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1184, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1188, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x118c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1190, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1194, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1198, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x119c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11a0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11a4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11a8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11ac, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11b0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11b4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11b8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11bc, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11c0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11c4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11c8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11cc, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11d0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11d4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11d8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11dc, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1880, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1884, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1888, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x188c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1890, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1894, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1898, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x189c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17d0, 0x000000ff);
+ nv_mthd(dev, 0xa097, 0x17d4, 0xffffffff);
+ nv_mthd(dev, 0xa097, 0x17d8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x17dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1434, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1438, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dec, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x13a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1318, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1644, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0748, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0de8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1648, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1120, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1124, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1128, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x112c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1118, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x164c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1658, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1910, 0x00000290);
+ nv_mthd(dev, 0xa097, 0x1518, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x165c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1520, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1604, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1570, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x13b0, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x13b4, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x020c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1670, 0x30201000);
+ nv_mthd(dev, 0xa097, 0x1674, 0x70605040);
+ nv_mthd(dev, 0xa097, 0x1678, 0xb8a89888);
+ nv_mthd(dev, 0xa097, 0x167c, 0xf8e8d8c8);
+ nv_mthd(dev, 0xa097, 0x166c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1680, 0x00ffff00);
+ nv_mthd(dev, 0xa097, 0x12d0, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x12d4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1684, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1688, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dac, 0x00001b02);
+ nv_mthd(dev, 0xa097, 0x0db0, 0x00001b02);
+ nv_mthd(dev, 0xa097, 0x0db4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x168c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x156c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x187c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1110, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0dc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1234, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1690, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12ac, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0790, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0794, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0798, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x079c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x077c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1000, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x10fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1290, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0218, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x12d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12dc, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x0d94, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x155c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1560, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1564, 0x00000fff);
+ nv_mthd(dev, 0xa097, 0x1574, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1578, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x157c, 0x000fffff);
+ nv_mthd(dev, 0xa097, 0x1354, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1610, 0x00000012);
+ nv_mthd(dev, 0xa097, 0x1608, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x160c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x260c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x162c, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x0210, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0320, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0324, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0328, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x032c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0330, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0334, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0338, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0750, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0760, 0x39291909);
+ nv_mthd(dev, 0xa097, 0x0764, 0x79695949);
+ nv_mthd(dev, 0xa097, 0x0768, 0xb9a99989);
+ nv_mthd(dev, 0xa097, 0x076c, 0xf9e9d9c9);
+ nv_mthd(dev, 0xa097, 0x0770, 0x30201000);
+ nv_mthd(dev, 0xa097, 0x0774, 0x70605040);
+ nv_mthd(dev, 0xa097, 0x0778, 0x00009080);
+ nv_mthd(dev, 0xa097, 0x0780, 0x39291909);
+ nv_mthd(dev, 0xa097, 0x0784, 0x79695949);
+ nv_mthd(dev, 0xa097, 0x0788, 0xb9a99989);
+ nv_mthd(dev, 0xa097, 0x078c, 0xf9e9d9c9);
+ nv_mthd(dev, 0xa097, 0x07d0, 0x30201000);
+ nv_mthd(dev, 0xa097, 0x07d4, 0x70605040);
+ nv_mthd(dev, 0xa097, 0x07d8, 0x00009080);
+ nv_mthd(dev, 0xa097, 0x037c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0740, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0744, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2600, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1918, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x191c, 0x00000900);
+ nv_mthd(dev, 0xa097, 0x1920, 0x00000405);
+ nv_mthd(dev, 0xa097, 0x1308, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1924, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x13ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x192c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x193c, 0x00002c1c);
+ nv_mthd(dev, 0xa097, 0x0d7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x02c0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1510, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1940, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ff4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ff8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x194c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1950, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1968, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1590, 0x0000003f);
+ nv_mthd(dev, 0xa097, 0x07e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x196c, 0x00000011);
+ nv_mthd(dev, 0xa097, 0x02e4, 0x0000b001);
+ nv_mthd(dev, 0xa097, 0x036c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0370, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x197c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x02d8, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x1980, 0x00000080);
+ nv_mthd(dev, 0xa097, 0x1504, 0x00000080);
+ nv_mthd(dev, 0xa097, 0x1984, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0300, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x13a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1310, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1314, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1380, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1384, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1388, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x138c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1390, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1394, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x139c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1398, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1594, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1598, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x159c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15a0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15a4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0f54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fa0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x130c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1360, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1364, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1368, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x136c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1370, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1374, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1378, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x137c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x133c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1340, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1344, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1348, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x134c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1350, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1358, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x12e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x131c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1320, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1324, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1328, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1140, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19c8, 0x00001500);
+ nv_mthd(dev, 0xa097, 0x135c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19e0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19e4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19e8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19ec, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19f0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19f4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19f8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19fc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19cc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a00, 0x00001111);
+ nv_mthd(dev, 0xa097, 0x1a04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d6c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d70, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x10f8, 0x00001010);
+ nv_mthd(dev, 0xa097, 0x0d80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0da0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1508, 0x80000000);
+ nv_mthd(dev, 0xa097, 0x150c, 0x40000000);
+ nv_mthd(dev, 0xa097, 0x1668, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0318, 0x00000008);
+ nv_mthd(dev, 0xa097, 0x031c, 0x00000008);
+ nv_mthd(dev, 0xa097, 0x0d9c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0374, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0378, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x07dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x074c, 0x00000055);
+ nv_mthd(dev, 0xa097, 0x1420, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x17bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17c4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1008, 0x00000008);
+ nv_mthd(dev, 0xa097, 0x100c, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x1010, 0x0000012c);
+ nv_mthd(dev, 0xa097, 0x0d60, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x075c, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x1018, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x101c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1020, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x1024, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1444, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1448, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x144c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0360, 0x20164010);
+ nv_mthd(dev, 0xa097, 0x0364, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x0368, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0de4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0204, 0x00000006);
+ nv_mthd(dev, 0xa097, 0x0208, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x02cc, 0x003fffff);
+ nv_mthd(dev, 0xa097, 0x02d0, 0x003fffff);
+ nv_mthd(dev, 0xa097, 0x1220, 0x00000005);
+ nv_mthd(dev, 0xa097, 0x0fdc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f98, 0x00400008);
+ nv_mthd(dev, 0xa097, 0x1284, 0x08000080);
+ nv_mthd(dev, 0xa097, 0x1450, 0x00400008);
+ nv_mthd(dev, 0xa097, 0x1454, 0x08000080);
+ nv_mthd(dev, 0xa097, 0x0214, 0x00000000);
+}
+
+static void
+nve0_grctx_generate_902d(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x3410, 0x00000000);
+}
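
The a097 and 902d generators above are pure data: long, unrolled runs of nv_mthd(dev, class, method, value) calls pushing golden initial state for each object class. Below is a hypothetical sketch of the same pattern in table-driven form; `mthd_pair`, `emit_mthds`, and the printf stub standing in for nv_mthd() are illustrative names, not part of the nouveau code.

```c
#include <stdio.h>
#include <stdint.h>

/* Hypothetical refactor sketch: the unrolled nv_mthd() call lists above
 * are pure data, so they could equally be driven from a const table.
 * A printf stub stands in for the driver's nv_mthd(). */
struct mthd_pair {
	uint16_t mthd;	/* method offset within the object's space */
	uint32_t data;	/* initial value pushed for that method */
};

static const struct mthd_pair nve0_902d_init[] = {
	{ 0x0200, 0x000000cf }, { 0x0204, 0x00000001 },
	{ 0x0208, 0x00000020 }, { 0x020c, 0x00000001 },
	{ 0x0210, 0x00000000 }, { 0x0214, 0x00000080 },
	/* ... remaining pairs from nve0_grctx_generate_902d() ... */
	{ 0x3410, 0x00000000 },
};

static void emit_mthds(uint16_t oclass, const struct mthd_pair *p, size_t n)
{
	for (size_t i = 0; i < n; i++)	/* stand-in for nv_mthd(dev, ...) */
		printf("nv_mthd(dev, 0x%04x, 0x%04x, 0x%08x)\n",
		       oclass, p[i].mthd, p[i].data);
}

int main(void)
{
	emit_mthds(0x902d, nve0_902d_init,
		   sizeof(nve0_902d_init) / sizeof(nve0_902d_init[0]));
	return 0;
}
```

Keeping the values in a const table rather than straight-line code could make the golden state easier to diff against mmiotrace dumps, at the cost of one indirection.
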
+
+static void
+nve0_graph_generate_unk40xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404010, 0x0);
+ nv_wr32(dev, 0x404014, 0x0);
+ nv_wr32(dev, 0x404018, 0x0);
+ nv_wr32(dev, 0x40401c, 0x0);
+ nv_wr32(dev, 0x404020, 0x0);
+ nv_wr32(dev, 0x404024, 0xe000);
+ nv_wr32(dev, 0x404028, 0x0);
+ nv_wr32(dev, 0x4040a8, 0x0);
+ nv_wr32(dev, 0x4040ac, 0x0);
+ nv_wr32(dev, 0x4040b0, 0x0);
+ nv_wr32(dev, 0x4040b4, 0x0);
+ nv_wr32(dev, 0x4040b8, 0x0);
+ nv_wr32(dev, 0x4040bc, 0x0);
+ nv_wr32(dev, 0x4040c0, 0x0);
+ nv_wr32(dev, 0x4040c4, 0x0);
+ nv_wr32(dev, 0x4040c8, 0xf800008f);
+ nv_wr32(dev, 0x4040d0, 0x0);
+ nv_wr32(dev, 0x4040d4, 0x0);
+ nv_wr32(dev, 0x4040d8, 0x0);
+ nv_wr32(dev, 0x4040dc, 0x0);
+ nv_wr32(dev, 0x4040e0, 0x0);
+ nv_wr32(dev, 0x4040e4, 0x0);
+ nv_wr32(dev, 0x4040e8, 0x1000);
+ nv_wr32(dev, 0x4040f8, 0x0);
+ nv_wr32(dev, 0x404130, 0x0);
+ nv_wr32(dev, 0x404134, 0x0);
+ nv_wr32(dev, 0x404138, 0x20000040);
+ nv_wr32(dev, 0x404150, 0x2e);
+ nv_wr32(dev, 0x404154, 0x400);
+ nv_wr32(dev, 0x404158, 0x200);
+ nv_wr32(dev, 0x404164, 0x55);
+ nv_wr32(dev, 0x4041a0, 0x0);
+ nv_wr32(dev, 0x4041a4, 0x0);
+ nv_wr32(dev, 0x4041a8, 0x0);
+ nv_wr32(dev, 0x4041ac, 0x0);
+ nv_wr32(dev, 0x404200, 0x0);
+ nv_wr32(dev, 0x404204, 0x0);
+ nv_wr32(dev, 0x404208, 0x0);
+ nv_wr32(dev, 0x40420c, 0x0);
+}
+
+static void
+nve0_graph_generate_unk44xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404404, 0x0);
+ nv_wr32(dev, 0x404408, 0x0);
+ nv_wr32(dev, 0x40440c, 0x0);
+ nv_wr32(dev, 0x404410, 0x0);
+ nv_wr32(dev, 0x404414, 0x0);
+ nv_wr32(dev, 0x404418, 0x0);
+ nv_wr32(dev, 0x40441c, 0x0);
+ nv_wr32(dev, 0x404420, 0x0);
+ nv_wr32(dev, 0x404424, 0x0);
+ nv_wr32(dev, 0x404428, 0x0);
+ nv_wr32(dev, 0x40442c, 0x0);
+ nv_wr32(dev, 0x404430, 0x0);
+ nv_wr32(dev, 0x404434, 0x0);
+ nv_wr32(dev, 0x404438, 0x0);
+ nv_wr32(dev, 0x404460, 0x0);
+ nv_wr32(dev, 0x404464, 0x0);
+ nv_wr32(dev, 0x404468, 0xffffff);
+ nv_wr32(dev, 0x40446c, 0x0);
+ nv_wr32(dev, 0x404480, 0x1);
+ nv_wr32(dev, 0x404498, 0x1);
+}
+
+static void
+nve0_graph_generate_unk46xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404604, 0x14);
+ nv_wr32(dev, 0x404608, 0x0);
+ nv_wr32(dev, 0x40460c, 0x3fff);
+ nv_wr32(dev, 0x404610, 0x100);
+ nv_wr32(dev, 0x404618, 0x0);
+ nv_wr32(dev, 0x40461c, 0x0);
+ nv_wr32(dev, 0x404620, 0x0);
+ nv_wr32(dev, 0x404624, 0x0);
+ nv_wr32(dev, 0x40462c, 0x0);
+ nv_wr32(dev, 0x404630, 0x0);
+ nv_wr32(dev, 0x404640, 0x0);
+ nv_wr32(dev, 0x404654, 0x0);
+ nv_wr32(dev, 0x404660, 0x0);
+ nv_wr32(dev, 0x404678, 0x0);
+ nv_wr32(dev, 0x40467c, 0x2);
+ nv_wr32(dev, 0x404680, 0x0);
+ nv_wr32(dev, 0x404684, 0x0);
+ nv_wr32(dev, 0x404688, 0x0);
+ nv_wr32(dev, 0x40468c, 0x0);
+ nv_wr32(dev, 0x404690, 0x0);
+ nv_wr32(dev, 0x404694, 0x0);
+ nv_wr32(dev, 0x404698, 0x0);
+ nv_wr32(dev, 0x40469c, 0x0);
+ nv_wr32(dev, 0x4046a0, 0x7f0080);
+ nv_wr32(dev, 0x4046a4, 0x0);
+ nv_wr32(dev, 0x4046a8, 0x0);
+ nv_wr32(dev, 0x4046ac, 0x0);
+ nv_wr32(dev, 0x4046b0, 0x0);
+ nv_wr32(dev, 0x4046b4, 0x0);
+ nv_wr32(dev, 0x4046b8, 0x0);
+ nv_wr32(dev, 0x4046bc, 0x0);
+ nv_wr32(dev, 0x4046c0, 0x0);
+ nv_wr32(dev, 0x4046c8, 0x0);
+ nv_wr32(dev, 0x4046cc, 0x0);
+ nv_wr32(dev, 0x4046d0, 0x0);
+}
+
+static void
+nve0_graph_generate_unk47xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404700, 0x0);
+ nv_wr32(dev, 0x404704, 0x0);
+ nv_wr32(dev, 0x404708, 0x0);
+ nv_wr32(dev, 0x404718, 0x0);
+ nv_wr32(dev, 0x40471c, 0x0);
+ nv_wr32(dev, 0x404720, 0x0);
+ nv_wr32(dev, 0x404724, 0x0);
+ nv_wr32(dev, 0x404728, 0x0);
+ nv_wr32(dev, 0x40472c, 0x0);
+ nv_wr32(dev, 0x404730, 0x0);
+ nv_wr32(dev, 0x404734, 0x100);
+ nv_wr32(dev, 0x404738, 0x0);
+ nv_wr32(dev, 0x40473c, 0x0);
+ nv_wr32(dev, 0x404744, 0x0);
+ nv_wr32(dev, 0x404748, 0x0);
+ nv_wr32(dev, 0x404754, 0x0);
+}
+
+static void
+nve0_graph_generate_unk58xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x405800, 0xf8000bf);
+ nv_wr32(dev, 0x405830, 0x2180648);
+ nv_wr32(dev, 0x405834, 0x8000000);
+ nv_wr32(dev, 0x405838, 0x0);
+ nv_wr32(dev, 0x405854, 0x0);
+ nv_wr32(dev, 0x405870, 0x1);
+ nv_wr32(dev, 0x405874, 0x1);
+ nv_wr32(dev, 0x405878, 0x1);
+ nv_wr32(dev, 0x40587c, 0x1);
+ nv_wr32(dev, 0x405a00, 0x0);
+ nv_wr32(dev, 0x405a04, 0x0);
+ nv_wr32(dev, 0x405a18, 0x0);
+ nv_wr32(dev, 0x405b00, 0x0);
+ nv_wr32(dev, 0x405b10, 0x1000);
+}
+
+static void
+nve0_graph_generate_unk60xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x406020, 0x4103c1);
+ nv_wr32(dev, 0x406028, 0x1);
+ nv_wr32(dev, 0x40602c, 0x1);
+ nv_wr32(dev, 0x406030, 0x1);
+ nv_wr32(dev, 0x406034, 0x1);
+}
+
+static void
+nve0_graph_generate_unk64xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x4064a8, 0x0);
+ nv_wr32(dev, 0x4064ac, 0x3fff);
+ nv_wr32(dev, 0x4064b4, 0x0);
+ nv_wr32(dev, 0x4064b8, 0x0);
+ nv_wr32(dev, 0x4064c0, 0x801a00f0);
+ nv_wr32(dev, 0x4064c4, 0x192ffff);
+ nv_wr32(dev, 0x4064c8, 0x1800600);
+ nv_wr32(dev, 0x4064cc, 0x0);
+ nv_wr32(dev, 0x4064d0, 0x0);
+ nv_wr32(dev, 0x4064d4, 0x0);
+ nv_wr32(dev, 0x4064d8, 0x0);
+ nv_wr32(dev, 0x4064dc, 0x0);
+ nv_wr32(dev, 0x4064e0, 0x0);
+ nv_wr32(dev, 0x4064e4, 0x0);
+ nv_wr32(dev, 0x4064e8, 0x0);
+ nv_wr32(dev, 0x4064ec, 0x0);
+ nv_wr32(dev, 0x4064fc, 0x22a);
+}
+
+static void
+nve0_graph_generate_unk70xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x407040, 0x0);
+}
+
+static void
+nve0_graph_generate_unk78xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x407804, 0x23);
+ nv_wr32(dev, 0x40780c, 0xa418820);
+ nv_wr32(dev, 0x407810, 0x62080e6);
+ nv_wr32(dev, 0x407814, 0x20398a4);
+ nv_wr32(dev, 0x407818, 0xe629062);
+ nv_wr32(dev, 0x40781c, 0xa418820);
+ nv_wr32(dev, 0x407820, 0xe6);
+ nv_wr32(dev, 0x4078bc, 0x103);
+}
+
+static void
+nve0_graph_generate_unk80xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x408000, 0x0);
+ nv_wr32(dev, 0x408004, 0x0);
+ nv_wr32(dev, 0x408008, 0x30);
+ nv_wr32(dev, 0x40800c, 0x0);
+ nv_wr32(dev, 0x408010, 0x0);
+ nv_wr32(dev, 0x408014, 0x69);
+ nv_wr32(dev, 0x408018, 0xe100e100);
+ nv_wr32(dev, 0x408064, 0x0);
+}
+
+static void
+nve0_graph_generate_unk88xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x408800, 0x2802a3c);
+ nv_wr32(dev, 0x408804, 0x40);
+ nv_wr32(dev, 0x408808, 0x1043e005);
+ nv_wr32(dev, 0x408840, 0xb);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x62000001);
+ nv_wr32(dev, 0x408908, 0xc8102f);
+ nv_wr32(dev, 0x408980, 0x11d);
+}
+
+static void
+nve0_graph_generate_gpc(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x418380, 0x16);
+ nv_wr32(dev, 0x418400, 0x38004e00);
+ nv_wr32(dev, 0x418404, 0x71e0ffff);
+ nv_wr32(dev, 0x41840c, 0x1008);
+ nv_wr32(dev, 0x418410, 0xfff0fff);
+ nv_wr32(dev, 0x418414, 0x2200fff);
+ nv_wr32(dev, 0x418450, 0x0);
+ nv_wr32(dev, 0x418454, 0x0);
+ nv_wr32(dev, 0x418458, 0x0);
+ nv_wr32(dev, 0x41845c, 0x0);
+ nv_wr32(dev, 0x418460, 0x0);
+ nv_wr32(dev, 0x418464, 0x0);
+ nv_wr32(dev, 0x418468, 0x1);
+ nv_wr32(dev, 0x41846c, 0x0);
+ nv_wr32(dev, 0x418470, 0x0);
+ nv_wr32(dev, 0x418600, 0x1f);
+ nv_wr32(dev, 0x418684, 0xf);
+ nv_wr32(dev, 0x418700, 0x2);
+ nv_wr32(dev, 0x418704, 0x80);
+ nv_wr32(dev, 0x418708, 0x0);
+ nv_wr32(dev, 0x41870c, 0x0);
+ nv_wr32(dev, 0x418710, 0x0);
+ nv_wr32(dev, 0x418800, 0x7006860a);
+ nv_wr32(dev, 0x418808, 0x0);
+ nv_wr32(dev, 0x41880c, 0x0);
+ nv_wr32(dev, 0x418810, 0x0);
+ nv_wr32(dev, 0x418828, 0x44);
+ nv_wr32(dev, 0x418830, 0x10000001);
+ nv_wr32(dev, 0x4188d8, 0x8);
+ nv_wr32(dev, 0x4188e0, 0x1000000);
+ nv_wr32(dev, 0x4188e8, 0x0);
+ nv_wr32(dev, 0x4188ec, 0x0);
+ nv_wr32(dev, 0x4188f0, 0x0);
+ nv_wr32(dev, 0x4188f4, 0x0);
+ nv_wr32(dev, 0x4188f8, 0x0);
+ nv_wr32(dev, 0x4188fc, 0x20100018);
+ nv_wr32(dev, 0x41891c, 0xff00ff);
+ nv_wr32(dev, 0x418924, 0x0);
+ nv_wr32(dev, 0x418928, 0xffff00);
+ nv_wr32(dev, 0x41892c, 0xff00);
+ nv_wr32(dev, 0x418a00, 0x0);
+ nv_wr32(dev, 0x418a04, 0x0);
+ nv_wr32(dev, 0x418a08, 0x0);
+ nv_wr32(dev, 0x418a0c, 0x10000);
+ nv_wr32(dev, 0x418a10, 0x0);
+ nv_wr32(dev, 0x418a14, 0x0);
+ nv_wr32(dev, 0x418a18, 0x0);
+ nv_wr32(dev, 0x418a20, 0x0);
+ nv_wr32(dev, 0x418a24, 0x0);
+ nv_wr32(dev, 0x418a28, 0x0);
+ nv_wr32(dev, 0x418a2c, 0x10000);
+ nv_wr32(dev, 0x418a30, 0x0);
+ nv_wr32(dev, 0x418a34, 0x0);
+ nv_wr32(dev, 0x418a38, 0x0);
+ nv_wr32(dev, 0x418a40, 0x0);
+ nv_wr32(dev, 0x418a44, 0x0);
+ nv_wr32(dev, 0x418a48, 0x0);
+ nv_wr32(dev, 0x418a4c, 0x10000);
+ nv_wr32(dev, 0x418a50, 0x0);
+ nv_wr32(dev, 0x418a54, 0x0);
+ nv_wr32(dev, 0x418a58, 0x0);
+ nv_wr32(dev, 0x418a60, 0x0);
+ nv_wr32(dev, 0x418a64, 0x0);
+ nv_wr32(dev, 0x418a68, 0x0);
+ nv_wr32(dev, 0x418a6c, 0x10000);
+ nv_wr32(dev, 0x418a70, 0x0);
+ nv_wr32(dev, 0x418a74, 0x0);
+ nv_wr32(dev, 0x418a78, 0x0);
+ nv_wr32(dev, 0x418a80, 0x0);
+ nv_wr32(dev, 0x418a84, 0x0);
+ nv_wr32(dev, 0x418a88, 0x0);
+ nv_wr32(dev, 0x418a8c, 0x10000);
+ nv_wr32(dev, 0x418a90, 0x0);
+ nv_wr32(dev, 0x418a94, 0x0);
+ nv_wr32(dev, 0x418a98, 0x0);
+ nv_wr32(dev, 0x418aa0, 0x0);
+ nv_wr32(dev, 0x418aa4, 0x0);
+ nv_wr32(dev, 0x418aa8, 0x0);
+ nv_wr32(dev, 0x418aac, 0x10000);
+ nv_wr32(dev, 0x418ab0, 0x0);
+ nv_wr32(dev, 0x418ab4, 0x0);
+ nv_wr32(dev, 0x418ab8, 0x0);
+ nv_wr32(dev, 0x418ac0, 0x0);
+ nv_wr32(dev, 0x418ac4, 0x0);
+ nv_wr32(dev, 0x418ac8, 0x0);
+ nv_wr32(dev, 0x418acc, 0x10000);
+ nv_wr32(dev, 0x418ad0, 0x0);
+ nv_wr32(dev, 0x418ad4, 0x0);
+ nv_wr32(dev, 0x418ad8, 0x0);
+ nv_wr32(dev, 0x418ae0, 0x0);
+ nv_wr32(dev, 0x418ae4, 0x0);
+ nv_wr32(dev, 0x418ae8, 0x0);
+ nv_wr32(dev, 0x418aec, 0x10000);
+ nv_wr32(dev, 0x418af0, 0x0);
+ nv_wr32(dev, 0x418af4, 0x0);
+ nv_wr32(dev, 0x418af8, 0x0);
+ nv_wr32(dev, 0x418b00, 0x6);
+ nv_wr32(dev, 0x418b08, 0xa418820);
+ nv_wr32(dev, 0x418b0c, 0x62080e6);
+ nv_wr32(dev, 0x418b10, 0x20398a4);
+ nv_wr32(dev, 0x418b14, 0xe629062);
+ nv_wr32(dev, 0x418b18, 0xa418820);
+ nv_wr32(dev, 0x418b1c, 0xe6);
+ nv_wr32(dev, 0x418bb8, 0x103);
+ nv_wr32(dev, 0x418c08, 0x1);
+ nv_wr32(dev, 0x418c10, 0x0);
+ nv_wr32(dev, 0x418c14, 0x0);
+ nv_wr32(dev, 0x418c18, 0x0);
+ nv_wr32(dev, 0x418c1c, 0x0);
+ nv_wr32(dev, 0x418c20, 0x0);
+ nv_wr32(dev, 0x418c24, 0x0);
+ nv_wr32(dev, 0x418c28, 0x0);
+ nv_wr32(dev, 0x418c2c, 0x0);
+ nv_wr32(dev, 0x418c40, 0xffffffff);
+ nv_wr32(dev, 0x418c6c, 0x1);
+ nv_wr32(dev, 0x418c80, 0x20200004);
+ nv_wr32(dev, 0x418c8c, 0x1);
+ nv_wr32(dev, 0x419000, 0x780);
+ nv_wr32(dev, 0x419004, 0x0);
+ nv_wr32(dev, 0x419008, 0x0);
+ nv_wr32(dev, 0x419014, 0x4);
+}
+
+static void
+nve0_graph_generate_tpc(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x419848, 0x0);
+ nv_wr32(dev, 0x419864, 0x129);
+ nv_wr32(dev, 0x419888, 0x0);
+ nv_wr32(dev, 0x419a00, 0xf0);
+ nv_wr32(dev, 0x419a04, 0x1);
+ nv_wr32(dev, 0x419a08, 0x21);
+ nv_wr32(dev, 0x419a0c, 0x20000);
+ nv_wr32(dev, 0x419a10, 0x0);
+ nv_wr32(dev, 0x419a14, 0x200);
+ nv_wr32(dev, 0x419a1c, 0xc000);
+ nv_wr32(dev, 0x419a20, 0x800);
+ nv_wr32(dev, 0x419a30, 0x1);
+ nv_wr32(dev, 0x419ac4, 0x37f440);
+ nv_wr32(dev, 0x419c00, 0xa);
+ nv_wr32(dev, 0x419c04, 0x80000006);
+ nv_wr32(dev, 0x419c08, 0x2);
+ nv_wr32(dev, 0x419c20, 0x0);
+ nv_wr32(dev, 0x419c24, 0x84210);
+ nv_wr32(dev, 0x419c28, 0x3efbefbe);
+ nv_wr32(dev, 0x419ce8, 0x0);
+ nv_wr32(dev, 0x419cf4, 0x3203);
+ nv_wr32(dev, 0x419e04, 0x0);
+ nv_wr32(dev, 0x419e08, 0x0);
+ nv_wr32(dev, 0x419e0c, 0x0);
+ nv_wr32(dev, 0x419e10, 0x402);
+ nv_wr32(dev, 0x419e44, 0x13eff2);
+ nv_wr32(dev, 0x419e48, 0x0);
+ nv_wr32(dev, 0x419e4c, 0x7f);
+ nv_wr32(dev, 0x419e50, 0x0);
+ nv_wr32(dev, 0x419e54, 0x0);
+ nv_wr32(dev, 0x419e58, 0x0);
+ nv_wr32(dev, 0x419e5c, 0x0);
+ nv_wr32(dev, 0x419e60, 0x0);
+ nv_wr32(dev, 0x419e64, 0x0);
+ nv_wr32(dev, 0x419e68, 0x0);
+ nv_wr32(dev, 0x419e6c, 0x0);
+ nv_wr32(dev, 0x419e70, 0x0);
+ nv_wr32(dev, 0x419e74, 0x0);
+ nv_wr32(dev, 0x419e78, 0x0);
+ nv_wr32(dev, 0x419e7c, 0x0);
+ nv_wr32(dev, 0x419e80, 0x0);
+ nv_wr32(dev, 0x419e84, 0x0);
+ nv_wr32(dev, 0x419e88, 0x0);
+ nv_wr32(dev, 0x419e8c, 0x0);
+ nv_wr32(dev, 0x419e90, 0x0);
+ nv_wr32(dev, 0x419e94, 0x0);
+ nv_wr32(dev, 0x419e98, 0x0);
+ nv_wr32(dev, 0x419eac, 0x1fcf);
+ nv_wr32(dev, 0x419eb0, 0xd3f);
+ nv_wr32(dev, 0x419ec8, 0x1304f);
+ nv_wr32(dev, 0x419f30, 0x0);
+ nv_wr32(dev, 0x419f34, 0x0);
+ nv_wr32(dev, 0x419f38, 0x0);
+ nv_wr32(dev, 0x419f3c, 0x0);
+ nv_wr32(dev, 0x419f40, 0x0);
+ nv_wr32(dev, 0x419f44, 0x0);
+ nv_wr32(dev, 0x419f48, 0x0);
+ nv_wr32(dev, 0x419f4c, 0x0);
+ nv_wr32(dev, 0x419f58, 0x0);
+ nv_wr32(dev, 0x419f78, 0xb);
+}
+
+static void
+nve0_graph_generate_tpcunk(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x41be24, 0x6);
+ nv_wr32(dev, 0x41bec0, 0x12180000);
+ nv_wr32(dev, 0x41bec4, 0x37f7f);
+ nv_wr32(dev, 0x41bee4, 0x6480430);
+ nv_wr32(dev, 0x41bf00, 0xa418820);
+ nv_wr32(dev, 0x41bf04, 0x62080e6);
+ nv_wr32(dev, 0x41bf08, 0x20398a4);
+ nv_wr32(dev, 0x41bf0c, 0xe629062);
+ nv_wr32(dev, 0x41bf10, 0xa418820);
+ nv_wr32(dev, 0x41bf14, 0xe6);
+ nv_wr32(dev, 0x41bfd0, 0x900103);
+ nv_wr32(dev, 0x41bfe0, 0x400001);
+ nv_wr32(dev, 0x41bfe4, 0x0);
+}
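
Everything from unk40xx down to tpcunk is straight MMIO pokes through nv_wr32(); the outer generate function below also uses nv_mask() for read-modify-write. For readers following along, here is a runnable user-space sketch of those helpers' semantics — the plain array standing in for the mapped BAR0 aperture is an assumption of the sketch, not the driver's actual plumbing:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch of the nv_wr32()/nv_rd32()/nv_mask() helpers used
 * throughout this file.  A uint32_t array stands in for the register
 * aperture; the real driver does ioread32/iowrite32 on a mapped BAR. */
static uint32_t mmio[0x100000 / 4];	/* fake register space */

static void nv_wr32(uint32_t reg, uint32_t val)
{
	mmio[reg / 4] = val;		/* iowrite32(val, bar + reg) */
}

static uint32_t nv_rd32(uint32_t reg)
{
	return mmio[reg / 4];		/* ioread32(bar + reg) */
}

/* read-modify-write: clear the bits in 'mask', set the bits in 'val',
 * and hand back the previous register contents */
static uint32_t nv_mask(uint32_t reg, uint32_t mask, uint32_t val)
{
	uint32_t tmp = nv_rd32(reg);
	nv_wr32(reg, (tmp & ~mask) | val);
	return tmp;
}

int main(void)
{
	nv_wr32(0x000260, 0xffffffff);
	nv_mask(0x000260, 0x00000001, 0x00000000); /* as in nve0_grctx_generate() */
	printf("0x000260 = 0x%08x\n", nv_rd32(0x000260)); /* -> 0xfffffffe */
	return 0;
}
```
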
+
+int
+nve0_grctx_generate(struct nouveau_channel *chan)
+{
+ struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ u32 data[6] = {}, data2[2] = {}, tmp;
+ u32 tpc_set = 0, tpc_mask = 0;
+ u8 tpcnr[GPC_MAX], a, b;
+ u8 shift, ntpcv;
+ int i, gpc, tpc, id;
+
+ nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x400204, 0x00000000);
+ nv_wr32(dev, 0x400208, 0x00000000);
+
+ nve0_graph_generate_unk40xx(dev);
+ nve0_graph_generate_unk44xx(dev);
+ nve0_graph_generate_unk46xx(dev);
+ nve0_graph_generate_unk47xx(dev);
+ nve0_graph_generate_unk58xx(dev);
+ nve0_graph_generate_unk60xx(dev);
+ nve0_graph_generate_unk64xx(dev);
+ nve0_graph_generate_unk70xx(dev);
+ nve0_graph_generate_unk78xx(dev);
+ nve0_graph_generate_unk80xx(dev);
+ nve0_graph_generate_unk88xx(dev);
+ nve0_graph_generate_gpc(dev);
+ nve0_graph_generate_tpc(dev);
+ nve0_graph_generate_tpcunk(dev);
+
+ nv_wr32(dev, 0x404154, 0x0);
+
+ for (i = 0; i < grch->mmio_nr * 8; i += 8) {
+ u32 reg = nv_ro32(grch->mmio, i + 0);
+ u32 val = nv_ro32(grch->mmio, i + 4);
+ nv_wr32(dev, reg, val);
+ }
+
+ nv_wr32(dev, 0x418c6c, 0x1);
+ nv_wr32(dev, 0x41980c, 0x10);
+ nv_wr32(dev, 0x41be08, 0x4);
+ nv_wr32(dev, 0x4064c0, 0x801a00f0);
+ nv_wr32(dev, 0x405800, 0xf8000bf);
+ nv_wr32(dev, 0x419c00, 0xa);
+
+ for (tpc = 0, id = 0; tpc < 4; tpc++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tpc < priv->tpc_nr[gpc]) {
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0698), id);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x04e8), id);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0088), id++);
+ }
+
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+ }
+ }
+
+ tmp = 0;
+ for (i = 0; i < priv->gpc_nr; i++)
+ tmp |= priv->tpc_nr[i] << (i * 4);
+ nv_wr32(dev, 0x406028, tmp);
+ nv_wr32(dev, 0x405870, tmp);
+
+ nv_wr32(dev, 0x40602c, 0x0);
+ nv_wr32(dev, 0x405874, 0x0);
+ nv_wr32(dev, 0x406030, 0x0);
+ nv_wr32(dev, 0x405878, 0x0);
+ nv_wr32(dev, 0x406034, 0x0);
+ nv_wr32(dev, 0x40587c, 0x0);
+
+ /* calculate first set of magics */
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+
+ gpc = -1;
+ for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpcnr[gpc]--;
+
+ data[tpc / 6] |= gpc << ((tpc % 6) * 5);
+ }
+
+ for (; tpc < 32; tpc++)
+ data[tpc / 6] |= 7 << ((tpc % 6) * 5);
+
+ /* and the second... */
+ shift = 0;
+ ntpcv = priv->tpc_total;
+ while (!(ntpcv & (1 << 4))) {
+ ntpcv <<= 1;
+ shift++;
+ }
+
+ data2[0] = ntpcv << 16;
+ data2[0] |= shift << 21;
+ data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+ data2[0] |= priv->tpc_total << 8;
+ data2[0] |= priv->magic_not_rop_nr;
+ for (i = 1; i < 7; i++)
+ data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+ /* and write it all to the various parts of PGRAPH */
+ nv_wr32(dev, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
+
+ nv_wr32(dev, 0x41bfd0, data2[0]);
+ nv_wr32(dev, 0x41bfe4, data2[1]);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x41bf00 + (i * 4), data[i]);
+
+ nv_wr32(dev, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x40780c + (i * 4), data[i]);
+
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+
+ for (i = 0, gpc = -1, b = -1; i < 32; i++) {
+ a = (i * (priv->tpc_total - 1)) / 32;
+ if (a != b) {
+ b = a;
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ tpc_set |= 1 << ((gpc * 8) + tpc);
+ }
+
+ nv_wr32(dev, 0x406800 + (i * 0x20), tpc_set);
+ nv_wr32(dev, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
+ }
+
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x4064d0 + (i * 0x04), 0x00000000);
+
+ nv_wr32(dev, 0x405b00, 0x201);
+ nv_wr32(dev, 0x408850, 0x2);
+ nv_wr32(dev, 0x408958, 0x2);
+ nv_wr32(dev, 0x419f78, 0xa);
+
+ nve0_grctx_generate_icmd(dev);
+ nve0_grctx_generate_a097(dev);
+ nve0_grctx_generate_902d(dev);
+
+ nv_mask(dev, 0x000260, 0x00000001, 0x00000001);
+ nv_wr32(dev, 0x418800, 0x7026860a); //XXX
+ nv_wr32(dev, 0x41be10, 0x00bb8bc7); //XXX
+ return 0;
+}
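
The two blocks of "magics" in nve0_grctx_generate() pack the GPC/TPC topology into bitfields: the first round-robins every TPC onto a GPC, five bits per entry and six entries per 32-bit word, padding unused slots with 7; the second left-shifts the TPC count until bit 4 is set and records the shift. A standalone sketch reproducing the arithmetic for a made-up four-GPC, two-TPC-per-GPC configuration (not a real Kepler part):

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* User-space rework of the "magics" computed in nve0_grctx_generate()
 * above.  The GPC/TPC counts are example values only. */
#define GPC_MAX 4

int main(void)
{
	uint8_t tpc_nr[GPC_MAX] = { 2, 2, 2, 2 };	/* example topology */
	int gpc_nr = 4, tpc_total = 8;
	uint8_t tpcnr[GPC_MAX];
	uint32_t data[6] = {0}, data2 = 0;
	uint8_t shift = 0, ntpcv;
	int gpc, tpc, i;

	/* first set: round-robin each TPC onto a GPC, 5 bits per entry,
	 * six entries per 32-bit word; unused slots get the value 7 */
	memcpy(tpcnr, tpc_nr, sizeof(tpc_nr));
	gpc = -1;
	for (tpc = 0; tpc < tpc_total; tpc++) {
		do {
			gpc = (gpc + 1) % gpc_nr;
		} while (!tpcnr[gpc]);
		tpcnr[gpc]--;
		data[tpc / 6] |= gpc << ((tpc % 6) * 5);
	}
	for (; tpc < 32; tpc++)
		data[tpc / 6] |= 7 << ((tpc % 6) * 5);

	/* second set: left-shift tpc_total until bit 4 is set, recording
	 * the shift; both values end up packed into one register word */
	ntpcv = tpc_total;
	while (!(ntpcv & (1 << 4))) {
		ntpcv <<= 1;
		shift++;
	}
	data2 = (ntpcv << 16) | (shift << 21) |
		(((1 << 5) % ntpcv) << 24) | (tpc_total << 8);

	for (i = 0; i < 6; i++)
		printf("data[%d]  = 0x%08x\n", i, data[i]);
	printf("data2[0] = 0x%08x (sans magic_not_rop_nr)\n", data2);
	return 0;
}
```

The same data[] words are then fanned out three times, to 0x418b08, 0x41bf00 and 0x40780c, matching the writes near the end of the function above.
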
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 9d83729956ff..a6598fd66423 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -70,8 +70,9 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
- radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
- radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o si_blit_shaders.o
+ evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
+ atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
+ si_blit_shaders.o radeon_prime.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index af1054f8202a..01d77d1554f4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -591,8 +591,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
- /* if (connector && connector->display_info.bpc)
- bpc = connector->display_info.bpc; */
+ bpc = radeon_get_monitor_bpc(connector);
encoder_mode = atombios_get_encoder_mode(encoder);
is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
@@ -968,9 +967,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
-
- /* if (connector->display_info.bpc)
- bpc = connector->display_info.bpc; */
+ bpc = radeon_get_monitor_bpc(connector);
switch (encoder_mode) {
case ATOM_ENCODER_MODE_DP_MST:
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index c57d85664e77..5131b3b0f7d2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -405,13 +405,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
/* get bpc from the EDID */
static int convert_bpc_to_bpp(int bpc)
{
-#if 0
if (bpc == 0)
return 24;
else
return bpc * 3;
-#endif
- return 24;
}
/* get the max pix clock supported by the link rate and lane num */
@@ -463,7 +460,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
- int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
+ int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int max_link_rate = dp_get_max_link_rate(dpcd);
int max_lane_num = dp_get_max_lane_number(dpcd);
int lane_num;
@@ -482,7 +479,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
- int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
+ int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int lane_num, max_pix_clock;
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
@@ -533,6 +530,23 @@ u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
dig_connector->dp_i2c_bus->rec.i2c_id, 0);
}
+static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 buf[3];
+
+ if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0))
+ DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0))
+ DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+}
+
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
@@ -546,6 +560,9 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
for (i = 0; i < 8; i++)
DRM_DEBUG_KMS("%02x ", msg[i]);
DRM_DEBUG_KMS("\n");
+
+ radeon_dp_probe_oui(radeon_connector);
+
return true;
}
dig_connector->dpcd[0] = 0;
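
radeon_dp_probe_oui() added above reads two three-byte IEEE OUIs out of the DPCD, at DP_SINK_OUI (0x400) and DP_BRANCH_OUI (0x500), gated on the sink advertising DP_OUI_SUPPORT (bit 7) in DP_DOWN_STREAM_PORT_COUNT (DPCD 0x007). A tiny sketch of the gating and formatting, using made-up buffer contents rather than a real AUX read:

```c
#include <stdio.h>
#include <stdint.h>

/* Sketch of what radeon_dp_probe_oui() logs.  The byte values below are
 * made-up example data, not read from hardware. */
int main(void)
{
	uint8_t down_stream_port_count = 0x80;	/* bit 7 = DP_OUI_SUPPORT */
	uint8_t sink_oui[3] = { 0x00, 0x1c, 0xf8 };	/* example bytes */

	if (down_stream_port_count & 0x80)
		printf("Sink OUI: %02x%02x%02x\n",
		       sink_oui[0], sink_oui[1], sink_oui[2]);
	return 0;
}
```
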
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2d39f9977e00..e7b1ec5ae8c6 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -545,7 +545,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
hpd_id = radeon_connector->hpd.hpd;
- /* bpc = connector->display_info.bpc; */
+ bpc = radeon_get_monitor_bpc(connector);
}
/* no dig encoder assigned */
@@ -1163,7 +1163,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- /* bpc = connector->display_info.bpc; */
+ bpc = radeon_get_monitor_bpc(connector);
}
memset(&args, 0, sizeof(args));
@@ -1926,7 +1926,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
r600_hdmi_enable(encoder);
- r600_hdmi_setmode(encoder, adjusted_mode);
+ if (ASIC_IS_DCE4(rdev))
+ evergreen_hdmi_setmode(encoder, adjusted_mode);
+ else
+ r600_hdmi_setmode(encoder, adjusted_mode);
}
}
@@ -2081,6 +2084,7 @@ radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
+ struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
@@ -2089,8 +2093,16 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
(radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (dig)
+ if (dig) {
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+ if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
+ if (rdev->family >= CHIP_R600)
+ dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
+ else
+ /* RS600/690/740 have only 1 afmt block */
+ dig->afmt = rdev->mode_info.afmt[0];
+ }
+ }
}
radeon_atom_output_lock(encoder, true);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index cfa372cb1cb3..58991af90502 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2424,27 +2424,18 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
u32 srbm_status;
u32 grbm_status;
u32 grbm_status_se0, grbm_status_se1;
- struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
- int r;
srbm_status = RREG32(SRBM_STATUS);
grbm_status = RREG32(GRBM_STATUS);
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- ring->rptr = RREG32(CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
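
The hunk above swaps the open-coded r100 lockup bookkeeping for the shared radeon_ring_force_activity()/radeon_ring_test_lockup() helpers. The underlying idea, sketched below with illustrative names and a seconds-based clock instead of jiffies: snapshot the ring's read pointer whenever it is known to be making progress, and only declare a lockup if it stays put past a timeout.

```c
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Conceptual sketch only; struct and constant names are illustrative,
 * not the radeon ones. */
struct ring_state {
	uint32_t last_rptr;	/* rptr when the ring last looked alive */
	time_t   last_activity;	/* jiffies in the kernel */
};

static void lockup_update(struct ring_state *rs, uint32_t rptr)
{
	rs->last_rptr = rptr;
	rs->last_activity = time(NULL);
}

static bool test_lockup(struct ring_state *rs, uint32_t rptr, int timeout_s)
{
	if (rptr != rs->last_rptr) {	/* CP is consuming: not hung */
		lockup_update(rs, rptr);
		return false;
	}
	return (time(NULL) - rs->last_activity) > timeout_s;
}

int main(void)
{
	struct ring_state rs;

	lockup_update(&rs, 0);
	return test_lockup(&rs, 0, 10); /* same rptr, within timeout: 0 */
}
```

Forcing activity first (the NOP packets the old code emitted by hand) ensures the read pointer has a reason to move before the stall check runs.
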
@@ -2594,6 +2585,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+ u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2614,6 +2606,13 @@ int evergreen_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
@@ -2690,6 +2689,30 @@ int evergreen_irq_set(struct radeon_device *rdev)
DRM_DEBUG("evergreen_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
+ if (rdev->irq.afmt[0]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
+ afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[1]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
+ afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[2]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
+ afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[3]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
+ afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[4]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
+ afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[5]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
+ afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
if (rdev->irq.gui_idle) {
DRM_DEBUG("gui idle\n");
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
@@ -2732,6 +2755,13 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
+
return 0;
}
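
The six afmtN variables in evergreen_irq_set() all follow one read-modify-write pattern against AFMT_AUDIO_PACKET_CONTROL at each display controller's register offset. A loop-based sketch of the same logic follows; the fake register file, the crtc_offsets values, and the bit position chosen for AFMT_AZ_FORMAT_WTRIG_MASK are all assumptions for illustration.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* fake register file standing in for RREG32/WREG32 */
static uint32_t regs[0x10000];
#define RREG32(r)	(regs[(r) >> 2])
#define WREG32(r, v)	(regs[(r) >> 2] = (v))

#define AFMT_AUDIO_PACKET_CONTROL 0x0010	/* illustrative offset */
#define AFMT_AZ_FORMAT_WTRIG_MASK (1u << 28)	/* assumed bit position */

int main(void)
{
	/* illustrative stand-ins for EVERGREEN_CRTCn_REGISTER_OFFSET */
	static const uint32_t crtc_offsets[6] = {
		0x0000, 0x0800, 0x1000, 0x1800, 0x2000, 0x2800,
	};
	bool afmt_requested[6] = { true, false, false, true, false, false };

	for (int i = 0; i < 6; i++) {
		uint32_t reg = AFMT_AUDIO_PACKET_CONTROL + crtc_offsets[i];
		/* clear the trigger-enable bit, then set it back only for
		 * instances with an afmt interrupt requested */
		uint32_t v = RREG32(reg) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
		if (afmt_requested[i])	/* rdev->irq.afmt[i] in the driver */
			v |= AFMT_AZ_FORMAT_WTRIG_MASK;
		WREG32(reg, v);
		printf("crtc%d: 0x%08x\n", i, RREG32(reg));
	}
	return 0;
}
```
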
@@ -2756,6 +2786,13 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
+ rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
@@ -2829,6 +2866,36 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
+ if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
+ }
}
void evergreen_irq_disable(struct radeon_device *rdev)
@@ -2878,6 +2945,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
+ bool queue_hdmi = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -3111,6 +3179,55 @@ restart_ih:
break;
}
break;
+ case 44: /* hdmi */
+ switch (src_data) {
+ case 0:
+ if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+ }
+ break;
+ case 1:
+ if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+ }
+ break;
+ case 2:
+ if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI2\n");
+ }
+ break;
+ case 3:
+ if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI3\n");
+ }
+ break;
+ case 4:
+ if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI4\n");
+ }
+ break;
+ case 5:
+ if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI5\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -3154,6 +3271,8 @@ restart_ih:
goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -3248,12 +3367,9 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
r = r600_audio_init(rdev);
if (r) {
@@ -3319,10 +3435,6 @@ int evergreen_init(struct radeon_device *rdev)
{
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -3434,7 +3546,6 @@ void evergreen_fini(struct radeon_device *rdev)
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 222acd2d33df..1e96bd458cfd 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -637,7 +637,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
if (rdev->r600_blit.shader_obj)
goto done;
- mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family < CHIP_CAYMAN)
@@ -669,7 +668,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size = ALIGN(obj_size, 256);
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_obj);
+ NULL, &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("evergreen failed to allocate shader\n");
return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 70089d32b80f..4e7dd2b4843d 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1057,7 +1057,7 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
uint32_t header, h_idx, reg, wait_reg_mem_info;
volatile uint32_t *ib;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1215,7 +1215,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(evergreen_reg_safe_bm[i] & m))
return 0;
}
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
switch (reg) {
/* force following reg to 0 in an attempt to disable out buffer
* which will need us to better understand how it works to perform
@@ -1896,7 +1896,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u32 idx_value;
track = (struct evergreen_cs_track *)p->track;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
idx_value = radeon_get_ib_value(p, idx);
@@ -2610,8 +2610,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
- for (r = 0; r < p->ib->length_dw; r++) {
- printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
mdelay(1);
}
#endif
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
new file mode 100644
index 000000000000..a51f880985f8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ * Rafał Miłecki
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+#include "atom.h"
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz));
+ WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz);
+
+ WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz));
+ WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz);
+
+ WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz));
+ WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
+}
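
The N/CTS pairs programmed by evergreen_hdmi_update_ACR() satisfy the HDMI audio clock regeneration relation 128 · fs = f_TMDS · N / CTS, so with the spec's recommended N for each sample rate, CTS follows from the pixel clock. A runnable check (the helper name acr_cts is illustrative):

```c
#include <stdio.h>
#include <stdint.h>

/* CTS = f_TMDS * N / (128 * fs); the clock argument is in kHz, matching
 * how the driver passes mode->clock around, so scale by 1000. */
static uint32_t acr_cts(uint32_t clock_khz, uint32_t n, uint32_t fs_hz)
{
	return (uint64_t)clock_khz * 1000 * n / (128 * fs_hz);
}

int main(void)
{
	uint32_t clock = 74250;	/* 720p60 pixel clock, kHz */

	printf("32 kHz:   N=4096  CTS=%u\n", acr_cts(clock, 4096, 32000));
	printf("44.1 kHz: N=6272  CTS=%u\n", acr_cts(clock, 6272, 44100));
	printf("48 kHz:   N=6144  CTS=%u\n", acr_cts(clock, 6144, 48000));
	return 0;
}
```

For the 74.25 MHz clock this yields CTS values of 74250, 82500 and 74250 respectively, the familiar spec numbers.
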
+
+/*
+ * calculate the checksum for a given info frame
+ */
+static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
+ uint8_t versionNumber,
+ uint8_t length,
+ uint8_t *frame)
+{
+ int i;
+ frame[0] = packetType + versionNumber + length;
+ for (i = 1; i <= length; i++)
+ frame[0] += frame[i];
+ frame[0] = 0x100 - frame[0];
+}
+
+/*
+ * build a HDMI Video Info Frame
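+ * (the 13 payload bytes below follow the CEA-861 AVI InfoFrame layout)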
+ */
+static void evergreen_hdmi_videoinfoframe(
+ struct drm_encoder *encoder,
+ uint8_t color_format,
+ int active_information_present,
+ uint8_t active_format_aspect_ratio,
+ uint8_t scan_information,
+ uint8_t colorimetry,
+ uint8_t ex_colorimetry,
+ uint8_t quantization,
+ int ITC,
+ uint8_t picture_aspect_ratio,
+ uint8_t video_format_identification,
+ uint8_t pixel_repetition,
+ uint8_t non_uniform_picture_scaling,
+ uint8_t bar_info_data_valid,
+ uint16_t top_bar,
+ uint16_t bottom_bar,
+ uint16_t left_bar,
+ uint16_t right_bar
+)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ uint8_t frame[14];
+
+ frame[0x0] = 0;
+ frame[0x1] =
+ (scan_information & 0x3) |
+ ((bar_info_data_valid & 0x3) << 2) |
+ ((active_information_present & 0x1) << 4) |
+ ((color_format & 0x3) << 5);
+ frame[0x2] =
+ (active_format_aspect_ratio & 0xF) |
+ ((picture_aspect_ratio & 0x3) << 4) |
+ ((colorimetry & 0x3) << 6);
+ frame[0x3] =
+ (non_uniform_picture_scaling & 0x3) |
+ ((quantization & 0x3) << 2) |
+ ((ex_colorimetry & 0x7) << 4) |
+ ((ITC & 0x1) << 7);
+ frame[0x4] = (video_format_identification & 0x7F);
+ frame[0x5] = (pixel_repetition & 0xF);
+ frame[0x6] = (top_bar & 0xFF);
+ frame[0x7] = (top_bar >> 8);
+ frame[0x8] = (bottom_bar & 0xFF);
+ frame[0x9] = (bottom_bar >> 8);
+ frame[0xA] = (left_bar & 0xFF);
+ frame[0xB] = (left_bar >> 8);
+ frame[0xC] = (right_bar & 0xFF);
+ frame[0xD] = (right_bar >> 8);
+
+ evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+ /* Our header values (type, version, length) should be alright, Intel
+ * is using the same. The checksum function also seems to be OK, as it
+ * works fine for the audio infoframe. However the calculated value is
+ * always lower by 2 compared to fglrx, which breaks displaying anything
+ * on TVs that strictly check the checksum. Hack it manually here to
+ * work around the issue. */
+ frame[0x0] += 2;
+
+ WREG32(AFMT_AVI_INFO0 + offset,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(AFMT_AVI_INFO1 + offset,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+ WREG32(AFMT_AVI_INFO2 + offset,
+ frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+ WREG32(AFMT_AVI_INFO3 + offset,
+ frame[0xC] | (frame[0xD] << 8));
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
+
+ if (ASIC_IS_DCE5(rdev))
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
+ r600_audio_set_clock(encoder, mode->clock);
+
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_NULL_SEND); /* send null packets when required */
+
+ WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI_ACR_SOURCE); /* select SW CTS value */
+
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_NULL_SEND | /* send null packets when required */
+ HDMI_GC_SEND | /* send general control packets */
+ HDMI_GC_CONT); /* send general control packets every frame */
+
+ WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+
+ WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
+ AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+
+ WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
+ HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+
+ evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0);
+
+ evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+ /* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
+ WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 96c10b3991aa..8beac1065025 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -232,6 +232,4 @@
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
-#define EVERGREEN_HDMI_CONFIG_OFFSET 0xf0
-
#endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b4eefc355f16..79130bfd1d6f 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -112,6 +112,226 @@
#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
#define CP_DEBUG 0xC1FC
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE 0x05ac
+# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE 0x05b0
+#define DCCG_AUDIO_DTO0_MODULE 0x05b4
+#define DCCG_AUDIO_DTO0_LOAD 0x05b8
+#define DCCG_AUDIO_DTO0_CNTL 0x05bc
+
+#define DCCG_AUDIO_DTO1_PHASE 0x05c0
+#define DCCG_AUDIO_DTO1_MODULE 0x05c4
+#define DCCG_AUDIO_DTO1_LOAD 0x05c8
+#define DCCG_AUDIO_DTO1_CNTL 0x05cc
+
+/* DCE 4.0 AFMT */
+#define HDMI_CONTROL 0x7030
+# define HDMI_KEEPOUT_MODE (1 << 0)
+# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
+# define HDMI_ERROR_ACK (1 << 8)
+# define HDMI_ERROR_MASK (1 << 9)
+# define HDMI_DEEP_COLOR_ENABLE (1 << 24)
+# define HDMI_DEEP_COLOR_DEPTH (((x) & 3) << 28)
+# define HDMI_24BIT_DEEP_COLOR 0
+# define HDMI_30BIT_DEEP_COLOR 1
+# define HDMI_36BIT_DEEP_COLOR 2
+#define HDMI_STATUS 0x7034
+# define HDMI_ACTIVE_AVMUTE (1 << 0)
+# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
+# define HDMI_VBI_PACKET_ERROR (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL 0x7038
+# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL 0x703c
+# define HDMI_ACR_SEND (1 << 0)
+# define HDMI_ACR_CONT (1 << 1)
+# define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
+# define HDMI_ACR_HW 0
+# define HDMI_ACR_32 1
+# define HDMI_ACR_44 2
+# define HDMI_ACR_48 3
+# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI_ACR_AUTO_SEND (1 << 12)
+# define HDMI_ACR_N_MULTIPLE(x) (((x) & 7) << 16)
+# define HDMI_ACR_X1 1
+# define HDMI_ACR_X2 2
+# define HDMI_ACR_X4 4
+# define HDMI_ACR_AUDIO_PRIORITY (1 << 31)
+#define HDMI_VBI_PACKET_CONTROL 0x7040
+# define HDMI_NULL_SEND (1 << 0)
+# define HDMI_GC_SEND (1 << 4)
+# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0 0x7044
+# define HDMI_AVI_INFO_SEND (1 << 0)
+# define HDMI_AVI_INFO_CONT (1 << 1)
+# define HDMI_AUDIO_INFO_SEND (1 << 4)
+# define HDMI_AUDIO_INFO_CONT (1 << 5)
+# define HDMI_MPEG_INFO_SEND (1 << 8)
+# define HDMI_MPEG_INFO_CONT (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1 0x7048
+# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL 0x704c
+# define HDMI_GENERIC0_SEND (1 << 0)
+# define HDMI_GENERIC0_CONT (1 << 1)
+# define HDMI_GENERIC1_SEND (1 << 4)
+# define HDMI_GENERIC1_CONT (1 << 5)
+# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI_GC 0x7058
+# define HDMI_GC_AVMUTE (1 << 0)
+# define HDMI_GC_AVMUTE_CONT (1 << 2)
+#define AFMT_AUDIO_PACKET_CONTROL2 0x705c
+# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
+# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
+# define AFMT_60958_CS_SOURCE (1 << 4)
+# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
+# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0 0x7084
+# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
+# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
+# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
+# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
+# define AFMT_AVI_INFO_Y_RGB 0
+# define AFMT_AVI_INFO_Y_YCBCR422 1
+# define AFMT_AVI_INFO_Y_YCBCR444 2
+# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
+# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
+# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
+# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
+# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
+# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
+# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1 0x7088
+# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_CN(x) (((x) & 0x3) << 12)
+# define AFMT_AVI_INFO_YQ(x) (((x) & 0x3) << 14)
+# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2 0x708c
+# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3 0x7090
+# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0 0x7094
+# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1 0x7098
+# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
+# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR 0x709c
+#define AFMT_GENERIC0_0 0x70a0
+#define AFMT_GENERIC0_1 0x70a4
+#define AFMT_GENERIC0_2 0x70a8
+#define AFMT_GENERIC0_3 0x70ac
+#define AFMT_GENERIC0_4 0x70b0
+#define AFMT_GENERIC0_5 0x70b4
+#define AFMT_GENERIC0_6 0x70b8
+#define AFMT_GENERIC1_HDR 0x70bc
+#define AFMT_GENERIC1_0 0x70c0
+#define AFMT_GENERIC1_1 0x70c4
+#define AFMT_GENERIC1_2 0x70c8
+#define AFMT_GENERIC1_3 0x70cc
+#define AFMT_GENERIC1_4 0x70d0
+#define AFMT_GENERIC1_5 0x70d4
+#define AFMT_GENERIC1_6 0x70d8
+#define HDMI_ACR_32_0 0x70dc
+# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1 0x70e0
+# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0 0x70e4
+# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1 0x70e8
+# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0 0x70ec
+# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1 0x70f0
+# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0 0x70f4
+#define HDMI_ACR_STATUS_1 0x70f8
+#define AFMT_AUDIO_INFO0 0x70fc
+# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+# define AFMT_AUDIO_INFO_CT(x) (((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
+# define AFMT_AUDIO_INFO_CXT(x) (((x) & 0x1f) << 24)
+#define AFMT_AUDIO_INFO1 0x7100
+# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+# define AFMT_AUDIO_INFO_LFEBPL(x) (((x) & 3) << 16)
+#define AFMT_60958_0 0x7104
+# define AFMT_60958_CS_A(x) (((x) & 1) << 0)
+# define AFMT_60958_CS_B(x) (((x) & 1) << 1)
+# define AFMT_60958_CS_C(x) (((x) & 1) << 2)
+# define AFMT_60958_CS_D(x) (((x) & 3) << 3)
+# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
+# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define AFMT_60958_1 0x7108
+# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
+# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
+# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL 0x710c
+# define AFMT_AUDIO_CRC_EN (1 << 0)
+#define AFMT_RAMP_CONTROL0 0x7110
+# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_RAMP_DATA_SIGN (1 << 31)
+#define AFMT_RAMP_CONTROL1 0x7114
+# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2 0x7118
+# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3 0x711c
+# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_60958_2 0x7120
+# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+#define AFMT_STATUS 0x7128
+# define AFMT_AUDIO_ENABLE (1 << 4)
+# define AFMT_AUDIO_HBR_ENABLE (1 << 8)
+# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x712c
+# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+# define AFMT_RESET_FIFO_WHEN_AUDIO_DIS (1 << 11) /* set to 1 */
+# define AFMT_AUDIO_TEST_EN (1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+# define AFMT_60958_CS_UPDATE (1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL 0x7130
+# define AFMT_GENERIC0_UPDATE (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0 0x7134
+# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - afmt regs */
+# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
+# define AFMT_MPEG_INFO_UPDATE (1 << 10)
+#define AFMT_GENERIC0_7 0x7138
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index a48ca53fcd6a..b01c2dd627b0 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1392,35 +1392,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
return 0;
}
-bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 srbm_status;
- u32 grbm_status;
- u32 grbm_status_se0, grbm_status_se1;
- struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
- int r;
-
- srbm_status = RREG32(SRBM_STATUS);
- grbm_status = RREG32(GRBM_STATUS);
- grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
- grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
- if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, ring);
- return false;
- }
- /* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- /* XXX deal with CP0,1,2 */
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
-}
-
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
@@ -1601,12 +1572,9 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
r = radeon_vm_manager_start(rdev);
if (r)
@@ -1661,10 +1629,6 @@ int cayman_init(struct radeon_device *rdev)
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -1776,7 +1740,6 @@ void cayman_fini(struct radeon_device *rdev)
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index fe33d35dae8c..fb44e7e49083 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -139,9 +139,9 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
}
tmp |= tile_flags;
- p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
+ p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
} else
- p->ib->ptr[idx] = (value & 0xffc00000) | tmp;
+ p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
return 0;
}
@@ -156,7 +156,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
volatile uint32_t *ib;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
if (c > 16) {
@@ -660,7 +660,7 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
WREG32(RADEON_AIC_CNTL, tmp);
r100_pci_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
@@ -1180,6 +1180,10 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
+
+ /* at this point everything should be setup correctly to enable master */
+ pci_set_master(rdev->pdev);
+
radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
if (r) {
@@ -1271,7 +1275,7 @@ void r100_cs_dump_packet(struct radeon_cs_parser *p,
unsigned i;
unsigned idx;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++) {
DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -1350,7 +1354,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
uint32_t header, h_idx, reg;
volatile uint32_t *ib;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
/* parse the wait until */
r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -1529,7 +1533,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
u32 tile_flags = 0;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
@@ -1885,7 +1889,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
volatile uint32_t *ib;
int r;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch (pkt->opcode) {
@@ -2004,6 +2008,8 @@ int r100_cs_parse(struct radeon_cs_parser *p)
int r;
track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (!track)
+ return -ENOMEM;
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
@@ -2155,79 +2161,18 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
-{
- lockup->last_cp_rptr = ring->rptr;
- lockup->last_jiffies = jiffies;
-}
-
-/**
- * r100_gpu_cp_is_lockup() - check if CP is lockup by recording information
- * @rdev: radeon device structure
- * @lockup: r100_gpu_lockup structure holding CP lockup tracking informations
- * @cp: radeon_cp structure holding CP information
- *
- * We don't need to initialize the lockup tracking information as we will either
- * have CP rptr to a different value of jiffies wrap around which will force
- * initialization of the lockup tracking informations.
- *
- * A possible false positivie is if we get call after while and last_cp_rptr ==
- * the current CP rptr, even if it's unlikely it might happen. To avoid this
- * if the elapsed time since last call is bigger than 2 second than we return
- * false and update the tracking information. Due to this the caller must call
- * r100_gpu_cp_is_lockup several time in less than 2sec for lockup to be reported
- * the fencing code should be cautious about that.
- *
- * Caller should write to the ring to force CP to do something so we don't get
- * false positive when CP is just gived nothing to do.
- *
- **/
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
-{
- unsigned long cjiffies, elapsed;
-
- cjiffies = jiffies;
- if (!time_after(cjiffies, lockup->last_jiffies)) {
- /* likely a wrap around */
- lockup->last_cp_rptr = ring->rptr;
- lockup->last_jiffies = jiffies;
- return false;
- }
- if (ring->rptr != lockup->last_cp_rptr) {
- /* CP is still working no lockup */
- lockup->last_cp_rptr = ring->rptr;
- lockup->last_jiffies = jiffies;
- return false;
- }
- elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
- if (elapsed >= 10000) {
- dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
- return true;
- }
- /* give a chance to the GPU ... */
- return false;
-}
-
bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rbbm_status;
- int r;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
void r100_bm_disable(struct radeon_device *rdev)
@@ -2296,7 +2241,6 @@ int r100_asic_reset(struct radeon_device *rdev)
if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
- rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
@@ -3742,7 +3686,7 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
- struct radeon_ib *ib;
+ struct radeon_ib ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
@@ -3758,22 +3702,22 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
if (r) {
return r;
}
- ib->ptr[0] = PACKET0(scratch, 0);
- ib->ptr[1] = 0xDEADBEEF;
- ib->ptr[2] = PACKET2(0);
- ib->ptr[3] = PACKET2(0);
- ib->ptr[4] = PACKET2(0);
- ib->ptr[5] = PACKET2(0);
- ib->ptr[6] = PACKET2(0);
- ib->ptr[7] = PACKET2(0);
- ib->length_dw = 8;
- r = radeon_ib_schedule(rdev, ib);
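+ /* test IB: write 0xDEADBEEF to the scratch register, pad with NOPs */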
+ ib.ptr[0] = PACKET0(scratch, 0);
+ ib.ptr[1] = 0xDEADBEEF;
+ ib.ptr[2] = PACKET2(0);
+ ib.ptr[3] = PACKET2(0);
+ ib.ptr[4] = PACKET2(0);
+ ib.ptr[5] = PACKET2(0);
+ ib.ptr[6] = PACKET2(0);
+ ib.ptr[7] = PACKET2(0);
+ ib.length_dw = 8;
+ r = radeon_ib_schedule(rdev, &ib);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
return r;
}
- r = radeon_fence_wait(ib->fence, false);
+ r = radeon_fence_wait(ib.fence, false);
if (r) {
return r;
}
@@ -3965,12 +3909,9 @@ static int r100_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a59cc474d537..a26144d01207 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -154,7 +154,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
u32 tile_flags = 0;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch (reg) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index fa14383f9ca0..97722a33e513 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -377,28 +377,6 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 rbbm_status;
- int r;
-
- rbbm_status = RREG32(R_000E40_RBBM_STATUS);
- if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
- return false;
- }
- /* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- ring->rptr = RREG32(RADEON_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
-}
-
int r300_asic_reset(struct radeon_device *rdev)
{
struct r100_mc_save save;
@@ -449,7 +427,6 @@ int r300_asic_reset(struct radeon_device *rdev)
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
- rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
@@ -627,7 +604,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
int r;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
@@ -1169,7 +1146,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
unsigned idx;
int r;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch(pkt->opcode) {
@@ -1418,12 +1395,9 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index f3fcaacfea01..99137be7a300 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -279,12 +279,9 @@ static int r420_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index ebcc15b03c9f..b5cf8375cd25 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -207,12 +207,10 @@ static int r520_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c8187c4b6ae8..f388a1d73b63 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -713,6 +713,14 @@ void r600_hpd_init(struct radeon_device *rdev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS, to avoid breaking the
+ * aux dp channel on imac; it helps with (but does not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ */
+ continue;
+ }
if (ASIC_IS_DCE3(rdev)) {
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
if (ASIC_IS_DCE32(rdev))
@@ -1223,7 +1231,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->vram_scratch.robj);
+ NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
@@ -1350,31 +1358,17 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
u32 srbm_status;
u32 grbm_status;
u32 grbm_status2;
- struct r100_gpu_lockup *lockup;
- int r;
-
- if (rdev->family >= CHIP_RV770)
- lockup = &rdev->config.rv770.lockup;
- else
- lockup = &rdev->config.r600.lockup;
srbm_status = RREG32(R_000E50_SRBM_STATUS);
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
- r100_gpu_lockup_update(lockup, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
int r600_asic_reset(struct radeon_device *rdev)
@@ -2377,20 +2371,15 @@ int r600_copy_blit(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
+ struct radeon_sa_bo *vb = NULL;
int r;
- mutex_lock(&rdev->r600_blit.mutex);
- rdev->r600_blit.vb_ib = NULL;
- r = r600_blit_prepare_copy(rdev, num_gpu_pages);
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
if (r) {
- if (rdev->r600_blit.vb_ib)
- radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
- mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
- r600_blit_done_copy(rdev, fence);
- mutex_unlock(&rdev->r600_blit.mutex);
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
+ r600_blit_done_copy(rdev, fence, vb);
return 0;
}
@@ -2494,12 +2483,9 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
@@ -2574,10 +2560,6 @@ int r600_init(struct radeon_device *rdev)
if (r600_debugfs_mc_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for mc !\n");
}
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -2675,7 +2657,6 @@ void r600_fini(struct radeon_device *rdev)
r600_vram_scratch_fini(rdev);
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
@@ -2704,7 +2685,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
- struct radeon_ib *ib;
+ struct radeon_ib ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
@@ -2722,18 +2703,18 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
}
- ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
- ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- ib->ptr[2] = 0xDEADBEEF;
- ib->length_dw = 3;
- r = radeon_ib_schedule(rdev, ib);
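+ /* test IB: SET_CONFIG_REG write of 0xDEADBEEF to the scratch register */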
+ ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+ ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ ib.ptr[2] = 0xDEADBEEF;
+ ib.length_dw = 3;
+ r = radeon_ib_schedule(rdev, &ib);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
return r;
}
- r = radeon_fence_wait(ib->fence, false);
+ r = radeon_fence_wait(ib.fence, false);
if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
return r;
@@ -2745,7 +2726,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
} else {
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
@@ -2788,7 +2769,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
- &rdev->ih.ring_obj);
+ NULL, &rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
return r;
@@ -2968,6 +2949,15 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD6_INT_CONTROL, tmp);
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
+ } else {
+ tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
}
} else {
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
@@ -2978,6 +2968,10 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
}
}
@@ -3047,6 +3041,9 @@ int r600_irq_init(struct radeon_device *rdev)
else
r600_disable_interrupt_state(rdev);
+ /* at this point everything should be setup correctly to enable master */
+ pci_set_master(rdev->pdev);
+
/* enable irqs */
r600_enable_interrupts(rdev);
@@ -3071,7 +3068,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
- u32 hdmi1, hdmi2;
+ u32 hdmi0, hdmi1;
u32 d1grph = 0, d2grph = 0;
if (!rdev->irq.installed) {
@@ -3086,9 +3083,7 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
- hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
if (ASIC_IS_DCE3(rdev)) {
- hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -3096,12 +3091,18 @@ int r600_irq_set(struct radeon_device *rdev)
if (ASIC_IS_DCE32(rdev)) {
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ } else {
+ hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
} else {
- hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
@@ -3143,13 +3144,13 @@ int r600_irq_set(struct radeon_device *rdev)
DRM_DEBUG("r600_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
- if (rdev->irq.hdmi[0]) {
- DRM_DEBUG("r600_irq_set: hdmi 1\n");
- hdmi1 |= R600_HDMI_INT_EN;
+ if (rdev->irq.afmt[0]) {
+ DRM_DEBUG("r600_irq_set: hdmi 0\n");
+ hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
}
- if (rdev->irq.hdmi[1]) {
- DRM_DEBUG("r600_irq_set: hdmi 2\n");
- hdmi2 |= R600_HDMI_INT_EN;
+ if (rdev->irq.afmt[1]) {
+ DRM_DEBUG("r600_irq_set: hdmi 0\n");
+ hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.gui_idle) {
DRM_DEBUG("gui idle\n");
@@ -3161,9 +3162,7 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
- WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
- WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -3171,12 +3170,18 @@ int r600_irq_set(struct radeon_device *rdev)
if (ASIC_IS_DCE32(rdev)) {
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
+ } else {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+ WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
} else {
- WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+ WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
return 0;
@@ -3190,10 +3195,19 @@ static void r600_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+ if (ASIC_IS_DCE32(rdev)) {
+ rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
+ rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
+ } else {
+ rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+ rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
+ }
} else {
rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
+ rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+ rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
}
rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
@@ -3259,17 +3273,32 @@ static void r600_irq_ack(struct radeon_device *rdev)
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
- }
- if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
- WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
- }
- if (ASIC_IS_DCE3(rdev)) {
- if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
- WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+ }
+ if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
}
} else {
- if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
- WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
+ tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
+ tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+ WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
+ } else {
+ tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
+ tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+ WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
+ }
}
}
}
@@ -3345,6 +3374,7 @@ int r600_irq_process(struct radeon_device *rdev)
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
+ bool queue_hdmi = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -3480,9 +3510,26 @@ restart_ih:
break;
}
break;
- case 21: /* HDMI */
- DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
- r600_audio_schedule_polling(rdev);
+ case 21: /* hdmi */
+ switch (src_data) {
+ case 4:
+ if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+ }
+ break;
+ case 5:
+ if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
@@ -3514,6 +3561,8 @@ restart_ih:
goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index ba66f3093d46..7c4fa77f018f 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -29,7 +29,28 @@
#include "radeon_asic.h"
#include "atom.h"
-#define AUDIO_TIMER_INTERVALL 100 /* 1/10 sekund should be enough */
+/*
+ * check if enc_priv stores radeon_encoder_atom_dig
+ */
+static bool radeon_dig_encoder(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return true;
+ }
+ return false;
+}
/*
* check if the chipset is supported
@@ -42,118 +63,85 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev)
|| rdev->family == CHIP_RS740;
}
-/*
- * current number of channels
- */
-int r600_audio_channels(struct radeon_device *rdev)
+struct r600_audio r600_audio_status(struct radeon_device *rdev)
{
- return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
-}
+ struct r600_audio status;
+ uint32_t value;
-/*
- * current bits per sample
- */
-int r600_audio_bits_per_sample(struct radeon_device *rdev)
-{
- uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
- switch (value) {
- case 0x0: return 8;
- case 0x1: return 16;
- case 0x2: return 20;
- case 0x3: return 24;
- case 0x4: return 32;
- }
+ value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
- dev_err(rdev->dev, "Unknown bits per sample 0x%x using 16 instead\n",
- (int)value);
+ /* number of channels */
+ status.channels = (value & 0x7) + 1;
- return 16;
-}
-
-/*
- * current sampling rate in HZ
- */
-int r600_audio_rate(struct radeon_device *rdev)
-{
- uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
- uint32_t result;
+ /* bits per sample */
+ switch ((value & 0xF0) >> 4) {
+ case 0x0:
+ status.bits_per_sample = 8;
+ break;
+ case 0x1:
+ status.bits_per_sample = 16;
+ break;
+ case 0x2:
+ status.bits_per_sample = 20;
+ break;
+ case 0x3:
+ status.bits_per_sample = 24;
+ break;
+ case 0x4:
+ status.bits_per_sample = 32;
+ break;
+ default:
+ dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
+ (int)value);
+ status.bits_per_sample = 16;
+ }
+ /* current sampling rate in HZ */
if (value & 0x4000)
- result = 44100;
+ status.rate = 44100;
else
- result = 48000;
+ status.rate = 48000;
+ status.rate *= ((value >> 11) & 0x7) + 1;
+ status.rate /= ((value >> 8) & 0x7) + 1;
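+ /* e.g. value = 0x4000 | (1 << 11) decodes to 44100 * 2 / 1 = 88200 Hz */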
- result *= ((value >> 11) & 0x7) + 1;
- result /= ((value >> 8) & 0x7) + 1;
+ value = RREG32(R600_AUDIO_STATUS_BITS);
- return result;
-}
+ /* iec 60958 status bits */
+ status.status_bits = value & 0xff;
-/*
- * iec 60958 status bits
- */
-uint8_t r600_audio_status_bits(struct radeon_device *rdev)
-{
- return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
-}
+ /* iec 60958 category code */
+ status.category_code = (value >> 8) & 0xff;
-/*
- * iec 60958 category code
- */
-uint8_t r600_audio_category_code(struct radeon_device *rdev)
-{
- return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
-}
-
-/*
- * schedule next audio update event
- */
-void r600_audio_schedule_polling(struct radeon_device *rdev)
-{
- mod_timer(&rdev->audio_timer,
- jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+ return status;
}
/*
* update all hdmi interfaces with current audio parameters
*/
-static void r600_audio_update_hdmi(unsigned long param)
+void r600_audio_update_hdmi(struct work_struct *work)
{
- struct radeon_device *rdev = (struct radeon_device *)param;
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ audio_work);
struct drm_device *dev = rdev->ddev;
-
- int channels = r600_audio_channels(rdev);
- int rate = r600_audio_rate(rdev);
- int bps = r600_audio_bits_per_sample(rdev);
- uint8_t status_bits = r600_audio_status_bits(rdev);
- uint8_t category_code = r600_audio_category_code(rdev);
-
+ struct r600_audio audio_status = r600_audio_status(rdev);
struct drm_encoder *encoder;
- int changes = 0, still_going = 0;
-
- changes |= channels != rdev->audio_channels;
- changes |= rate != rdev->audio_rate;
- changes |= bps != rdev->audio_bits_per_sample;
- changes |= status_bits != rdev->audio_status_bits;
- changes |= category_code != rdev->audio_category_code;
-
- if (changes) {
- rdev->audio_channels = channels;
- rdev->audio_rate = rate;
- rdev->audio_bits_per_sample = bps;
- rdev->audio_status_bits = status_bits;
- rdev->audio_category_code = category_code;
+ bool changed = false;
+
+ if (rdev->audio_status.channels != audio_status.channels ||
+ rdev->audio_status.rate != audio_status.rate ||
+ rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
+ rdev->audio_status.status_bits != audio_status.status_bits ||
+ rdev->audio_status.category_code != audio_status.category_code) {
+ rdev->audio_status = audio_status;
+ changed = true;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- still_going |= radeon_encoder->audio_polling_active;
- if (changes || r600_hdmi_buffer_status_changed(encoder))
+ if (!radeon_dig_encoder(encoder))
+ continue;
+ if (changed || r600_hdmi_buffer_status_changed(encoder))
r600_hdmi_update_audio_settings(encoder);
}
-
- if (still_going)
- r600_audio_schedule_polling(rdev);
}
/*
@@ -177,7 +165,7 @@ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
}
/*
- * initialize the audio vars and register the update timer
+ * initialize the audio vars
*/
int r600_audio_init(struct radeon_device *rdev)
{
@@ -186,51 +174,16 @@ int r600_audio_init(struct radeon_device *rdev)
r600_audio_engine_enable(rdev, true);
- rdev->audio_channels = -1;
- rdev->audio_rate = -1;
- rdev->audio_bits_per_sample = -1;
- rdev->audio_status_bits = 0;
- rdev->audio_category_code = 0;
-
- setup_timer(
- &rdev->audio_timer,
- r600_audio_update_hdmi,
- (unsigned long)rdev);
+ rdev->audio_status.channels = -1;
+ rdev->audio_status.rate = -1;
+ rdev->audio_status.bits_per_sample = -1;
+ rdev->audio_status.status_bits = 0;
+ rdev->audio_status.category_code = 0;
return 0;
}
/*
- * enable the polling timer, to check for status changes
- */
-void r600_audio_enable_polling(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-
- DRM_DEBUG("r600_audio_enable_polling: %d\n",
- radeon_encoder->audio_polling_active);
- if (radeon_encoder->audio_polling_active)
- return;
-
- radeon_encoder->audio_polling_active = 1;
- if (rdev->audio_enabled)
- mod_timer(&rdev->audio_timer, jiffies + 1);
-}
-
-/*
- * disable the polling timer, so we get no more status updates
- */
-void r600_audio_disable_polling(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DRM_DEBUG("r600_audio_disable_polling: %d\n",
- radeon_encoder->audio_polling_active);
- radeon_encoder->audio_polling_active = 0;
-}
-
-/*
* attach the audio codec to the clock source of the encoder
*/
void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
@@ -297,7 +250,5 @@ void r600_audio_fini(struct radeon_device *rdev)
if (!rdev->audio_enabled)
return;
- del_timer(&rdev->audio_timer);
-
r600_audio_engine_enable(rdev, false);
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index db38f587f27a..03b6e0d3d503 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -513,7 +513,6 @@ int r600_blit_init(struct radeon_device *rdev)
rdev->r600_blit.primitives.set_default_state = set_default_state;
rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
@@ -528,7 +527,6 @@ int r600_blit_init(struct radeon_device *rdev)
if (rdev->r600_blit.shader_obj)
goto done;
- mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family >= CHIP_RV770)
@@ -554,7 +552,7 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size = ALIGN(obj_size, 256);
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_obj);
+ NULL, &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
return r;
@@ -621,27 +619,6 @@ void r600_blit_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
-static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
-{
- int r;
- r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
- &rdev->r600_blit.vb_ib, size);
- if (r) {
- DRM_ERROR("failed to get IB for vertex buffer\n");
- return r;
- }
-
- rdev->r600_blit.vb_total = size;
- rdev->r600_blit.vb_used = 0;
- return 0;
-}
-
-static void r600_vb_ib_put(struct radeon_device *rdev)
-{
- radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
- radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
-}
-
static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int *width, int *height, int max_dim)
{
@@ -688,7 +665,8 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
}
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+ struct radeon_sa_bo **vb)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
@@ -705,46 +683,54 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
}
/* 48 bytes for vertex per loop */
- r = r600_vb_ib_get(rdev, (num_loops*48)+256);
- if (r)
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
+ (num_loops*48)+256, 256, true);
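+ /* i.e. 12 dwords of vertex data per loop plus slack, 256-byte aligned */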
+ if (r) {
return r;
+ }
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
r = radeon_ring_lock(rdev, ring, ring_size);
- if (r)
+ if (r) {
+ radeon_sa_bo_free(rdev, vb, NULL);
return r;
+ }
rdev->r600_blit.primitives.set_default_state(rdev);
rdev->r600_blit.primitives.set_shaders(rdev);
return 0;
}
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
+ struct radeon_sa_bo *vb)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- if (rdev->r600_blit.vb_ib)
- r600_vb_ib_put(rdev);
-
- if (fence)
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return;
+ }
- radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_sa_bo_free(rdev, &vb, fence);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages)
+ unsigned num_gpu_pages,
+ struct radeon_sa_bo *vb)
{
u64 vb_gpu_addr;
- u32 *vb;
+ u32 *vb_cpu_addr;
- DRM_DEBUG("emitting copy %16llx %16llx %d %d\n",
- src_gpu_addr, dst_gpu_addr,
- num_gpu_pages, rdev->r600_blit.vb_used);
- vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
+ DRM_DEBUG("emitting copy %16llx %16llx %d\n",
+ src_gpu_addr, dst_gpu_addr, num_gpu_pages);
+ vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
+ vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
while (num_gpu_pages) {
int w, h;
@@ -756,39 +742,34 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
- if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
- WARN_ON(1);
- }
-
- vb[0] = 0;
- vb[1] = 0;
- vb[2] = 0;
- vb[3] = 0;
+ vb_cpu_addr[0] = 0;
+ vb_cpu_addr[1] = 0;
+ vb_cpu_addr[2] = 0;
+ vb_cpu_addr[3] = 0;
- vb[4] = 0;
- vb[5] = i2f(h);
- vb[6] = 0;
- vb[7] = i2f(h);
+ vb_cpu_addr[4] = 0;
+ vb_cpu_addr[5] = i2f(h);
+ vb_cpu_addr[6] = 0;
+ vb_cpu_addr[7] = i2f(h);
- vb[8] = i2f(w);
- vb[9] = i2f(h);
- vb[10] = i2f(w);
- vb[11] = i2f(h);
+ vb_cpu_addr[8] = i2f(w);
+ vb_cpu_addr[9] = i2f(h);
+ vb_cpu_addr[10] = i2f(w);
+ vb_cpu_addr[11] = i2f(h);
rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
w, h, w, src_gpu_addr, size_in_bytes);
rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
w, h, dst_gpu_addr);
rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
- vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
rdev->r600_blit.primitives.draw_auto(rdev);
rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
size_in_bytes, dst_gpu_addr);
- vb += 12;
- rdev->r600_blit.vb_used += 4*12;
+ vb_cpu_addr += 12;
+ vb_gpu_addr += 4*12;
src_gpu_addr += size_in_bytes;
dst_gpu_addr += size_in_bytes;
num_gpu_pages -= pages_per_loop;
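The hunks above drop the blit code's private vertex IB (vb_ib/vb_used) in favour of a radeon_sa_bo sub-allocation whose lifetime is tied to the copy's fence. A minimal sketch of the resulting calling convention; the wrapper name and error handling are illustrative, not part of this patch:

static int example_blit_copy(struct radeon_device *rdev,
			     u64 src_gpu_addr, u64 dst_gpu_addr,
			     unsigned num_gpu_pages,
			     struct radeon_fence *fence)
{
	struct radeon_sa_bo *vb = NULL;
	int r;

	/* reserves ring space and sub-allocates the vertex buffer from
	 * rdev->ring_tmp_bo; on failure there is nothing to clean up */
	r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
	if (r)
		return r;

	/* emits one textured quad per rectangle into vb */
	r600_kms_blit_copy(rdev, src_gpu_addr, dst_gpu_addr, num_gpu_pages, vb);

	/* emits the fence, commits the ring, and queues vb for release
	 * once that fence signals */
	r600_blit_done_copy(rdev, fence, vb);
	return 0;
}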
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b8e12af304a9..0133f5f09bd6 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -345,7 +345,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
u32 height, height_align, pitch, pitch_align, depth_align;
u64 base_offset, base_align;
struct array_mode_checker array_check;
- volatile u32 *ib = p->ib->ptr;
+ volatile u32 *ib = p->ib.ptr;
unsigned array_mode;
u32 format;
@@ -471,7 +471,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
u64 base_offset, base_align;
struct array_mode_checker array_check;
int array_mode;
- volatile u32 *ib = p->ib->ptr;
+ volatile u32 *ib = p->ib.ptr;
if (track->db_bo == NULL) {
@@ -961,7 +961,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
uint32_t header, h_idx, reg, wait_reg_mem_info;
volatile uint32_t *ib;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1110,7 +1110,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
m = 1 << ((reg >> 2) & 31);
if (!(r600_reg_safe_bm[i] & m))
return 0;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
switch (reg) {
/* force following reg to 0 in an attempt to disable out buffer
* which will need us to better understand how it works to perform
@@ -1714,7 +1714,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u32 idx_value;
track = (struct r600_cs_track *)p->track;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
idx_value = radeon_get_ib_value(p, idx);
@@ -2249,8 +2249,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
- for (r = 0; r < p->ib->length_dw; r++) {
- printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
mdelay(1);
}
#endif
@@ -2298,7 +2298,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
{
struct radeon_cs_parser parser;
struct radeon_cs_chunk *ib_chunk;
- struct radeon_ib fake_ib;
struct r600_cs_track *track;
int r;
@@ -2314,9 +2313,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
parser.dev = &dev->pdev->dev;
parser.rdev = NULL;
parser.family = family;
- parser.ib = &fake_ib;
parser.track = track;
- fake_ib.ptr = ib;
+ parser.ib.ptr = ib;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
@@ -2333,8 +2331,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
* input memory (cached) and write to the IB (which can be
* uncached). */
ib_chunk = &parser.chunks[parser.chunk_ib_idx];
- parser.ib->length_dw = ib_chunk->length_dw;
- *l = parser.ib->length_dw;
+ parser.ib.length_dw = ib_chunk->length_dw;
+ *l = parser.ib.length_dw;
r = r600_cs_parse(&parser);
if (r) {
DRM_ERROR("Invalid command stream !\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 0b5920671450..226379e00ac1 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -27,6 +27,7 @@
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
+#include "r600d.h"
#include "atom.h"
/*
@@ -52,19 +53,7 @@ enum r600_hdmi_iec_status_bits {
AUDIO_STATUS_LEVEL = 0x80
};
-struct {
- uint32_t Clock;
-
- int N_32kHz;
- int CTS_32kHz;
-
- int N_44_1kHz;
- int CTS_44_1kHz;
-
- int N_48kHz;
- int CTS_48kHz;
-
-} r600_hdmi_ACR[] = {
+struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
/* 32kHz 44.1kHz 48kHz */
/* Clock N CTS N CTS N CTS */
{ 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25.20/1.001 MHz */
@@ -83,7 +72,7 @@ struct {
/*
* calculate CTS value if it's not found in the table
*/
-static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
{
if (*CTS == 0)
*CTS = clock * N / (128 * freq) * 1000;
@@ -91,6 +80,24 @@ static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
N, *CTS, freq);
}
+struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
+{
+ struct radeon_hdmi_acr res;
+ u8 i;
+
+ for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
+ r600_hdmi_predefined_acr[i].clock != 0; i++)
+ ;
+ res = r600_hdmi_predefined_acr[i];
+
+ /* In case some CTS values are missing */
+ r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
+ r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
+ r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
+
+ return res;
+}
+
/*
* update the N and CTS parameters for a given pixel clock rate
*/
@@ -98,30 +105,19 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
- int CTS;
- int N;
- int i;
+ struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
+ WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
- for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
-
- CTS = r600_hdmi_ACR[i].CTS_32kHz;
- N = r600_hdmi_ACR[i].N_32kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
- WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
- WREG32(offset+R600_HDMI_32kHz_N, N);
-
- CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
- N = r600_hdmi_ACR[i].N_44_1kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
- WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
- WREG32(offset+R600_HDMI_44_1kHz_N, N);
-
- CTS = r600_hdmi_ACR[i].CTS_48kHz;
- N = r600_hdmi_ACR[i].N_48kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
- WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
- WREG32(offset+R600_HDMI_48kHz_N, N);
+ WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
+ WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
+
+ WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
+ WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
}
/*
@@ -165,7 +161,9 @@ static void r600_hdmi_videoinfoframe(
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
uint8_t frame[14];
@@ -204,13 +202,13 @@ static void r600_hdmi_videoinfoframe(
* work around this issue. */
frame[0x0] += 2;
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
+ WREG32(HDMI0_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
+ WREG32(HDMI0_AVI_INFO1 + offset,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
+ WREG32(HDMI0_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
+ WREG32(HDMI0_AVI_INFO3 + offset,
frame[0xC] | (frame[0xD] << 8));
}
@@ -231,7 +229,9 @@ static void r600_hdmi_audioinfoframe(
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
uint8_t frame[11];
@@ -249,22 +249,24 @@ static void r600_hdmi_audioinfoframe(
r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
- WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
+ WREG32(HDMI0_AUDIO_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
+ WREG32(HDMI0_AUDIO_INFO1 + offset,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
}
/*
* test if audio buffer is filled enough to start playing
*/
-static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
+static bool r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
- return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
+ return (RREG32(HDMI0_STATUS + offset) & 0x10) != 0;
}
/*
@@ -273,14 +275,15 @@ static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
int status, result;
- if (!radeon_encoder->hdmi_offset)
+ if (!dig->afmt || !dig->afmt->enabled)
return 0;
status = r600_hdmi_is_audio_buffer_filled(encoder);
- result = radeon_encoder->hdmi_buffer_status != status;
- radeon_encoder->hdmi_buffer_status = status;
+ result = dig->afmt->last_buffer_filled_status != status;
+ dig->afmt->last_buffer_filled_status = status;
return result;
}
@@ -288,26 +291,23 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
/*
* write the audio workaround status to the hardware
*/
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- uint32_t offset = radeon_encoder->hdmi_offset;
-
- if (!offset)
- return;
-
- if (!radeon_encoder->hdmi_audio_workaround ||
- r600_hdmi_is_audio_buffer_filled(encoder)) {
-
- /* disable audio workaround */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
-
- } else {
- /* enable audio workaround */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
- }
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+ bool hdmi_audio_workaround = false; /* FIXME */
+ u32 value;
+
+ if (!hdmi_audio_workaround ||
+ r600_hdmi_is_audio_buffer_filled(encoder))
+ value = 0; /* disable workaround */
+ else
+ value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
+ WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ value, ~HDMI0_AUDIO_TEST_EN);
}
@@ -318,39 +318,75 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
if (ASIC_IS_DCE5(rdev))
return;
- if (!offset)
+ /* Stay silent; r600_hdmi_enable will have raised a WARN for us */
+ if (!dig->afmt->enabled)
return;
+ offset = dig->afmt->offset;
r600_audio_set_clock(encoder, mode->clock);
- WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
- WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
- WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND); /* send null packets when required */
- r600_hdmi_update_ACR(encoder, mode->clock);
+ WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ if (ASIC_IS_DCE32(rdev)) {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ } else {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+ HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ }
+
+ WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+ HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI0_ACR_SOURCE); /* select SW CTS value */
+
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND | /* send null packets when required */
+ HDMI0_GC_SEND | /* send general control packets */
+ HDMI0_GC_CONT); /* send general control packets every frame */
- WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
+ /* TODO: HDMI0_AUDIO_INFO_UPDATE */
+ WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be sent until audio is enabled) */
+ HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
- WREG32(offset+R600_HDMI_VERSION, 0x202);
+ WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ r600_hdmi_update_ACR(encoder, mode->clock);
+
/* it's unknown what these bits do exactly, but they're quite useful for debugging */
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
+ WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
r600_hdmi_audio_workaround(encoder);
-
- /* audio packets per line, does anyone know how to calc this ? */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
}
/*
@@ -360,145 +396,82 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
-
- int channels = r600_audio_channels(rdev);
- int rate = r600_audio_rate(rdev);
- int bps = r600_audio_bits_per_sample(rdev);
- uint8_t status_bits = r600_audio_status_bits(rdev);
- uint8_t category_code = r600_audio_category_code(rdev);
-
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct r600_audio audio = r600_audio_status(rdev);
+ uint32_t offset;
uint32_t iec;
- if (!offset)
+ if (!dig->afmt || !dig->afmt->enabled)
return;
+ offset = dig->afmt->offset;
DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
- channels, rate, bps);
+ audio.channels, audio.rate, audio.bits_per_sample);
DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
- (int)status_bits, (int)category_code);
+ (int)audio.status_bits, (int)audio.category_code);
iec = 0;
- if (status_bits & AUDIO_STATUS_PROFESSIONAL)
+ if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
iec |= 1 << 0;
- if (status_bits & AUDIO_STATUS_NONAUDIO)
+ if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
iec |= 1 << 1;
- if (status_bits & AUDIO_STATUS_COPYRIGHT)
+ if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
iec |= 1 << 2;
- if (status_bits & AUDIO_STATUS_EMPHASIS)
+ if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
iec |= 1 << 3;
- iec |= category_code << 8;
-
- switch (rate) {
- case 32000: iec |= 0x3 << 24; break;
- case 44100: iec |= 0x0 << 24; break;
- case 88200: iec |= 0x8 << 24; break;
- case 176400: iec |= 0xc << 24; break;
- case 48000: iec |= 0x2 << 24; break;
- case 96000: iec |= 0xa << 24; break;
- case 192000: iec |= 0xe << 24; break;
+ iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
+
+ switch (audio.rate) {
+ case 32000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
+ break;
+ case 44100:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
+ break;
+ case 48000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
+ break;
+ case 88200:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
+ break;
+ case 96000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
+ break;
+ case 176400:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
+ break;
+ case 192000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
+ break;
}
- WREG32(offset+R600_HDMI_IEC60958_1, iec);
+ WREG32(HDMI0_60958_0 + offset, iec);
iec = 0;
- switch (bps) {
- case 16: iec |= 0x2; break;
- case 20: iec |= 0x3; break;
- case 24: iec |= 0xb; break;
+ switch (audio.bits_per_sample) {
+ case 16:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
+ break;
+ case 20:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
+ break;
+ case 24:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
+ break;
}
- if (status_bits & AUDIO_STATUS_V)
+ if (audio.status_bits & AUDIO_STATUS_V)
iec |= 0x5 << 16;
+ WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
- WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
-
- /* 0x021 or 0x031 sets the audio frame length */
- WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
- r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
+ r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
+ 0);
r600_hdmi_audio_workaround(encoder);
}
-static int r600_hdmi_find_free_block(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder;
- struct radeon_encoder *radeon_encoder;
- bool free_blocks[3] = { true, true, true };
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- radeon_encoder = to_radeon_encoder(encoder);
- switch (radeon_encoder->hdmi_offset) {
- case R600_HDMI_BLOCK1:
- free_blocks[0] = false;
- break;
- case R600_HDMI_BLOCK2:
- free_blocks[1] = false;
- break;
- case R600_HDMI_BLOCK3:
- free_blocks[2] = false;
- break;
- }
- }
-
- if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 ||
- rdev->family == CHIP_RS740) {
- return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
- } else if (rdev->family >= CHIP_R600) {
- if (free_blocks[0])
- return R600_HDMI_BLOCK1;
- else if (free_blocks[1])
- return R600_HDMI_BLOCK2;
- }
- return 0;
-}
-
-static void r600_hdmi_assign_block(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
- u16 eg_offsets[] = {
- EVERGREEN_CRTC0_REGISTER_OFFSET,
- EVERGREEN_CRTC1_REGISTER_OFFSET,
- EVERGREEN_CRTC2_REGISTER_OFFSET,
- EVERGREEN_CRTC3_REGISTER_OFFSET,
- EVERGREEN_CRTC4_REGISTER_OFFSET,
- EVERGREEN_CRTC5_REGISTER_OFFSET,
- };
-
- if (!dig) {
- dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
- return;
- }
-
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
- dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
- return;
- }
- radeon_encoder->hdmi_offset = EVERGREEN_HDMI_BASE +
- eg_offsets[dig->dig_encoder];
- radeon_encoder->hdmi_config_offset = radeon_encoder->hdmi_offset
- + EVERGREEN_HDMI_CONFIG_OFFSET;
- } else if (ASIC_IS_DCE3(rdev)) {
- radeon_encoder->hdmi_offset = dig->dig_encoder ?
- R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
- if (ASIC_IS_DCE32(rdev))
- radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
- R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
- } else if (rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 ||
- rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
- radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
- }
-}
-
/*
* enable the HDMI engine
*/
@@ -507,64 +480,57 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
+ u32 hdmi;
if (ASIC_IS_DCE5(rdev))
return;
- if (!radeon_encoder->hdmi_offset) {
- r600_hdmi_assign_block(encoder);
- if (!radeon_encoder->hdmi_offset) {
- dev_warn(rdev->dev, "Could not find HDMI block for "
- "0x%x encoder\n", radeon_encoder->encoder_id);
- return;
- }
- }
+ /* Silently return if the HDMI block is already enabled */
+ if (dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
- offset = radeon_encoder->hdmi_offset;
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0x1, ~0x1);
- } else if (ASIC_IS_DCE32(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
- } else if (ASIC_IS_DCE3(rdev)) {
- /* TODO */
- } else if (rdev->family >= CHIP_R600) {
+ /* Older chipsets require setting HDMI and routing manually */
+ if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
~AVIVO_TMDSA_CNTL_HDMI_EN);
- WREG32(offset + R600_HDMI_ENABLE, 0x101);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
~AVIVO_LVTMA_CNTL_HDMI_EN);
- WREG32(offset + R600_HDMI_ENABLE, 0x105);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
break;
default:
- dev_err(rdev->dev, "Unknown HDMI output type\n");
+ dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+ radeon_encoder->encoder_id);
break;
}
+ WREG32(HDMI0_CONTROL + offset, hdmi);
}
- if (rdev->irq.installed
- && rdev->family != CHIP_RS600
- && rdev->family != CHIP_RS690
- && rdev->family != CHIP_RS740
- && !ASIC_IS_DCE4(rdev)) {
+ if (rdev->irq.installed) {
/* if irq is available use it */
- rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
+ rdev->irq.afmt[dig->afmt->id] = true;
radeon_irq_set(rdev);
-
- r600_audio_disable_polling(encoder);
- } else {
- /* if not fallback to polling */
- r600_audio_enable_polling(encoder);
}
+ dig->afmt->enabled = true;
+
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+ offset, radeon_encoder->encoder_id);
}
/*
@@ -575,51 +541,51 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
if (ASIC_IS_DCE5(rdev))
return;
- offset = radeon_encoder->hdmi_offset;
- if (!offset) {
- dev_err(rdev->dev, "Disabling not enabled HDMI\n");
+ /* Called for ATOM_ENCODER_MODE_HDMI only */
+ if (!dig || !dig->afmt) {
+ WARN_ON(1);
return;
}
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- offset, radeon_encoder->encoder_id);
+ offset, radeon_encoder->encoder_id);
/* disable irq */
- rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false;
+ rdev->irq.afmt[dig->afmt->id] = false;
radeon_irq_set(rdev);
- /* disable polling */
- r600_audio_disable_polling(encoder);
-
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0, ~0x1);
- } else if (ASIC_IS_DCE32(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ /* Older chipsets not handled by AtomBIOS */
+ if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0,
~AVIVO_TMDSA_CNTL_HDMI_EN);
- WREG32(offset + R600_HDMI_ENABLE, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, 0,
~AVIVO_LVTMA_CNTL_HDMI_EN);
- WREG32(offset + R600_HDMI_ENABLE, 0);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
break;
default:
- dev_err(rdev->dev, "Unknown HDMI output type\n");
+ dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+ radeon_encoder->encoder_id);
break;
}
+ WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
}
- radeon_encoder->hdmi_offset = 0;
- radeon_encoder->hdmi_config_offset = 0;
+ dig->afmt->enabled = false;
}
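The ACR rework above folds three per-rate table walks into one lookup that returns a struct radeon_hdmi_acr, with r600_hdmi_calc_cts() filling in any CTS the table leaves at zero via CTS = clock * N / (128 * freq) * 1000. A hedged usage sketch; the 74.25 MHz numbers are a worked example, not values quoted from this patch:

	/* mode->clock is in kHz, e.g. 74250 for 1080p modes */
	struct radeon_hdmi_acr acr = r600_hdmi_acr(74250);

	/*
	 * For a clock missing from the table, the 48 kHz fallback would
	 * compute 74250 * 6144 / (128 * 48000) * 1000 = 74000; integer
	 * truncation loses 250, which is why the predefined table keeps
	 * exact CTS values for the common clocks.
	 */
	WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
	WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);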
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index f869897c7456..2b960cb5c18a 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -156,45 +156,10 @@
#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
#define R600_AUDIO_STATUS_BITS 0x73d8
-/* HDMI base register addresses */
-#define R600_HDMI_BLOCK1 0x7400
-#define R600_HDMI_BLOCK2 0x7700
-#define R600_HDMI_BLOCK3 0x7800
-
-/* HDMI registers */
-#define R600_HDMI_ENABLE 0x00
-#define R600_HDMI_STATUS 0x04
-# define R600_HDMI_INT_PENDING (1 << 29)
-#define R600_HDMI_CNTL 0x08
-# define R600_HDMI_INT_EN (1 << 28)
-# define R600_HDMI_INT_ACK (1 << 29)
-#define R600_HDMI_UNKNOWN_0 0x0C
-#define R600_HDMI_AUDIOCNTL 0x10
-#define R600_HDMI_VIDEOCNTL 0x14
-#define R600_HDMI_VERSION 0x18
-#define R600_HDMI_UNKNOWN_1 0x28
-#define R600_HDMI_VIDEOINFOFRAME_0 0x54
-#define R600_HDMI_VIDEOINFOFRAME_1 0x58
-#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
-#define R600_HDMI_VIDEOINFOFRAME_3 0x60
-#define R600_HDMI_32kHz_CTS 0xac
-#define R600_HDMI_32kHz_N 0xb0
-#define R600_HDMI_44_1kHz_CTS 0xb4
-#define R600_HDMI_44_1kHz_N 0xb8
-#define R600_HDMI_48kHz_CTS 0xbc
-#define R600_HDMI_48kHz_N 0xc0
-#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
-#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
-#define R600_HDMI_IEC60958_1 0xd4
-#define R600_HDMI_IEC60958_2 0xd8
-#define R600_HDMI_UNKNOWN_2 0xdc
-#define R600_HDMI_AUDIO_DEBUG_0 0xe0
-#define R600_HDMI_AUDIO_DEBUG_1 0xe4
-#define R600_HDMI_AUDIO_DEBUG_2 0xe8
-#define R600_HDMI_AUDIO_DEBUG_3 0xec
-
-/* HDMI additional config base register addresses */
-#define R600_HDMI_CONFIG1 0x7600
-#define R600_HDMI_CONFIG2 0x7a00
+#define DCE2_HDMI_OFFSET0 (0x7400 - 0x7400)
+#define DCE2_HDMI_OFFSET1 (0x7700 - 0x7400)
+/* DCE3.2 second instance starts at 0x7800 */
+#define DCE3_HDMI_OFFSET0 (0x7400 - 0x7400)
+#define DCE3_HDMI_OFFSET1 (0x7800 - 0x7400)
#endif
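The per-block base addresses above are replaced by offsets relative to the first HDMI block at 0x7400, which is the quantity dig->afmt->offset now carries. Two illustrative reads, assumed usage only, lining these offsets up with the second-instance registers defined in the r600d.h hunk that follows:

	/* r6xx (DCE2) second block: 0x7404 + 0x300 == 0x7704 (HDMI1_STATUS) */
	u32 status = RREG32(HDMI0_STATUS + DCE2_HDMI_OFFSET1);

	/* DCE3 second block: 0x7404 + 0x400 == 0x7804 (DCE3_HDMI1_STATUS) */
	status = RREG32(HDMI0_STATUS + DCE3_HDMI_OFFSET1);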
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 59f9c993cc31..15bd3b216243 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -824,6 +824,239 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
+/* Audio clocks */
+#define DCCG_AUDIO_DTO0_PHASE 0x0514
+#define DCCG_AUDIO_DTO0_MODULE 0x0518
+#define DCCG_AUDIO_DTO0_LOAD 0x051c
+# define DTO_LOAD (1 << 31)
+#define DCCG_AUDIO_DTO0_CNTL 0x0520
+
+#define DCCG_AUDIO_DTO1_PHASE 0x0524
+#define DCCG_AUDIO_DTO1_MODULE 0x0528
+#define DCCG_AUDIO_DTO1_LOAD 0x052c
+#define DCCG_AUDIO_DTO1_CNTL 0x0530
+
+#define DCCG_AUDIO_DTO_SELECT 0x0534
+
+/* digital blocks */
+#define TMDSA_CNTL 0x7880
+# define TMDSA_HDMI_EN (1 << 2)
+#define LVTMA_CNTL 0x7a80
+# define LVTMA_HDMI_EN (1 << 2)
+#define DDIA_CNTL 0x7200
+# define DDIA_HDMI_EN (1 << 2)
+#define DIG0_CNTL 0x75a0
+# define DIG_MODE(x) (((x) & 7) << 8)
+# define DIG_MODE_DP 0
+# define DIG_MODE_LVDS 1
+# define DIG_MODE_TMDS_DVI 2
+# define DIG_MODE_TMDS_HDMI 3
+# define DIG_MODE_SDVO 4
+#define DIG1_CNTL 0x79a0
+
+/* rs6xx/rs740 and r6xx share the same HDMI blocks; however, rs6xx has only one
+ * instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
+ * different due to the new DIG blocks, but also have 2 instances.
+ * DCE 3.0 HDMI blocks are part of each DIG encoder.
+ */
+
+/* rs6xx/rs740/r6xx/dce3 */
+#define HDMI0_CONTROL 0x7400
+/* rs6xx/rs740/r6xx */
+# define HDMI0_ENABLE (1 << 0)
+# define HDMI0_STREAM(x) (((x) & 3) << 2)
+# define HDMI0_STREAM_TMDSA 0
+# define HDMI0_STREAM_LVTMA 1
+# define HDMI0_STREAM_DVOA 2
+# define HDMI0_STREAM_DDIA 3
+/* rs6xx/r6xx/dce3 */
+# define HDMI0_ERROR_ACK (1 << 8)
+# define HDMI0_ERROR_MASK (1 << 9)
+#define HDMI0_STATUS 0x7404
+# define HDMI0_ACTIVE_AVMUTE (1 << 0)
+# define HDMI0_AUDIO_ENABLE (1 << 4)
+# define HDMI0_AZ_FORMAT_WTRIG (1 << 28)
+# define HDMI0_AZ_FORMAT_WTRIG_INT (1 << 29)
+#define HDMI0_AUDIO_PACKET_CONTROL 0x7408
+# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0)
+# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8)
+# define HDMI0_AUDIO_TEST_EN (1 << 12)
+# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24)
+# define HDMI0_60958_CS_UPDATE (1 << 26)
+# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29)
+#define HDMI0_AUDIO_CRC_CONTROL 0x740c
+# define HDMI0_AUDIO_CRC_EN (1 << 0)
+#define HDMI0_VBI_PACKET_CONTROL 0x7410
+# define HDMI0_NULL_SEND (1 << 0)
+# define HDMI0_GC_SEND (1 << 4)
+# define HDMI0_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI0_INFOFRAME_CONTROL0 0x7414
+# define HDMI0_AVI_INFO_SEND (1 << 0)
+# define HDMI0_AVI_INFO_CONT (1 << 1)
+# define HDMI0_AUDIO_INFO_SEND (1 << 4)
+# define HDMI0_AUDIO_INFO_CONT (1 << 5)
+# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
+# define HDMI0_MPEG_INFO_SEND (1 << 8)
+# define HDMI0_MPEG_INFO_CONT (1 << 9)
+# define HDMI0_MPEG_INFO_UPDATE (1 << 10)
+#define HDMI0_INFOFRAME_CONTROL1 0x7418
+# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
+# define HDMI0_GENERIC0_SEND (1 << 0)
+# define HDMI0_GENERIC0_CONT (1 << 1)
+# define HDMI0_GENERIC0_UPDATE (1 << 2)
+# define HDMI0_GENERIC1_SEND (1 << 4)
+# define HDMI0_GENERIC1_CONT (1 << 5)
+# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI0_GC 0x7428
+# define HDMI0_GC_AVMUTE (1 << 0)
+#define HDMI0_AVI_INFO0 0x7454
+# define HDMI0_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define HDMI0_AVI_INFO_S(x) (((x) & 3) << 8)
+# define HDMI0_AVI_INFO_B(x) (((x) & 3) << 10)
+# define HDMI0_AVI_INFO_A(x) (((x) & 1) << 12)
+# define HDMI0_AVI_INFO_Y(x) (((x) & 3) << 13)
+# define HDMI0_AVI_INFO_Y_RGB 0
+# define HDMI0_AVI_INFO_Y_YCBCR422 1
+# define HDMI0_AVI_INFO_Y_YCBCR444 2
+# define HDMI0_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+# define HDMI0_AVI_INFO_R(x) (((x) & 0xf) << 16)
+# define HDMI0_AVI_INFO_M(x) (((x) & 0x3) << 20)
+# define HDMI0_AVI_INFO_C(x) (((x) & 0x3) << 22)
+# define HDMI0_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+# define HDMI0_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+# define HDMI0_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define HDMI0_AVI_INFO1 0x7458
+# define HDMI0_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define HDMI0_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define HDMI0_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO2 0x745c
+# define HDMI0_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+# define HDMI0_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO3 0x7460
+# define HDMI0_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+# define HDMI0_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define HDMI0_MPEG_INFO0 0x7464
+# define HDMI0_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define HDMI0_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+# define HDMI0_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+# define HDMI0_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define HDMI0_MPEG_INFO1 0x7468
+# define HDMI0_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+# define HDMI0_MPEG_INFO_MF(x) (((x) & 3) << 8)
+# define HDMI0_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define HDMI0_GENERIC0_HDR 0x746c
+#define HDMI0_GENERIC0_0 0x7470
+#define HDMI0_GENERIC0_1 0x7474
+#define HDMI0_GENERIC0_2 0x7478
+#define HDMI0_GENERIC0_3 0x747c
+#define HDMI0_GENERIC0_4 0x7480
+#define HDMI0_GENERIC0_5 0x7484
+#define HDMI0_GENERIC0_6 0x7488
+#define HDMI0_GENERIC1_HDR 0x748c
+#define HDMI0_GENERIC1_0 0x7490
+#define HDMI0_GENERIC1_1 0x7494
+#define HDMI0_GENERIC1_2 0x7498
+#define HDMI0_GENERIC1_3 0x749c
+#define HDMI0_GENERIC1_4 0x74a0
+#define HDMI0_GENERIC1_5 0x74a4
+#define HDMI0_GENERIC1_6 0x74a8
+#define HDMI0_ACR_32_0 0x74ac
+# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_32_1 0x74b0
+# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_44_0 0x74b4
+# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_44_1 0x74b8
+# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_48_0 0x74bc
+# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_48_1 0x74c0
+# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_STATUS_0 0x74c4
+#define HDMI0_ACR_STATUS_1 0x74c8
+#define HDMI0_AUDIO_INFO0 0x74cc
+# define HDMI0_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define HDMI0_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+#define HDMI0_AUDIO_INFO1 0x74d0
+# define HDMI0_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+# define HDMI0_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+# define HDMI0_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+# define HDMI0_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#define HDMI0_60958_0 0x74d4
+# define HDMI0_60958_CS_A(x) (((x) & 1) << 0)
+# define HDMI0_60958_CS_B(x) (((x) & 1) << 1)
+# define HDMI0_60958_CS_C(x) (((x) & 1) << 2)
+# define HDMI0_60958_CS_D(x) (((x) & 3) << 3)
+# define HDMI0_60958_CS_MODE(x) (((x) & 3) << 6)
+# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define HDMI0_60958_1 0x74d8
+# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16)
+# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define HDMI0_ACR_PACKET_CONTROL 0x74dc
+# define HDMI0_ACR_SEND (1 << 0)
+# define HDMI0_ACR_CONT (1 << 1)
+# define HDMI0_ACR_SELECT(x) (((x) & 3) << 4)
+# define HDMI0_ACR_HW 0
+# define HDMI0_ACR_32 1
+# define HDMI0_ACR_44 2
+# define HDMI0_ACR_48 3
+# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI0_ACR_AUTO_SEND (1 << 12)
+#define HDMI0_RAMP_CONTROL0 0x74e0
+# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL1 0x74e4
+# define HDMI0_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL2 0x74e8
+# define HDMI0_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL3 0x74ec
+# define HDMI0_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+/* HDMI0_60958_2 is r7xx only */
+#define HDMI0_60958_2 0x74f0
+# define HDMI0_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+/* r6xx only; second instance starts at 0x7700 */
+#define HDMI1_CONTROL 0x7700
+#define HDMI1_STATUS 0x7704
+#define HDMI1_AUDIO_PACKET_CONTROL 0x7708
+/* DCE3; second instance starts at 0x7800 NOT 0x7700 */
+#define DCE3_HDMI1_CONTROL 0x7800
+#define DCE3_HDMI1_STATUS 0x7804
+#define DCE3_HDMI1_AUDIO_PACKET_CONTROL 0x7808
+/* DCE3.2 (for interrupts) */
+#define AFMT_STATUS 0x7600
+# define AFMT_AUDIO_ENABLE (1 << 4)
+# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x7604
+# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+# define AFMT_AUDIO_TEST_EN (1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+# define AFMT_60958_CS_UPDATE (1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+
/*
* PM4
*/
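Most of the single-bit fields above are flipped through the driver's read-modify-write helper: WREG32_P(reg, val, mask) keeps the register bits selected by mask and merges in val. A sketch of the pattern r600_hdmi_audio_workaround uses earlier in this diff; the enable flag is a stand-in for the caller's decision:

	/* set or clear only HDMI0_AUDIO_TEST_EN, preserving all other bits */
	WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
		 enable ? HDMI0_AUDIO_TEST_EN : 0,
		 ~HDMI0_AUDIO_TEST_EN);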
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 138b95216d8d..1dc3a4aba020 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -94,33 +94,38 @@ extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
+extern int radeon_lockup_timeout;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
* symbols.
*/
-#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
-#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
+#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
-#define RADEON_IB_POOL_SIZE 16
-#define RADEON_DEBUGFS_MAX_COMPONENTS 32
-#define RADEONFB_CONN_LIMIT 4
-#define RADEON_BIOS_NUM_SCRATCH 8
+#define RADEON_IB_POOL_SIZE 16
+#define RADEON_DEBUGFS_MAX_COMPONENTS 32
+#define RADEONFB_CONN_LIMIT 4
+#define RADEON_BIOS_NUM_SCRATCH 8
/* max number of rings */
-#define RADEON_NUM_RINGS 3
+#define RADEON_NUM_RINGS 3
+
+/* fence seq is set to this number when signaled */
+#define RADEON_FENCE_SIGNALED_SEQ 0LL
+#define RADEON_FENCE_NOTEMITED_SEQ (~0LL)
/* internal ring indices */
/* r1xx+ has gfx CP ring */
-#define RADEON_RING_TYPE_GFX_INDEX 0
+#define RADEON_RING_TYPE_GFX_INDEX 0
/* cayman has 2 compute CP rings */
-#define CAYMAN_RING_TYPE_CP1_INDEX 1
-#define CAYMAN_RING_TYPE_CP2_INDEX 2
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
/* hardcode those limit for now */
-#define RADEON_VA_RESERVED_SIZE (8 << 20)
-#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+#define RADEON_VA_RESERVED_SIZE (8 << 20)
+#define RADEON_IB_VM_MAX_SIZE (64 << 10)
/*
* Errata workarounds.
@@ -253,28 +258,20 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
- atomic_t seq;
- uint32_t last_seq;
- unsigned long last_jiffies;
- unsigned long last_timeout;
- wait_queue_head_t queue;
- struct list_head created;
- struct list_head emitted;
- struct list_head signaled;
+ /* seq is protected by ring emission lock */
+ uint64_t seq;
+ atomic64_t last_seq;
+ unsigned long last_activity;
bool initialized;
};
struct radeon_fence {
struct radeon_device *rdev;
struct kref kref;
- struct list_head list;
/* protected by radeon_fence.lock */
- uint32_t seq;
- bool emitted;
- bool signaled;
+ uint64_t seq;
/* RB, DMA, etc. */
- int ring;
- struct radeon_semaphore *semaphore;
+ unsigned ring;
};
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -285,11 +282,14 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_any(struct radeon_device *rdev,
+ struct radeon_fence **fences,
+ bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
-int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
/*
* Tiling registers
@@ -382,8 +382,11 @@ struct radeon_bo_list {
* alignment).
*/
struct radeon_sa_manager {
+ spinlock_t lock;
struct radeon_bo *bo;
- struct list_head sa_bo;
+ struct list_head *hole;
+ struct list_head flist[RADEON_NUM_RINGS];
+ struct list_head olist;
unsigned size;
uint64_t gpu_addr;
void *cpu_ptr;
@@ -394,10 +397,12 @@ struct radeon_sa_bo;
/* sub-allocation buffer */
struct radeon_sa_bo {
- struct list_head list;
+ struct list_head olist;
+ struct list_head flist;
struct radeon_sa_manager *manager;
- unsigned offset;
- unsigned size;
+ unsigned soffset;
+ unsigned eoffset;
+ struct radeon_fence *fence;
};
/*
@@ -428,42 +433,26 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
/*
* Semaphores.
*/
-struct radeon_ring;
-
-#define RADEON_SEMAPHORE_BO_SIZE 256
-
-struct radeon_semaphore_driver {
- rwlock_t lock;
- struct list_head bo;
-};
-
-struct radeon_semaphore_bo;
-
/* everything here is constant */
struct radeon_semaphore {
- struct list_head list;
+ struct radeon_sa_bo *sa_bo;
+ signed waiters;
uint64_t gpu_addr;
- uint32_t *cpu_ptr;
- struct radeon_semaphore_bo *bo;
};
-struct radeon_semaphore_bo {
- struct list_head list;
- struct radeon_ib *ib;
- struct list_head free;
- struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
- unsigned nused;
-};
-
-void radeon_semaphore_driver_fini(struct radeon_device *rdev);
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ bool sync_to[RADEON_NUM_RINGS],
+ int dst_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore);
+ struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence);
/*
* GART structures, functions & helpers
@@ -560,6 +549,7 @@ struct radeon_unpin_work {
struct r500_irq_stat_regs {
u32 disp_int;
+ u32 hdmi0_status;
};
struct r600_irq_stat_regs {
@@ -568,6 +558,8 @@ struct r600_irq_stat_regs {
u32 disp_int_cont2;
u32 d1grph_int;
u32 d2grph_int;
+ u32 hdmi0_status;
+ u32 hdmi1_status;
};
struct evergreen_irq_stat_regs {
@@ -583,6 +575,12 @@ struct evergreen_irq_stat_regs {
u32 d4grph_int;
u32 d5grph_int;
u32 d6grph_int;
+ u32 afmt_status1;
+ u32 afmt_status2;
+ u32 afmt_status3;
+ u32 afmt_status4;
+ u32 afmt_status5;
+ u32 afmt_status6;
};
union radeon_irq_stat_regs {
@@ -593,7 +591,7 @@ union radeon_irq_stat_regs {
#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_HDMI_BLOCKS 2
+#define RADEON_MAX_AFMT_BLOCKS 6
struct radeon_irq {
bool installed;
@@ -605,7 +603,7 @@ struct radeon_irq {
bool gui_idle;
bool gui_idle_acked;
wait_queue_head_t idle_queue;
- bool hdmi[RADEON_MAX_HDMI_BLOCKS];
+ bool afmt[RADEON_MAX_AFMT_BLOCKS];
spinlock_t sw_lock;
int sw_refcount[RADEON_NUM_RINGS];
union radeon_irq_stat_regs stat_regs;
@@ -625,26 +623,14 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
*/
struct radeon_ib {
- struct radeon_sa_bo sa_bo;
- unsigned idx;
- uint32_t length_dw;
- uint64_t gpu_addr;
- uint32_t *ptr;
- struct radeon_fence *fence;
- unsigned vm_id;
- bool is_const_ib;
-};
-
-/*
- * locking -
- * mutex protects scheduled_ibs, ready, alloc_bm
- */
-struct radeon_ib_pool {
- struct radeon_mutex mutex;
- struct radeon_sa_manager sa_manager;
- struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
- bool ready;
- unsigned head_id;
+ struct radeon_sa_bo *sa_bo;
+ uint32_t length_dw;
+ uint64_t gpu_addr;
+ uint32_t *ptr;
+ struct radeon_fence *fence;
+ unsigned vm_id;
+ bool is_const_ib;
+ struct radeon_semaphore *semaphore;
};
struct radeon_ring {
@@ -659,10 +645,11 @@ struct radeon_ring {
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
+ unsigned long last_activity;
+ unsigned last_rptr;
uint64_t gpu_addr;
uint32_t align_mask;
uint32_t ptr_mask;
- struct mutex mutex;
bool ready;
u32 ptr_reg_shift;
u32 ptr_reg_mask;
@@ -679,7 +666,7 @@ struct radeon_vm {
unsigned last_pfn;
u64 pt_gpu_addr;
u64 *pt;
- struct radeon_sa_bo sa_bo;
+ struct radeon_sa_bo *sa_bo;
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@@ -756,7 +743,6 @@ struct r600_blit_cp_primitives {
};
struct r600_blit {
- struct mutex mutex;
struct radeon_bo *shader_obj;
struct r600_blit_cp_primitives primitives;
int max_dim;
@@ -766,8 +752,6 @@ struct r600_blit {
u32 vs_offset, ps_offset;
u32 state_offset;
u32 state_len;
- u32 vb_used, vb_total;
- struct radeon_ib *vb_ib;
};
void r600_blit_suspend(struct radeon_device *rdev);
@@ -785,14 +769,14 @@ struct si_rlc {
};
int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib **ib, unsigned size);
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
+ struct radeon_ib *ib, unsigned size);
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_pool_start(struct radeon_device *rdev);
int radeon_ib_pool_suspend(struct radeon_device *rdev);
+int radeon_ib_ring_tests(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -800,8 +784,12 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsign
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
+void radeon_ring_lockup_update(struct radeon_ring *ring);
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
@@ -850,8 +838,8 @@ struct radeon_cs_parser {
int chunk_relocs_idx;
int chunk_flags_idx;
int chunk_const_ib_idx;
- struct radeon_ib *ib;
- struct radeon_ib *const_ib;
+ struct radeon_ib ib;
+ struct radeon_ib const_ib;
void *track;
unsigned family;
int parser_error;
@@ -1105,6 +1093,14 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance);
+struct r600_audio {
+ int channels;
+ int rate;
+ int bits_per_sample;
+ u8 status_bits;
+ u8 category_code;
+};
+
/*
* Benchmarking
*/
@@ -1144,7 +1140,6 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*asic_reset)(struct radeon_device *rdev);
/* ioctl hw specific callback. Some hw might want to perform special
* operation on specific ioctl. For instance on wait idle some hw
@@ -1173,6 +1168,7 @@ struct radeon_asic {
void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
} ring[RADEON_NUM_RINGS];
/* irqs */
struct {
@@ -1251,16 +1247,10 @@ struct radeon_asic {
/*
* Asic structures
*/
-struct r100_gpu_lockup {
- unsigned long last_jiffies;
- u32 last_cp_rptr;
-};
-
struct r100_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
u32 hdp_cntl;
- struct r100_gpu_lockup lockup;
};
struct r300_asic {
@@ -1268,7 +1258,6 @@ struct r300_asic {
unsigned reg_safe_bm_size;
u32 resync_scratch;
u32 hdp_cntl;
- struct r100_gpu_lockup lockup;
};
struct r600_asic {
@@ -1290,7 +1279,6 @@ struct r600_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
- struct r100_gpu_lockup lockup;
};
struct rv770_asic {
@@ -1316,7 +1304,6 @@ struct rv770_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
- struct r100_gpu_lockup lockup;
};
struct evergreen_asic {
@@ -1343,7 +1330,6 @@ struct evergreen_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
- struct r100_gpu_lockup lockup;
};
struct cayman_asic {
@@ -1382,7 +1368,6 @@ struct cayman_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
- struct r100_gpu_lockup lockup;
};
struct si_asic {
@@ -1413,7 +1398,6 @@ struct si_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
- struct r100_gpu_lockup lockup;
};
union radeon_asic_config {
@@ -1516,11 +1500,12 @@ struct radeon_device {
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
struct radeon_mman mman;
- rwlock_t fence_lock;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
- struct radeon_semaphore_driver semaphore_drv;
+ wait_queue_head_t fence_queue;
+ struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
- struct radeon_ib_pool ib_pool;
+ bool ib_pool_ready;
+ struct radeon_sa_manager ring_tmp_bo;
struct radeon_irq irq;
struct radeon_asic *asic;
struct radeon_gem gem;
@@ -1529,7 +1514,6 @@ struct radeon_device {
struct radeon_mutex cs_mutex;
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
- bool gpu_lockup;
bool shutdown;
bool suspend;
bool need_dma32;
@@ -1546,19 +1530,12 @@ struct radeon_device {
struct r600_ih ih; /* r6/700 interrupt ring */
struct si_rlc rlc;
struct work_struct hotplug_work;
+ struct work_struct audio_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
struct mutex vram_mutex;
-
- /* audio stuff */
- bool audio_enabled;
- struct timer_list audio_timer;
- int audio_channels;
- int audio_rate;
- int audio_bits_per_sample;
- uint8_t audio_status_bits;
- uint8_t audio_category_code;
-
+ bool audio_enabled;
+ struct r600_audio audio_status; /* audio stuff */
struct notifier_block acpi_nb;
/* only one userspace can use Hyperz features or CMASK at a time */
struct drm_file *hyperz_filp;
@@ -1730,7 +1707,6 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
@@ -1739,6 +1715,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
@@ -1828,6 +1805,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo);
+/* audio */
+void r600_audio_update_hdmi(struct work_struct *work);
/*
* R600 vram scratch functions
@@ -1848,10 +1827,32 @@ int r600_fmt_get_nblocksy(u32 format, u32 h);
/*
* r600 functions used by radeon_encoder.c
*/
+struct radeon_hdmi_acr {
+ u32 clock;
+
+ int n_32khz;
+ int cts_32khz;
+
+ int n_44_1khz;
+ int cts_44_1khz;
+
+ int n_48khz;
+ int cts_48khz;
+
+};
+
+extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
+
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+/*
+ * evergreen functions used by radeon_encoder.c
+ */
+
+extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+
extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
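The radeon.h hunks above move lockup detection from a single gpu_is_lockup() hook to a per-ring is_lockup callback backed by the new last_rptr/last_activity bookkeeping and the radeon_lockup_timeout module parameter. A sketch of the shape this gives a health check; the wrapper is illustrative, the real callers sit in the fence wait paths:

static void example_check_gfx_lockup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	/* nudge the ring so a stalled rptr can't be mistaken for idle */
	radeon_ring_force_activity(rdev, ring);

	/* the per-ring hook is expected to compare rptr with last_rptr
	 * and, if nothing moved, test last_activity against the timeout */
	if (radeon_ring_is_lockup(rdev, RADEON_RING_TYPE_GFX_INDEX, ring))
		radeon_asic_reset(rdev);
}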
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index be4dc2ff0e40..f533df5f7d50 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,7 +134,6 @@ static struct radeon_asic r100_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -152,6 +151,7 @@ static struct radeon_asic r100_asic = {
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -208,7 +208,6 @@ static struct radeon_asic r200_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -226,6 +225,7 @@ static struct radeon_asic r200_asic = {
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -282,7 +282,6 @@ static struct radeon_asic r300_asic = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -300,6 +299,7 @@ static struct radeon_asic r300_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -356,7 +356,6 @@ static struct radeon_asic r300_asic_pcie = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -374,6 +373,7 @@ static struct radeon_asic r300_asic_pcie = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -430,7 +430,6 @@ static struct radeon_asic r420_asic = {
.suspend = &r420_suspend,
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -448,6 +447,7 @@ static struct radeon_asic r420_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -504,7 +504,6 @@ static struct radeon_asic rs400_asic = {
.suspend = &rs400_suspend,
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -522,6 +521,7 @@ static struct radeon_asic rs400_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -578,7 +578,6 @@ static struct radeon_asic rs600_asic = {
.suspend = &rs600_suspend,
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -596,6 +595,7 @@ static struct radeon_asic rs600_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -652,7 +652,6 @@ static struct radeon_asic rs690_asic = {
.suspend = &rs690_suspend,
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -670,6 +669,7 @@ static struct radeon_asic rs690_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -726,7 +726,6 @@ static struct radeon_asic rv515_asic = {
.suspend = &rv515_suspend,
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -744,6 +743,7 @@ static struct radeon_asic rv515_asic = {
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -800,7 +800,6 @@ static struct radeon_asic r520_asic = {
.suspend = &rv515_suspend,
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -818,6 +817,7 @@ static struct radeon_asic r520_asic = {
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -874,7 +874,6 @@ static struct radeon_asic r600_asic = {
.suspend = &r600_suspend,
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
- .gpu_is_lockup = &r600_gpu_is_lockup,
.asic_reset = &r600_asic_reset,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
@@ -891,6 +890,7 @@ static struct radeon_asic r600_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
@@ -946,7 +946,6 @@ static struct radeon_asic rs780_asic = {
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
- .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -964,6 +963,7 @@ static struct radeon_asic rs780_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
@@ -1020,7 +1020,6 @@ static struct radeon_asic rv770_asic = {
.suspend = &rv770_suspend,
.resume = &rv770_resume,
.asic_reset = &r600_asic_reset,
- .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
@@ -1037,6 +1036,7 @@ static struct radeon_asic rv770_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
@@ -1092,7 +1092,6 @@ static struct radeon_asic evergreen_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1110,6 +1109,7 @@ static struct radeon_asic evergreen_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
@@ -1165,7 +1165,6 @@ static struct radeon_asic sumo_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1183,6 +1182,7 @@ static struct radeon_asic sumo_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
},
.irq = {
@@ -1238,7 +1238,6 @@ static struct radeon_asic btc_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1256,6 +1255,7 @@ static struct radeon_asic btc_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
@@ -1321,7 +1321,6 @@ static struct radeon_asic cayman_asic = {
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
- .gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1340,6 +1339,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1349,6 +1349,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1358,6 +1359,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
@@ -1413,7 +1415,6 @@ static struct radeon_asic trinity_asic = {
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
- .gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1432,6 +1433,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1441,6 +1443,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1450,6 +1453,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
@@ -1515,7 +1519,6 @@ static struct radeon_asic si_asic = {
.fini = &si_fini,
.suspend = &si_suspend,
.resume = &si_resume,
- .gpu_is_lockup = &si_gpu_is_lockup,
.asic_reset = &si_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1534,6 +1537,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &si_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &si_ring_ib_execute,
@@ -1543,6 +1547,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &si_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &si_ring_ib_execute,
@@ -1552,6 +1557,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &si_gpu_is_lockup,
}
},
.irq = {
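The hunks above replace the single per-ASIC gpu_is_lockup hook with a per-ring .is_lockup callback, so each ring entry names its own detector (note how the Cayman/Trinity/SI tables reuse one callback across all three CP rings). Callers presumably dispatch through the ring table now; a sketch with an assumed wrapper name:

/* assumed dispatch helper: look the lockup check up per ring instead of
 * per ASIC (wrapper name is illustrative, not taken from the patch) */
static bool ring_is_lockup(struct radeon_device *rdev, int ring,
			   struct radeon_ring *cp)
{
	return rdev->asic->ring[ring].is_lockup(rdev, cp);
}

This matches the radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring]) call sites that appear in the fence-wait changes further down.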
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d9f9f1d8f90..e76a941ef14e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -103,11 +103,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
- struct radeon_ring *cp);
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
- struct r100_gpu_lockup *lockup,
- struct radeon_ring *cp);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r100_irq_disable(struct radeon_device *rdev);
@@ -159,7 +154,6 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -362,26 +356,20 @@ void r600_disable_interrupts(struct radeon_device *rdev);
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
-int r600_audio_tmds_index(struct drm_encoder *encoder);
void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
-int r600_audio_channels(struct radeon_device *rdev);
-int r600_audio_bits_per_sample(struct radeon_device *rdev);
-int r600_audio_rate(struct radeon_device *rdev);
-uint8_t r600_audio_status_bits(struct radeon_device *rdev);
-uint8_t r600_audio_category_code(struct radeon_device *rdev);
-void r600_audio_schedule_polling(struct radeon_device *rdev);
-void r600_audio_enable_polling(struct drm_encoder *encoder);
-void r600_audio_disable_polling(struct drm_encoder *encoder);
+struct r600_audio r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
-void r600_hdmi_init(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+ struct radeon_sa_bo **vb);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
+ struct radeon_sa_bo *vb);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages);
+ unsigned num_gpu_pages,
+ struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
/*
@@ -446,7 +434,6 @@ int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
-bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int cayman_asic_reset(struct radeon_device *rdev);
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cayman_vm_init(struct radeon_device *rdev);
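The r600 blit prototypes above now thread a radeon_sa_bo vertex-buffer handle through the whole copy sequence instead of stashing it in driver-global state. Expected pairing, sketched from the declarations alone (error handling trimmed):

/* sketch of a copy path using the reworked blit API */
struct radeon_sa_bo *vb;
int r;

r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
if (r)
	return r;
r600_kms_blit_copy(rdev, src_gpu_addr, dst_gpu_addr, num_gpu_pages, vb);
r600_blit_done_copy(rdev, fence, vb);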
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index fef7b722b05d..364f5b1a04b9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -103,7 +103,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
int time;
n = RADEON_BENCHMARK_ITERATIONS;
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -115,7 +115,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
if (r) {
goto out_cleanup;
}
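radeon_bo_create gains an extra argument here, passed as NULL by the benchmark. Given the PRIME/dma-buf support in this merge, it is presumably the sg_table of an imported buffer, NULL for ordinary allocations; assumed prototype (the parameter name is a guess):

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, struct sg_table *sg,
		     struct radeon_bo **bo_ptr);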
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 2cad9fde92fc..576f4f6919f2 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1561,6 +1561,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
(rdev->pdev->subsystem_device == 0x4150)) {
/* Mac G5 tower 9600 */
rdev->mode_info.connector_table = CT_MAC_G5_9600;
+ } else if ((rdev->pdev->device == 0x4c66) &&
+ (rdev->pdev->subsystem_vendor == 0x1002) &&
+ (rdev->pdev->subsystem_device == 0x4c66)) {
+ /* SAM440ep RV250 embedded board */
+ rdev->mode_info.connector_table = CT_SAM440EP;
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
@@ -2134,6 +2139,67 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
+ case CT_SAM440EP:
+ DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
+ rdev->mode_info.connector_table);
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
+ /* DVI-I - secondary dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 3c2e7a000a2a..2914c5761cfc 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -84,6 +84,62 @@ static void radeon_property_change_mode(struct drm_encoder *encoder)
crtc->x, crtc->y, crtc->fb);
}
}
+
+int radeon_get_monitor_bpc(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
+ int bpc = 8;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ if (radeon_connector->use_digital) {
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ dig_connector = radeon_connector->con_priv;
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
+ drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_LVDS:
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
+ bpc = 6;
+ else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
+ bpc = 8;
+ }
+ break;
+ }
+ return bpc;
+}
+
static void
radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
{
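radeon_get_monitor_bpc centralizes sink bit-depth lookup: EDID-reported bpc for HDMI and DP sinks, the panel table for LVDS/eDP on DCE4.1/DCE5, and 8 as the fallback. A caller could use it along these lines (illustrative helper, not from the patch):

/* e.g. decide whether a 6-bpc panel wants dithering enabled */
static bool panel_needs_dither(struct drm_connector *connector)
{
	return radeon_get_monitor_bpc(connector) < 8;
}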
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5cac83278338..c7d64a739033 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -118,44 +118,33 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
bool sync_to_ring[RADEON_NUM_RINGS] = { };
+ bool need_sync = false;
int i, r;
for (i = 0; i < p->nrelocs; i++) {
+ struct radeon_fence *fence;
+
if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
continue;
- if (!(p->relocs[i].flags & RADEON_RELOC_DONT_SYNC)) {
- struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
- if (!radeon_fence_signaled(fence)) {
- sync_to_ring[fence->ring] = true;
- }
+ fence = p->relocs[i].robj->tbo.sync_obj;
+ if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
+ sync_to_ring[fence->ring] = true;
+ need_sync = true;
}
}
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- /* no need to sync to our own or unused rings */
- if (i == p->ring || !sync_to_ring[i] || !p->rdev->ring[i].ready)
- continue;
-
- if (!p->ib->fence->semaphore) {
- r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
- if (r)
- return r;
- }
-
- r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
- if (r)
- return r;
- radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
- radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
+ if (!need_sync) {
+ return 0;
+ }
- r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
- if (r)
- return r;
- radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
- radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
+ r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
+ if (r) {
+ return r;
}
- return 0;
+
+ return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
+ sync_to_ring, p->ring);
}
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -172,6 +161,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
/* get chunks */
INIT_LIST_HEAD(&p->validated);
p->idx = 0;
+ p->ib.sa_bo = NULL;
+ p->ib.semaphore = NULL;
+ p->const_ib.sa_bo = NULL;
+ p->const_ib.semaphore = NULL;
p->chunk_ib_idx = -1;
p->chunk_relocs_idx = -1;
p->chunk_flags_idx = -1;
@@ -278,11 +271,16 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
- p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
- p->chunks[p->chunk_ib_idx].kpage[1] == NULL)
- return -ENOMEM;
+ if ((p->rdev->flags & RADEON_IS_AGP)) {
+ p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+ p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+ kfree(p->chunks[i].kpage[0]);
+ kfree(p->chunks[i].kpage[1]);
+ return -ENOMEM;
+ }
+ }
p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
p->chunks[p->chunk_ib_idx].last_copied_page = -1;
@@ -305,10 +303,9 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
-
- if (!error && parser->ib)
+ if (!error)
ttm_eu_fence_buffer_objects(&parser->validated,
- parser->ib->fence);
+ parser->ib.fence);
else
ttm_eu_backoff_reservation(&parser->validated);
@@ -323,12 +320,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->relocs_ptr);
for (i = 0; i < parser->nchunks; i++) {
kfree(parser->chunks[i].kdata);
- kfree(parser->chunks[i].kpage[0]);
- kfree(parser->chunks[i].kpage[1]);
+ if ((parser->rdev->flags & RADEON_IS_AGP)) {
+ kfree(parser->chunks[i].kpage[0]);
+ kfree(parser->chunks[i].kpage[1]);
+ }
}
kfree(parser->chunks);
kfree(parser->chunks_array);
radeon_ib_free(parser->rdev, &parser->ib);
+ radeon_ib_free(parser->rdev, &parser->const_ib);
}
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
@@ -354,7 +354,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to get ib !\n");
return r;
}
- parser->ib->length_dw = ib_chunk->length_dw;
+ parser->ib.length_dw = ib_chunk->length_dw;
r = radeon_cs_parse(rdev, parser->ring, parser);
if (r || parser->parser_error) {
DRM_ERROR("Invalid command stream !\n");
@@ -369,8 +369,8 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
if (r) {
DRM_ERROR("Failed to synchronize rings !\n");
}
- parser->ib->vm_id = 0;
- r = radeon_ib_schedule(rdev, parser->ib);
+ parser->ib.vm_id = 0;
+ r = radeon_ib_schedule(rdev, &parser->ib);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
}
@@ -421,14 +421,14 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to get const ib !\n");
return r;
}
- parser->const_ib->is_const_ib = true;
- parser->const_ib->length_dw = ib_chunk->length_dw;
+ parser->const_ib.is_const_ib = true;
+ parser->const_ib.length_dw = ib_chunk->length_dw;
/* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->const_ib->ptr, ib_chunk->user_ptr,
+ if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
ib_chunk->length_dw * 4)) {
return -EFAULT;
}
- r = radeon_ring_ib_parse(rdev, parser->ring, parser->const_ib);
+ r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
if (r) {
return r;
}
@@ -445,13 +445,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to get ib !\n");
return r;
}
- parser->ib->length_dw = ib_chunk->length_dw;
+ parser->ib.length_dw = ib_chunk->length_dw;
/* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+ if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
ib_chunk->length_dw * 4)) {
return -EFAULT;
}
- r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+ r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
if (r) {
return r;
}
@@ -472,34 +472,44 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
- parser->const_ib->vm_id = vm->id;
+ parser->const_ib.vm_id = vm->id;
/* ib pool is bound at 0 in virtual address space, so gpu_addr is the
* offset inside the pool bo
*/
- parser->const_ib->gpu_addr = parser->const_ib->sa_bo.offset;
- r = radeon_ib_schedule(rdev, parser->const_ib);
+ parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
+ r = radeon_ib_schedule(rdev, &parser->const_ib);
if (r)
goto out;
}
- parser->ib->vm_id = vm->id;
+ parser->ib.vm_id = vm->id;
/* ib pool is bound at 0 in virtual address space, so gpu_addr is the
* offset inside the pool bo
*/
- parser->ib->gpu_addr = parser->ib->sa_bo.offset;
- parser->ib->is_const_ib = false;
- r = radeon_ib_schedule(rdev, parser->ib);
+ parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
+ parser->ib.is_const_ib = false;
+ r = radeon_ib_schedule(rdev, &parser->ib);
out:
if (!r) {
if (vm->fence) {
radeon_fence_unref(&vm->fence);
}
- vm->fence = radeon_fence_ref(parser->ib->fence);
+ vm->fence = radeon_fence_ref(parser->ib.fence);
}
mutex_unlock(&fpriv->vm.mutex);
return r;
}
+static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
+{
+ if (r == -EDEADLK) {
+ r = radeon_gpu_reset(rdev);
+ if (!r)
+ r = -EAGAIN;
+ }
+ return r;
+}
+
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
@@ -521,6 +531,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r);
+ r = radeon_cs_handle_lockup(rdev, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
@@ -529,6 +540,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
+ r = radeon_cs_handle_lockup(rdev, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
@@ -542,6 +554,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
out:
radeon_cs_parser_fini(&parser, r);
+ r = radeon_cs_handle_lockup(rdev, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
@@ -559,7 +572,7 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
size = PAGE_SIZE;
}
- if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+ if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
ibc->user_ptr + (i * PAGE_SIZE),
size))
return -EFAULT;
@@ -573,9 +586,10 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
int i;
int size = PAGE_SIZE;
+ bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
- if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+ if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
ibc->user_ptr + (i * PAGE_SIZE),
PAGE_SIZE)) {
p->parser_error = -EFAULT;
@@ -583,14 +597,16 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
}
}
- new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
-
if (pg_idx == ibc->last_page_index) {
size = (ibc->length_dw * 4) % PAGE_SIZE;
- if (size == 0)
- size = PAGE_SIZE;
+ if (size == 0)
+ size = PAGE_SIZE;
}
+ new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
+ if (copy1)
+ ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
+
if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
ibc->user_ptr + (pg_idx * PAGE_SIZE),
size)) {
@@ -598,8 +614,9 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
return 0;
}
- /* copy to IB here */
- memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
+ /* copy to IB for the non-single-copy (AGP) case */
+ if (!copy1)
+ memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
ibc->last_copied_page = pg_idx;
ibc->kpage_idx[new_page] = pg_idx;
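With radeon_cs_handle_lockup in place, a CS ioctl that ran into a lockup returns -EAGAIN once the GPU reset succeeded, so userspace can resubmit instead of failing outright. A libdrm-style caller loop, sketched (illustrative; not from this patch):

/* resubmit the command stream while the kernel reports -EAGAIN
 * after a successful GPU reset */
int r;

do {
	r = drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
} while (r == -EAGAIN);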
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5992502a3448..066c98b888a5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -193,7 +193,7 @@ int radeon_wb_init(struct radeon_device *rdev)
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
+ RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
@@ -225,9 +225,9 @@ int radeon_wb_init(struct radeon_device *rdev)
/* disable event_write fences */
rdev->wb.use_event = false;
/* disabled via module param */
- if (radeon_no_wb == 1)
+ if (radeon_no_wb == 1) {
rdev->wb.enabled = false;
- else {
+ } else {
if (rdev->flags & RADEON_IS_AGP) {
/* often unreliable on AGP */
rdev->wb.enabled = false;
@@ -237,8 +237,9 @@ int radeon_wb_init(struct radeon_device *rdev)
} else {
rdev->wb.enabled = true;
/* event_write fences are only available on r600+ */
- if (rdev->family >= CHIP_R600)
+ if (rdev->family >= CHIP_R600) {
rdev->wb.use_event = true;
+ }
}
}
/* always use writeback/events on NI, APUs */
@@ -696,6 +697,11 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
+static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
+ .set_gpu_state = radeon_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = radeon_switcheroo_can_switch,
+};
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
@@ -714,7 +720,6 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->is_atom_bios = false;
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- rdev->gpu_lockup = false;
rdev->accel_working = false;
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
@@ -724,21 +729,18 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
radeon_mutex_init(&rdev->cs_mutex);
- radeon_mutex_init(&rdev->ib_pool.mutex);
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- mutex_init(&rdev->ring[i].mutex);
+ mutex_init(&rdev->ring_lock);
mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex);
- rwlock_init(&rdev->fence_lock);
- rwlock_init(&rdev->semaphore_drv.lock);
- INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
- INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
+ r = radeon_gem_init(rdev);
+ if (r)
+ return r;
/* initialize vm here */
rdev->vm_manager.use_bitmap = 1;
rdev->vm_manager.max_pfn = 1 << 20;
@@ -814,10 +816,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- vga_switcheroo_register_client(rdev->pdev,
- radeon_switcheroo_set_state,
- NULL,
- radeon_switcheroo_can_switch);
+ vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
r = radeon_init(rdev);
if (r)
@@ -914,9 +913,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
}
/* evict vram memory */
radeon_bo_evict_vram(rdev);
+
+ mutex_lock(&rdev->ring_lock);
/* wait for gpu to finish processing current batch */
for (i = 0; i < RADEON_NUM_RINGS; i++)
- radeon_fence_wait_last(rdev, i);
+ radeon_fence_wait_empty_locked(rdev, i);
+ mutex_unlock(&rdev->ring_lock);
radeon_save_bios_scratch_regs(rdev);
@@ -955,7 +957,6 @@ int radeon_resume_kms(struct drm_device *dev)
console_unlock();
return -1;
}
- pci_set_master(dev->pdev);
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);
@@ -988,9 +989,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
int r;
int resched;
- /* Prevent CS ioctl from interfering */
- radeon_mutex_lock(&rdev->cs_mutex);
-
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -1005,8 +1003,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
- radeon_mutex_unlock(&rdev->cs_mutex);
-
if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(rdev->dev, "GPU reset failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0a1d4bd65edc..64a008d14493 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -573,24 +573,6 @@ static const char *encoder_names[37] = {
"INTERNAL_VCE"
};
-static const char *connector_names[15] = {
- "Unknown",
- "VGA",
- "DVI-I",
- "DVI-D",
- "DVI-A",
- "Composite",
- "S-video",
- "LVDS",
- "Component",
- "DIN",
- "DisplayPort",
- "HDMI-A",
- "HDMI-B",
- "TV",
- "eDP",
-};
-
static const char *hpd_names[6] = {
"HPD1",
"HPD2",
@@ -613,7 +595,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
- DRM_INFO(" %s\n", connector_names[connector->connector_type]);
+ DRM_INFO(" %s\n", drm_get_connector_name(connector));
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus) {
@@ -1243,6 +1225,93 @@ void radeon_update_display_priority(struct radeon_device *rdev)
}
+/*
+ * Allocate hdmi structs and determine register offsets
+ */
+static void radeon_afmt_init(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
+ rdev->mode_info.afmt[i] = NULL;
+
+ if (ASIC_IS_DCE6(rdev)) {
+ /* todo */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* DCE4/5 has 6 audio blocks tied to DIG encoders */
+ /* DCE4.1 has 2 audio blocks tied to DIG encoders */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ if (!ASIC_IS_DCE41(rdev)) {
+ rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[2]) {
+ rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+ rdev->mode_info.afmt[2]->id = 2;
+ }
+ rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[3]) {
+ rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+ rdev->mode_info.afmt[3]->id = 3;
+ }
+ rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[4]) {
+ rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+ rdev->mode_info.afmt[4]->id = 4;
+ }
+ rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[5]) {
+ rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+ rdev->mode_info.afmt[5]->id = 5;
+ }
+ }
+ } else if (ASIC_IS_DCE3(rdev)) {
+ /* DCE3.x has 2 audio blocks tied to DIG encoders */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ } else if (ASIC_IS_DCE2(rdev)) {
+ /* DCE2 has at least 1 routable audio block */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ /* r6xx has 2 routable audio blocks */
+ if (rdev->family >= CHIP_R600) {
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ }
+ }
+}
+
+static void radeon_afmt_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
+ kfree(rdev->mode_info.afmt[i]);
+ rdev->mode_info.afmt[i] = NULL;
+ }
+}
+
int radeon_modeset_init(struct radeon_device *rdev)
{
int i;
@@ -1251,7 +1320,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
drm_mode_config_init(rdev->ddev);
rdev->mode_info.mode_config_initialized = true;
- rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
+ rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
if (ASIC_IS_DCE5(rdev)) {
rdev->ddev->mode_config.max_width = 16384;
@@ -1303,6 +1372,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* initialize hpd */
radeon_hpd_init(rdev);
+ /* setup afmt */
+ radeon_afmt_init(rdev);
+
/* Initialize power management */
radeon_pm_init(rdev);
@@ -1319,6 +1391,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
radeon_pm_fini(rdev);
if (rdev->mode_info.mode_config_initialized) {
+ radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
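radeon_afmt_init spells each audio block out longhand; since the register offset and id advance in lockstep, the DCE4/DCE4.1 branch is equivalent to a table-driven loop. A sketch of the same behavior (not code from the patch):

static const u32 eg_afmt_offsets[] = {
	EVERGREEN_CRTC0_REGISTER_OFFSET, EVERGREEN_CRTC1_REGISTER_OFFSET,
	EVERGREEN_CRTC2_REGISTER_OFFSET, EVERGREEN_CRTC3_REGISTER_OFFSET,
	EVERGREEN_CRTC4_REGISTER_OFFSET, EVERGREEN_CRTC5_REGISTER_OFFSET,
};
int i, num_afmt = ASIC_IS_DCE41(rdev) ? 2 : 6;

for (i = 0; i < num_afmt; i++) {
	rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
	if (rdev->mode_info.afmt[i]) {
		rdev->mode_info.afmt[i]->offset = eg_afmt_offsets[i];
		rdev->mode_info.afmt[i]->id = i;
	}
}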
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ef7bb3f6ecae..f0bb2b543b13 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -105,6 +105,11 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle);
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags);
+struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor);
@@ -128,6 +133,7 @@ int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;
int radeon_msi = -1;
+int radeon_lockup_timeout = 10000;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -177,6 +183,9 @@ module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, radeon_msi, int, 0444);
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)");
+module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -329,7 +338,8 @@ static const struct file_operations radeon_driver_kms_fops = {
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
+ DRIVER_PRIME,
.dev_priv_size = 0,
.load = radeon_driver_load_kms,
.firstopen = radeon_driver_firstopen_kms,
@@ -364,6 +374,12 @@ static struct drm_driver kms_driver = {
.dumb_map_offset = radeon_mode_dumb_mmap,
.dumb_destroy = radeon_mode_dumb_destroy,
.fops = &radeon_driver_kms_fops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = radeon_gem_prime_export,
+ .gem_prime_import = radeon_gem_prime_import,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
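Since lockup_timeout is registered with 0444 permissions, it is set at load time only, e.g. radeon.lockup_timeout=5000 on the kernel command line, or lockup_timeout=0 as a modprobe option to disable the detection entirely.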
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4bd36a354fbe..11f5f402d22c 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -63,98 +63,82 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (fence->emitted) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ /* we are protected by the ring emission mutex */
+ if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
return 0;
}
- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
- if (!rdev->ring[fence->ring].ready)
- /* FIXME: cp is not running assume everythings is done right
- * away
- */
- radeon_fence_write(rdev, fence->seq, fence->ring);
- else
- radeon_fence_ring_emit(rdev, fence->ring, fence);
-
+ fence->seq = ++rdev->fence_drv[fence->ring].seq;
+ radeon_fence_ring_emit(rdev, fence->ring, fence);
trace_radeon_fence_emit(rdev->ddev, fence->seq);
- fence->emitted = true;
- list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
{
- struct radeon_fence *fence;
- struct list_head *i, *n;
- uint32_t seq;
+ uint64_t seq, last_seq;
+ unsigned count_loop = 0;
bool wake = false;
- unsigned long cjiffies;
- seq = radeon_fence_read(rdev, ring);
- if (seq != rdev->fence_drv[ring].last_seq) {
- rdev->fence_drv[ring].last_seq = seq;
- rdev->fence_drv[ring].last_jiffies = jiffies;
- rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- } else {
- cjiffies = jiffies;
- if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
- cjiffies -= rdev->fence_drv[ring].last_jiffies;
- if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
- /* update the timeout */
- rdev->fence_drv[ring].last_timeout -= cjiffies;
- } else {
- /* the 500ms timeout is elapsed we should test
- * for GPU lockup
- */
- rdev->fence_drv[ring].last_timeout = 1;
- }
- } else {
- /* wrap around update last jiffies, we will just wait
- * a little longer
- */
- rdev->fence_drv[ring].last_jiffies = cjiffies;
+ /* Note there is a scenario here for an infinite loop, but it is
+ * very unlikely to happen. For it to happen, the current polling
+ * process needs to be interrupted by another process, and that
+ * other process needs to update last_seq between the atomic read
+ * and the xchg of the current process.
+ *
+ * Moreover, for this to turn into an infinite loop, new fences
+ * need to be signaled continuously, i.e. radeon_fence_read needs
+ * to return a different value each time for both the currently
+ * polling process and the other process that updates last_seq
+ * between our atomic read and xchg. And the value the other
+ * process sets as last_seq must be higher than the seq value we
+ * just read, which means the current process needs to be
+ * interrupted after radeon_fence_read and before the atomic xchg.
+ *
+ * To be even safer, we count the number of times we loop and
+ * bail out after 10 iterations, accepting that we might have
+ * temporarily set last_seq to an older value than the true last
+ * seq signaled by the hardware.
+ */
+ last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+ do {
+ seq = radeon_fence_read(rdev, ring);
+ seq |= last_seq & 0xffffffff00000000LL;
+ if (seq < last_seq) {
+ seq += 0x100000000LL;
}
- return false;
- }
- n = NULL;
- list_for_each(i, &rdev->fence_drv[ring].emitted) {
- fence = list_entry(i, struct radeon_fence, list);
- if (fence->seq == seq) {
- n = i;
+
+ if (seq == last_seq) {
break;
}
- }
- /* all fence previous to this one are considered as signaled */
- if (n) {
- i = n;
- do {
- n = i->prev;
- list_move_tail(i, &rdev->fence_drv[ring].signaled);
- fence = list_entry(i, struct radeon_fence, list);
- fence->signaled = true;
- i = n;
- } while (i != &rdev->fence_drv[ring].emitted);
+ /* If we loop again we don't want to return without
+ * checking if a fence is signaled, as it means that the
+ * seq we just read is different from the previous one.
+ */
wake = true;
+ last_seq = seq;
+ if ((count_loop++) > 10) {
+ /* We have looped too many times; bail out, accepting
+ * that we might have set an older fence seq than the
+ * current real last seq signaled by the hw.
+ */
+ break;
+ }
+ } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
+
+ if (wake) {
+ rdev->fence_drv[ring].last_activity = jiffies;
+ wake_up_all(&rdev->fence_queue);
}
- return wake;
}
static void radeon_fence_destroy(struct kref *kref)
{
- unsigned long irq_flags;
- struct radeon_fence *fence;
+ struct radeon_fence *fence;
fence = container_of(kref, struct radeon_fence, kref);
- write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
- list_del(&fence->list);
- fence->emitted = false;
- write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
- if (fence->semaphore)
- radeon_semaphore_free(fence->rdev, fence->semaphore);
+ fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
kfree(fence);
}
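radeon_fence_process above extends the 32-bit value read back from the fence register into the driver's new 64-bit sequence space: the upper word is taken from the last known seq, with one epoch carried when the low word wraps. The same logic in isolation (standalone sketch):

#include <stdint.h>

/* combine the 32-bit hw fence value with the upper word of the last
 * known 64-bit sequence; carry one epoch when the low word wrapped */
static uint64_t extend_fence_seq(uint64_t last_seq, uint32_t hw_seq)
{
	uint64_t seq = (last_seq & 0xffffffff00000000ULL) | hw_seq;

	if (seq < last_seq)	/* low 32 bits wrapped around */
		seq += 0x100000000ULL;
	return seq;
}

/* e.g. extend_fence_seq(0x1fffffff0ULL, 5) == 0x200000005ULL */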
@@ -162,171 +146,342 @@ int radeon_fence_create(struct radeon_device *rdev,
struct radeon_fence **fence,
int ring)
{
- unsigned long irq_flags;
-
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
}
kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
- (*fence)->emitted = false;
- (*fence)->signaled = false;
- (*fence)->seq = 0;
+ (*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
(*fence)->ring = ring;
- (*fence)->semaphore = NULL;
- INIT_LIST_HEAD(&(*fence)->list);
-
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-bool radeon_fence_signaled(struct radeon_fence *fence)
+static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
+ u64 seq, unsigned ring)
{
- unsigned long irq_flags;
- bool signaled = false;
-
- if (!fence)
+ if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
-
- if (fence->rdev->gpu_lockup)
+ }
+ /* poll new last sequence at least once */
+ radeon_fence_process(rdev, ring);
+ if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
+ }
+ return false;
+}
- write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
- signaled = fence->signaled;
- /* if we are shuting down report all fence as signaled */
- if (fence->rdev->shutdown) {
- signaled = true;
+bool radeon_fence_signaled(struct radeon_fence *fence)
+{
+ if (!fence) {
+ return true;
}
- if (!fence->emitted) {
+ if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
WARN(1, "Querying an unemitted fence : %p !\n", fence);
- signaled = true;
+ return true;
}
- if (!signaled) {
- radeon_fence_poll_locked(fence->rdev, fence->ring);
- signaled = fence->signaled;
+ if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
+ return true;
+ }
+ if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return true;
}
- write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
- return signaled;
+ return false;
+}
+
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
+ unsigned ring, bool intr, bool lock_ring)
+{
+ unsigned long timeout, last_activity;
+ uint64_t seq;
+ unsigned i;
+ bool signaled;
+ int r;
+
+ while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ if (!rdev->ring[ring].ready) {
+ return -EBUSY;
+ }
+
+ timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+ if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
+ /* the normal case, timeout is somewhere before last_activity */
+ timeout = rdev->fence_drv[ring].last_activity - timeout;
+ } else {
+ /* either jiffies wrapped around, or no fence was signaled in the last
+ * 500ms; either way, just wait for the minimum amount and then check for a lockup
+ */
+ timeout = 1;
+ }
+ seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+ /* Save the current last_activity value, used to check for GPU lockups */
+ last_activity = rdev->fence_drv[ring].last_activity;
+
+ trace_radeon_fence_wait_begin(rdev->ddev, seq);
+ radeon_irq_kms_sw_irq_get(rdev, ring);
+ if (intr) {
+ r = wait_event_interruptible_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+ timeout);
+ } else {
+ r = wait_event_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+ timeout);
+ }
+ radeon_irq_kms_sw_irq_put(rdev, ring);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ trace_radeon_fence_wait_end(rdev->ddev, seq);
+
+ if (unlikely(!signaled)) {
+ /* we were interrupted for some reason and fence
+ * isn't signaled yet, resume waiting */
+ if (r) {
+ continue;
+ }
+
+ /* check if sequence value has changed since last_activity */
+ if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ continue;
+ }
+
+ if (lock_ring) {
+ mutex_lock(&rdev->ring_lock);
+ }
+
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != rdev->fence_drv[ring].last_activity) {
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ continue;
+ }
+
+ if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ /* good news we believe it's a lockup */
+ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
+ target_seq, seq);
+
+ /* change last activity so nobody else thinks there is a lockup */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ rdev->fence_drv[i].last_activity = jiffies;
+ }
+
+ /* mark the ring as not ready any more */
+ rdev->ring[ring].ready = false;
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ return -EDEADLK;
+ }
+
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ }
+ }
+ return 0;
}
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
- struct radeon_device *rdev;
- unsigned long irq_flags, timeout;
- u32 seq;
int r;
if (fence == NULL) {
WARN(1, "Querying an invalid fence : %p !\n", fence);
- return 0;
+ return -EINVAL;
}
- rdev = fence->rdev;
- if (radeon_fence_signaled(fence)) {
- return 0;
+
+ r = radeon_fence_wait_seq(fence->rdev, fence->seq,
+ fence->ring, intr, true);
+ if (r) {
+ return r;
}
- timeout = rdev->fence_drv[fence->ring].last_timeout;
-retry:
- /* save current sequence used to check for GPU lockup */
- seq = rdev->fence_drv[fence->ring].last_seq;
- trace_radeon_fence_wait_begin(rdev->ddev, seq);
- if (intr) {
- radeon_irq_kms_sw_irq_get(rdev, fence->ring);
- r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
- radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev, fence->ring);
- if (unlikely(r < 0)) {
- return r;
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return 0;
+}
+
+bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
+ return true;
}
- } else {
- radeon_irq_kms_sw_irq_get(rdev, fence->ring);
- r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
- radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev, fence->ring);
}
- trace_radeon_fence_wait_end(rdev->ddev, seq);
- if (unlikely(!radeon_fence_signaled(fence))) {
- /* we were interrupted for some reason and fence isn't
- * isn't signaled yet, resume wait
- */
- if (r) {
- timeout = r;
- goto retry;
+ return false;
+}
+
+static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
+ u64 *target_seq, bool intr)
+{
+ unsigned long timeout, last_activity, tmp;
+ unsigned i, ring = RADEON_NUM_RINGS;
+ bool signaled;
+ int r;
+
+ for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i]) {
+ continue;
+ }
+
+ /* use the most recent one as indicator */
+ if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
+ last_activity = rdev->fence_drv[i].last_activity;
}
- /* don't protect read access to rdev->fence_drv[t].last_seq
- * if we experiencing a lockup the value doesn't change
+
+ /* For lockup detection just pick the lowest ring we are
+ * actively waiting for
*/
- if (seq == rdev->fence_drv[fence->ring].last_seq &&
- radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
- /* good news we believe it's a lockup */
- printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
- fence->seq, seq);
- /* FIXME: what should we do ? marking everyone
- * as signaled for now
+ if (i < ring) {
+ ring = i;
+ }
+ }
+
+ /* nothing to wait for ? */
+ if (ring == RADEON_NUM_RINGS) {
+ return 0;
+ }
+
+ while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+ timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+ if (time_after(last_activity, timeout)) {
+ /* the normal case, timeout is somewhere before last_activity */
+ timeout = last_activity - timeout;
+ } else {
+ /* either jiffies wrapped around, or no fence was signaled in the last
+ * 500ms; either way, just wait for the minimum amount and then check for a lockup
*/
- rdev->gpu_lockup = true;
- r = radeon_gpu_reset(rdev);
- if (r)
- return r;
- radeon_fence_write(rdev, fence->seq, fence->ring);
- rdev->gpu_lockup = false;
+ timeout = 1;
+ }
+
+ trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (target_seq[i]) {
+ radeon_irq_kms_sw_irq_get(rdev, i);
+ }
+ }
+ if (intr) {
+ r = wait_event_interruptible_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+ timeout);
+ } else {
+ r = wait_event_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+ timeout);
+ }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (target_seq[i]) {
+ radeon_irq_kms_sw_irq_put(rdev, i);
+ }
+ }
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
+
+ if (unlikely(!signaled)) {
+ /* we were interrupted for some reason and fence
+ * isn't signaled yet, resume waiting */
+ if (r) {
+ continue;
+ }
+
+ mutex_lock(&rdev->ring_lock);
+ for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
+ tmp = rdev->fence_drv[i].last_activity;
+ }
+ }
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != tmp) {
+ last_activity = tmp;
+ mutex_unlock(&rdev->ring_lock);
+ continue;
+ }
+
+ if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ /* good news we believe it's a lockup */
+ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
+ target_seq[ring]);
+
+ /* change last activity so nobody else thinks there is a lockup */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ rdev->fence_drv[i].last_activity = jiffies;
+ }
+
+ /* mark the ring as not ready any more */
+ rdev->ring[ring].ready = false;
+ mutex_unlock(&rdev->ring_lock);
+ return -EDEADLK;
+ }
+ mutex_unlock(&rdev->ring_lock);
}
- timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- rdev->fence_drv[fence->ring].last_jiffies = jiffies;
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- goto retry;
}
return 0;
}
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_any(struct radeon_device *rdev,
+ struct radeon_fence **fences,
+ bool intr)
{
- unsigned long irq_flags;
- struct radeon_fence *fence;
+ uint64_t seq[RADEON_NUM_RINGS];
+ unsigned i;
int r;
- if (rdev->gpu_lockup) {
- return 0;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ seq[i] = 0;
+
+ if (!fences[i]) {
+ continue;
+ }
+
+ if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
+ /* something was already signaled */
+ return 0;
+ }
+
+ if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+ seq[i] = fences[i]->seq;
+ }
}
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (list_empty(&rdev->fence_drv[ring].emitted)) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return 0;
+
+ r = radeon_fence_wait_any_seq(rdev, seq, intr);
+ if (r) {
+ return r;
}
- fence = list_entry(rdev->fence_drv[ring].emitted.next,
- struct radeon_fence, list);
- radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- r = radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
- return r;
+ return 0;
}
-int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
- unsigned long irq_flags;
- struct radeon_fence *fence;
- int r;
-
- if (rdev->gpu_lockup) {
- return 0;
+ uint64_t seq;
+
+ /* We are not protected by the ring lock when reading the current
+ * seq, but that's ok: worst case we return too early while we
+ * could have waited.
+ */
+ seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+ if (seq >= rdev->fence_drv[ring].seq) {
+ /* nothing to wait for, last_seq is
+ already the last emitted fence */
+ return -ENOENT;
}
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (list_empty(&rdev->fence_drv[ring].emitted)) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return 0;
- }
- fence = list_entry(rdev->fence_drv[ring].emitted.prev,
- struct radeon_fence, list);
- radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- r = radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
- return r;
+ return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+}
+
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+{
+ /* We are not protected by the ring lock when reading the current seq,
+ * but it's ok as wait empty is called from a place where no more
+ * activity can be scheduled, so there won't be concurrent access
+ * to the seq value.
+ */
+ return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
+ ring, false, false);
}
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
@@ -345,49 +500,27 @@ void radeon_fence_unref(struct radeon_fence **fence)
}
}
-void radeon_fence_process(struct radeon_device *rdev, int ring)
-{
- unsigned long irq_flags;
- bool wake;
-
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- wake = radeon_fence_poll_locked(rdev, ring);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- if (wake) {
- wake_up_all(&rdev->fence_drv[ring].queue);
- }
-}
-
-int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
- unsigned long irq_flags;
- int not_processed = 0;
-
- read_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (!rdev->fence_drv[ring].initialized) {
- read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return 0;
+ uint64_t emitted;
+
+ /* We are not protected by the ring lock when reading the last sequence,
+ * but it's ok to report a slightly wrong fence count here.
+ */
+ radeon_fence_process(rdev, ring);
+ emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq);
+ /* to avoid a 32 bit wrap around */
+ if (emitted > 0x10000000) {
+ emitted = 0x10000000;
}
-
- if (!list_empty(&rdev->fence_drv[ring].emitted)) {
- struct list_head *ptr;
- list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
- /* count up to 3, that's enought info */
- if (++not_processed >= 3)
- break;
- }
- }
- read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return not_processed;
+ return (unsigned)emitted;
}
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
- unsigned long irq_flags;
uint64_t index;
int r;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
if (rdev->wb.use_event) {
rdev->fence_drv[ring].scratch_reg = 0;
@@ -396,7 +529,6 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
if (r) {
dev_err(rdev->dev, "fence failed to get scratch register\n");
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return r;
}
index = RADEON_WB_SCRATCH_OFFSET +
@@ -405,11 +537,10 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
}
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
+ radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
rdev->fence_drv[ring].initialized = true;
- DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
+ dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
@@ -418,24 +549,20 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
rdev->fence_drv[ring].scratch_reg = -1;
rdev->fence_drv[ring].cpu_addr = NULL;
rdev->fence_drv[ring].gpu_addr = 0;
- atomic_set(&rdev->fence_drv[ring].seq, 0);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
- init_waitqueue_head(&rdev->fence_drv[ring].queue);
+ rdev->fence_drv[ring].seq = 0;
+ atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
+ rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].initialized = false;
}
int radeon_fence_driver_init(struct radeon_device *rdev)
{
- unsigned long irq_flags;
int ring;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ init_waitqueue_head(&rdev->fence_queue);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
radeon_fence_driver_init_ring(rdev, ring);
}
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
if (radeon_debugfs_fence_init(rdev)) {
dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
@@ -444,19 +571,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
- unsigned long irq_flags;
int ring;
+ mutex_lock(&rdev->ring_lock);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
- radeon_fence_wait_last(rdev, ring);
- wake_up_all(&rdev->fence_drv[ring].queue);
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ radeon_fence_wait_empty_locked(rdev, ring);
+ wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
rdev->fence_drv[ring].initialized = false;
}
+ mutex_unlock(&rdev->ring_lock);
}
@@ -469,7 +595,6 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_fence *fence;
int i;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -477,14 +602,10 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
continue;
seq_printf(m, "--- ring %d ---\n", i);
- seq_printf(m, "Last signaled fence 0x%08X\n",
- radeon_fence_read(rdev, i));
- if (!list_empty(&rdev->fence_drv[i].emitted)) {
- fence = list_entry(rdev->fence_drv[i].emitted.prev,
- struct radeon_fence, list);
- seq_printf(m, "Last emitted fence %p with 0x%08X\n",
- fence, fence->seq);
- }
+ seq_printf(m, "Last signaled fence 0x%016llx\n",
+ (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
+ seq_printf(m, "Last emitted 0x%016llx\n",
+ rdev->fence_drv[i].seq);
}
return 0;
}
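
The fence rework above keys every wait off per-ring 64-bit sequence
numbers: a wait builds an array of target seqs (0 meaning "no fence on
that ring") and sleeps until any ring reaches its target. A minimal
userspace model of that "any signaled" predicate follows; NUM_RINGS and
last_seq are illustrative stand-ins, not the driver's types.

    /* Model of the "any fence signaled" predicate: a target sequence
     * number per ring, where 0 means "no fence waited on here". */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_RINGS 5

    static uint64_t last_seq[NUM_RINGS]; /* last seq seen signaled per ring */

    static bool any_seq_signaled(const uint64_t target_seq[NUM_RINGS])
    {
        for (int i = 0; i < NUM_RINGS; ++i) {
            if (!target_seq[i])
                continue;       /* ring not involved in this wait */
            if (last_seq[i] >= target_seq[i])
                return true;
        }
        return false;
    }

    int main(void)
    {
        uint64_t target[NUM_RINGS] = { 10, 0, 3, 0, 0 };

        last_seq[2] = 3;        /* ring 2 reached seq 3 */
        printf("signaled: %d\n", any_seq_signaled(target)); /* prints 1 */
        return 0;
    }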
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 456a77cf4b7f..79db56e6c2ac 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -80,7 +80,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->gart.robj);
+ NULL, &rdev->gart.robj);
if (r) {
return r;
}
@@ -326,7 +326,7 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
list_del_init(&vm->list);
vm->id = -1;
- radeon_sa_bo_free(rdev, &vm->sa_bo);
+ radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
vm->pt = NULL;
list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -395,7 +395,7 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
- RADEON_GPU_PAGE_SIZE);
+ RADEON_GPU_PAGE_SIZE, false);
if (r) {
if (list_empty(&rdev->vm_manager.lru_vm)) {
return r;
@@ -404,10 +404,8 @@ retry:
radeon_vm_unbind(rdev, vm_evict);
goto retry;
}
- vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
- vm->pt += (vm->sa_bo.offset >> 3);
- vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
- vm->pt_gpu_addr += vm->sa_bo.offset;
+ vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
+ vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
retry_id:
@@ -428,14 +426,14 @@ retry_id:
/* do hw bind */
r = rdev->vm_manager.funcs->bind(rdev, vm, id);
if (r) {
- radeon_sa_bo_free(rdev, &vm->sa_bo);
+ radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
return r;
}
rdev->vm_manager.use_bitmap |= 1 << id;
vm->id = id;
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
- return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
- &rdev->ib_pool.sa_manager.bo->tbo.mem);
+ return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
+ &rdev->ring_tmp_bo.bo->tbo.mem);
}
/* object have to be reserved */
@@ -633,7 +631,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
/* map the ib pool buffer at 0 in virtual address space, set
* read only
*/
- r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+ r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
return r;
}
@@ -650,12 +648,12 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_mutex_unlock(&rdev->cs_mutex);
/* remove all bo */
- r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
- bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+ bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
list_del_init(&bo_va->bo_list);
list_del_init(&bo_va->vm_list);
- radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
kfree(bo_va);
}
if (!list_empty(&vm->va)) {
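
radeon_vm_bind() above allocates the page table from the sub-allocator
and, when that fails, evicts the least recently used VM and retries. A
toy model of that allocate-or-evict loop; the pool size and the LRU
list here are invented for illustration.

    /* Toy model of the retry loop: if the pool can't satisfy the
     * request, evict the oldest entry and try again. */
    #include <stdio.h>

    #define POOL_SIZE 8

    static int used;                 /* units currently allocated */
    static int lru[4] = { 3, 2, 1 }; /* sizes of bound VMs, oldest first */
    static int nlru = 3;

    static int try_alloc(int size)
    {
        if (used + size > POOL_SIZE)
            return -1;               /* no room */
        used += size;
        return 0;
    }

    static int evict_oldest(void)
    {
        if (nlru == 0)
            return -1;               /* nothing left to evict */
        used -= lru[0];
        for (int i = 1; i < nlru; ++i)
            lru[i - 1] = lru[i];
        --nlru;
        return 0;
    }

    int main(void)
    {
        used = 6;                    /* pool mostly full */

        while (try_alloc(4)) {
            if (evict_oldest()) {
                puts("allocation failed");
                return 1;
            }
        }
        printf("allocated 4 units, used now %d\n", used);
        return 0;
    }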
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0519b05968b5..f28bd4b7ef98 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -42,6 +42,8 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
if (robj) {
+ if (robj->gem_base.import_attach)
+ drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
radeon_bo_unref(&robj);
}
}
@@ -59,7 +61,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
@@ -154,6 +156,17 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
radeon_bo_unreserve(rbo);
}
+static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
+{
+ if (r == -EDEADLK) {
+ radeon_mutex_lock(&rdev->cs_mutex);
+ r = radeon_gpu_reset(rdev);
+ if (!r)
+ r = -EAGAIN;
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ }
+ return r;
+}
/*
* GEM ioctls.
@@ -210,12 +223,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
args->initial_domain, false,
false, &gobj);
if (r) {
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
args->handle = handle;
@@ -245,6 +260,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
drm_gem_object_unreference_unlocked(gobj);
+ r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}
@@ -301,6 +317,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
break;
}
drm_gem_object_unreference_unlocked(gobj);
+ r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}
@@ -322,6 +339,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
if (robj->rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
drm_gem_object_unreference_unlocked(gobj);
+ r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}
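
Every GEM ioctl above now funnels its return value through
radeon_gem_handle_lockup(), which converts the -EDEADLK reported by the
fence code into a GPU reset followed by -EAGAIN, so userspace simply
retries the ioctl. A sketch of that translation pattern; gpu_reset()
and the errno defines are stand-ins for the kernel's.

    /* Model of the -EDEADLK -> reset -> -EAGAIN translation. */
    #include <stdio.h>

    #define EDEADLK 35
    #define EAGAIN  11

    static int gpu_reset(void)
    {
        puts("resetting GPU");
        return 0;                    /* pretend the reset succeeded */
    }

    static int handle_lockup(int r)
    {
        if (r == -EDEADLK) {
            /* lockup detected: reset, then ask the caller to retry */
            if (!gpu_reset())
                r = -EAGAIN;
        }
        return r;
    }

    int main(void)
    {
        printf("0 -> %d\n", handle_lockup(0));               /* unchanged */
        printf("-EDEADLK -> %d\n", handle_lockup(-EDEADLK)); /* -EAGAIN */
        return 0;
    }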
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 65060b77c805..5df58d1aba06 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -73,6 +73,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
rdev->irq.pflip[i] = false;
+ rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
/* Clear bits */
@@ -108,6 +109,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
rdev->irq.pflip[i] = false;
+ rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
}
@@ -170,6 +172,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
int r = 0;
INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
spin_lock_init(&rdev->irq.sw_lock);
for (i = 0; i < rdev->num_crtc; i++)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3c2628b14d56..f1016a5820d1 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -57,8 +57,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
dev->dev_private = (void *)rdev;
- pci_set_master(dev->pdev);
-
/* update BUS flag */
if (drm_pci_device_is_agp(dev)) {
flags |= RADEON_IS_AGP;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 42db254f6bb0..a0c82229e8f0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -369,6 +369,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
goto error;
}
+ memset(&props, 0, sizeof(props));
props.max_brightness = MAX_RADEON_LEVEL;
props.type = BACKLIGHT_RAW;
bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f7eb5d8b9fd3..5b10ffd7bb2f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -210,6 +210,7 @@ enum radeon_connector_table {
CT_RN50_POWER,
CT_MAC_X800,
CT_MAC_G5_9600,
+ CT_SAM440EP
};
enum radeon_dvo_chip {
@@ -219,12 +220,20 @@ enum radeon_dvo_chip {
struct radeon_fbdev;
+struct radeon_afmt {
+ bool enabled;
+ int offset;
+ bool last_buffer_filled_status;
+ int id;
+};
+
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[6];
+ struct radeon_afmt *afmt[6];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -363,6 +372,7 @@ struct radeon_encoder_atom_dig {
int dpms_mode;
uint8_t backlight_level;
int panel_mode;
+ struct radeon_afmt *afmt;
};
struct radeon_encoder_atom_dac {
@@ -384,10 +394,6 @@ struct radeon_encoder {
struct drm_display_mode native_mode;
void *enc_priv;
int audio_polling_active;
- int hdmi_offset;
- int hdmi_config_offset;
- int hdmi_audio_workaround;
- int hdmi_buffer_status;
bool is_ext_encoder;
u16 caps;
};
@@ -476,6 +482,7 @@ extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
+extern int radeon_get_monitor_bpc(struct drm_connector *connector);
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index df6a4dbd93f8..830f1a7b486f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -104,7 +104,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
- struct radeon_bo **bo_ptr)
+ struct sg_table *sg, struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
@@ -120,6 +120,8 @@ int radeon_bo_create(struct radeon_device *rdev,
}
if (kernel) {
type = ttm_bo_type_kernel;
+ } else if (sg) {
+ type = ttm_bo_type_sg;
} else {
type = ttm_bo_type_device;
}
@@ -155,7 +157,7 @@ retry:
mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, 0, !kernel, NULL,
- acc_size, &radeon_ttm_bo_destroy);
+ acc_size, sg, &radeon_ttm_bo_destroy);
mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index f9104be88d7c..17fb99f177cf 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -111,9 +111,10 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
bool no_wait);
extern int radeon_bo_create(struct radeon_device *rdev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain,
- struct radeon_bo **bo_ptr);
+ unsigned long size, int byte_align,
+ bool kernel, u32 domain,
+ struct sg_table *sg,
+ struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
@@ -146,6 +147,17 @@ extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
/*
* sub allocation
*/
+
+static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
+{
+ return sa_bo->manager->gpu_addr + sa_bo->soffset;
+}
+
+static inline void *radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
+{
+ return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+}
+
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 domain);
@@ -157,9 +169,15 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo *sa_bo,
- unsigned size, unsigned align);
+ struct radeon_sa_bo **sa_bo,
+ unsigned size, unsigned align, bool block);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
- struct radeon_sa_bo *sa_bo);
+ struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence);
+#if defined(CONFIG_DEBUG_FS)
+extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+ struct seq_file *m);
+#endif
+
#endif
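
The radeon_sa_bo_gpu_addr()/radeon_sa_bo_cpu_addr() helpers added above
capture the core idea of the reworked sub-allocator: an allocation is
just a [soffset, eoffset) range in one backing buffer, so the CPU and
GPU views differ only in the base the offset is added to. Modeled in
plain C with an invented GPU base address:

    /* Offset-based suballocation addressing: one backing buffer, both
     * views of an allocation share the same byte offset. */
    #include <stdint.h>
    #include <stdio.h>

    struct sa_manager {
        uint64_t gpu_addr;          /* buffer address as the GPU sees it */
        uint8_t *cpu_ptr;           /* buffer as mapped for the CPU */
    };

    struct sa_bo {
        struct sa_manager *manager;
        unsigned soffset, eoffset;  /* [start, end) within the buffer */
    };

    static uint64_t sa_bo_gpu_addr(const struct sa_bo *bo)
    {
        return bo->manager->gpu_addr + bo->soffset;
    }

    static void *sa_bo_cpu_addr(const struct sa_bo *bo)
    {
        return bo->manager->cpu_ptr + bo->soffset;
    }

    int main(void)
    {
        static uint8_t backing[4096];
        struct sa_manager mgr = { 0x100000, backing }; /* invented base */
        struct sa_bo bo = { &mgr, 256, 512 };

        printf("gpu 0x%llx cpu %p\n",
               (unsigned long long)sa_bo_gpu_addr(&bo),
               sa_bo_cpu_addr(&bo));
        return 0;
    }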
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index caa55d68f319..08825548ee69 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -252,10 +252,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (rdev->ring[i].ring_obj)
- mutex_lock(&rdev->ring[i].mutex);
- }
+ mutex_lock(&rdev->ring_lock);
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
@@ -273,13 +270,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
} else {
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
if (ring->ready) {
- struct radeon_fence *fence;
- radeon_ring_alloc(rdev, ring, 64);
- radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
- radeon_fence_emit(rdev, fence);
- radeon_ring_commit(rdev, ring);
- radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
+ radeon_fence_wait_empty_locked(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
}
radeon_unmap_vram_bos(rdev);
@@ -311,10 +302,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (rdev->ring[i].ring_obj)
- mutex_unlock(&rdev->ring[i].mutex);
- }
+ mutex_unlock(&rdev->ring_lock);
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
new file mode 100644
index 000000000000..b8f835d8ecb4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * based on nouveau_prime.c
+ *
+ * Authors: Alex Deucher
+ */
+#include "drmP.h"
+#include "drm.h"
+
+#include "radeon.h"
+#include "radeon_drm.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct radeon_bo *bo = attachment->dmabuf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
+ int npages = bo->tbo.num_pages;
+ struct sg_table *sg;
+ int nents;
+
+ mutex_lock(&dev->struct_mutex);
+ sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+ nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ mutex_unlock(&dev->struct_mutex);
+ return sg;
+}
+
+static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct radeon_bo *bo = dma_buf->priv;
+
+ if (bo->gem_base.export_dma_buf == dma_buf) {
+ DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
+ bo->gem_base.export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(&bo->gem_base);
+ }
+}
+
+static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static const struct dma_buf_ops radeon_dmabuf_ops = {
+ .map_dma_buf = radeon_gem_map_dma_buf,
+ .unmap_dma_buf = radeon_gem_unmap_dma_buf,
+ .release = radeon_gem_dmabuf_release,
+ .kmap = radeon_gem_kmap,
+ .kmap_atomic = radeon_gem_kmap_atomic,
+ .kunmap = radeon_gem_kunmap,
+ .kunmap_atomic = radeon_gem_kunmap_atomic,
+};
+
+static int radeon_prime_create(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg,
+ struct radeon_bo **pbo)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_bo *bo;
+ int ret;
+
+ ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
+ RADEON_GEM_DOMAIN_GTT, sg, pbo);
+ if (ret)
+ return ret;
+ bo = *pbo;
+ bo->gem_base.driver_private = bo;
+
+ mutex_lock(&rdev->gem.mutex);
+ list_add_tail(&bo->list, &rdev->gem.objects);
+ mutex_unlock(&rdev->gem.mutex);
+
+ return 0;
+}
+
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags)
+{
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
+ int ret = 0;
+
+ /* pin buffer into GTT */
+ ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
+}
+
+struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct radeon_bo *bo;
+ int ret;
+
+ if (dma_buf->ops == &radeon_dmabuf_ops) {
+ bo = dma_buf->priv;
+ if (bo->gem_base.dev == dev) {
+ drm_gem_object_reference(&bo->gem_base);
+ return &bo->gem_base;
+ }
+ }
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
+ if (ret)
+ goto fail_unmap;
+
+ bo->gem_base.import_attach = attach;
+
+ return &bo->gem_base;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
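
radeon_gem_prime_import() above has a fast path: if the dma-buf was
exported by this same radeon device, it just takes a reference on the
underlying GEM object instead of attaching and mapping it. A heavily
reduced model of that check; all of the types here are simplified
stand-ins.

    /* Model of the PRIME self-import fast path: a buffer exported by
     * the same device is reused directly rather than re-attached. */
    #include <stdio.h>

    struct buf_ops { int unused; };
    static const struct buf_ops my_ops;  /* stands in for radeon_dmabuf_ops */

    struct gem_object { const void *dev; int refcount; };
    struct dma_buf { const struct buf_ops *ops; struct gem_object *priv; };

    static struct gem_object *prime_import(struct dma_buf *dbuf,
                                           const void *dev)
    {
        if (dbuf->ops == &my_ops && dbuf->priv->dev == dev) {
            dbuf->priv->refcount++;  /* same device: just take a reference */
            return dbuf->priv;
        }
        return NULL;                 /* slow path: attach + map, not modeled */
    }

    int main(void)
    {
        struct gem_object obj = { (void *)0x1, 1 };
        struct dma_buf dbuf = { &my_ops, &obj };

        struct gem_object *o = prime_import(&dbuf, (void *)0x1);
        printf("fast path hit: %d, refcount %d\n", o != NULL, obj.refcount);
        return 0;
    }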
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index cc33b3d7c33b..493a7be75306 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -24,6 +24,7 @@
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
+ * Christian König
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -33,8 +34,10 @@
#include "radeon.h"
#include "atom.h"
-int radeon_debugfs_ib_init(struct radeon_device *rdev);
-int radeon_debugfs_ring_init(struct radeon_device *rdev);
+/*
+ * IB.
+ */
+int radeon_debugfs_sa_init(struct radeon_device *rdev);
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
@@ -61,123 +64,37 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
return idx_value;
}
-void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
-{
-#if DRM_DEBUG_CODE
- if (ring->count_dw <= 0) {
- DRM_ERROR("radeon: writting more dword to ring than expected !\n");
- }
-#endif
- ring->ring[ring->wptr++] = v;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw--;
- ring->ring_free_dw--;
-}
-
-/*
- * IB.
- */
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- bool done = false;
-
- /* only free ib which have been emited */
- if (ib->fence && ib->fence->emitted) {
- if (radeon_fence_signaled(ib->fence)) {
- radeon_fence_unref(&ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo);
- done = true;
- }
- }
- return done;
-}
-
int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib **ib, unsigned size)
+ struct radeon_ib *ib, unsigned size)
{
- struct radeon_fence *fence;
- unsigned cretry = 0;
- int r = 0, i, idx;
-
- *ib = NULL;
- /* align size on 256 bytes */
- size = ALIGN(size, 256);
+ int r;
- r = radeon_fence_create(rdev, &fence, ring);
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
- dev_err(rdev->dev, "failed to create fence for new IB\n");
+ dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
}
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- idx = rdev->ib_pool.head_id;
-retry:
- if (cretry > 5) {
- dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return -ENOMEM;
- }
- cretry++;
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
- if (rdev->ib_pool.ibs[idx].fence == NULL) {
- r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
- &rdev->ib_pool.ibs[idx].sa_bo,
- size, 256);
- if (!r) {
- *ib = &rdev->ib_pool.ibs[idx];
- (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
- (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
- (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
- (*ib)->gpu_addr += (*ib)->sa_bo.offset;
- (*ib)->fence = fence;
- (*ib)->vm_id = 0;
- (*ib)->is_const_ib = false;
- /* ib are most likely to be allocated in a ring fashion
- * thus rdev->ib_pool.head_id should be the id of the
- * oldest ib
- */
- rdev->ib_pool.head_id = (1 + idx);
- rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- return 0;
- }
- }
- idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
- }
- /* this should be rare event, ie all ib scheduled none signaled yet.
- */
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
- r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
- if (!r) {
- goto retry;
- }
- /* an error happened */
- break;
- }
- idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ r = radeon_fence_create(rdev, &ib->fence, ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
+ return r;
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return r;
+
+ ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+ ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+ ib->vm_id = 0;
+ ib->is_const_ib = false;
+ ib->semaphore = NULL;
+
+ return 0;
}
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ib *tmp = *ib;
-
- *ib = NULL;
- if (tmp == NULL) {
- return;
- }
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (tmp->fence && !tmp->fence->emitted) {
- radeon_sa_bo_free(rdev, &tmp->sa_bo);
- radeon_fence_unref(&tmp->fence);
- }
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
+ radeon_semaphore_free(rdev, ib->semaphore, ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+ radeon_fence_unref(&ib->fence);
}
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
@@ -187,14 +104,14 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
- DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
+ dev_err(rdev->dev, "couldn't schedule ib\n");
return -EINVAL;
}
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, ring, 64);
if (r) {
- DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
+ dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
@@ -205,74 +122,90 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
int radeon_ib_pool_init(struct radeon_device *rdev)
{
- struct radeon_sa_manager tmp;
- int i, r;
+ int r;
- r = radeon_sa_bo_manager_init(rdev, &tmp,
+ if (rdev->ib_pool_ready) {
+ return 0;
+ }
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GEM_DOMAIN_GTT);
if (r) {
return r;
}
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.ready) {
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_sa_bo_manager_fini(rdev, &tmp);
- return 0;
- }
-
- rdev->ib_pool.sa_manager = tmp;
- INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- rdev->ib_pool.ibs[i].fence = NULL;
- rdev->ib_pool.ibs[i].idx = i;
- rdev->ib_pool.ibs[i].length_dw = 0;
- INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
- }
- rdev->ib_pool.head_id = 0;
- rdev->ib_pool.ready = true;
- DRM_INFO("radeon: ib pool ready.\n");
-
- if (radeon_debugfs_ib_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for IB !\n");
- }
- if (radeon_debugfs_ring_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for rings !\n");
+ rdev->ib_pool_ready = true;
+ if (radeon_debugfs_sa_init(rdev)) {
+ dev_err(rdev->dev, "failed to register debugfs file for SA\n");
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
return 0;
}
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
- unsigned i;
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.ready) {
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
- radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
- }
- radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
- rdev->ib_pool.ready = false;
+ if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+ rdev->ib_pool_ready = false;
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
}
int radeon_ib_pool_start(struct radeon_device *rdev)
{
- return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+ return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
}
int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
- return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+ return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+}
+
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+ unsigned i;
+ int r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (!ring->ready)
+ continue;
+
+ r = radeon_ib_test(rdev, i, ring);
+ if (r) {
+ ring->ready = false;
+
+ if (i == RADEON_RING_TYPE_GFX_INDEX) {
+ /* oh, oh, that's really bad */
+ DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+
+ } else {
+ /* still not good, but we can live with it */
+ DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+ }
+ }
+ }
+ return 0;
}
/*
* Ring.
*/
+int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+ if (ring->count_dw <= 0) {
+ DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+ }
+#endif
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
+}
+
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
/* r1xx-r5xx only has CP ring */
@@ -319,7 +252,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
if (ndw < ring->ring_free_dw) {
break;
}
- r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
+ r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
if (r)
return r;
}
@@ -332,10 +265,10 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
{
int r;
- mutex_lock(&ring->mutex);
+ mutex_lock(&rdev->ring_lock);
r = radeon_ring_alloc(rdev, ring, ndw);
if (r) {
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->ring_lock);
return r;
}
return 0;
@@ -360,13 +293,85 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_commit(rdev, ring);
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->ring_lock);
}
-void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_undo(struct radeon_ring *ring)
{
ring->wptr = ring->wptr_old;
- mutex_unlock(&ring->mutex);
+}
+
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ radeon_ring_undo(ring);
+ mutex_unlock(&rdev->ring_lock);
+}
+
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ int r;
+
+ radeon_ring_free_size(rdev, ring);
+ if (ring->rptr == ring->wptr) {
+ r = radeon_ring_alloc(rdev, ring, 1);
+ if (!r) {
+ radeon_ring_write(ring, ring->nop);
+ radeon_ring_commit(rdev, ring);
+ }
+ }
+}
+
+void radeon_ring_lockup_update(struct radeon_ring *ring)
+{
+ ring->last_rptr = ring->rptr;
+ ring->last_activity = jiffies;
+}
+
+/**
+ * radeon_ring_test_lockup() - check if the ring is locked up
+ * @rdev: radeon device structure
+ * @ring: radeon_ring structure holding ring information
+ *
+ * We don't need to initialize the lockup tracking information, as either the
+ * CP rptr will have moved to a different value or jiffies will have wrapped
+ * around, which will force initialization of the lockup tracking information.
+ *
+ * A possible false positive is if we get called after a while and last_cp_rptr
+ * == the current CP rptr; even if it's unlikely, it might happen. To avoid
+ * this, if the elapsed time since the last call is bigger than 2 seconds we
+ * return false and update the tracking information. Due to this the caller
+ * must call radeon_ring_test_lockup several times in less than 2 seconds for
+ * a lockup to be reported; the fencing code should be cautious about that.
+ *
+ * The caller should write to the ring to force the CP to do something, so we
+ * don't get a false positive when the CP simply has nothing to do.
+ *
+ **/
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ unsigned long cjiffies, elapsed;
+ uint32_t rptr;
+
+ cjiffies = jiffies;
+ if (!time_after(cjiffies, ring->last_activity)) {
+ /* likely a wrap around */
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ rptr = RREG32(ring->rptr_reg);
+ ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+ if (ring->rptr != ring->last_rptr) {
+ /* CP is still working, no lockup */
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
+ if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
+ dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+ return true;
+ }
+ /* give a chance to the GPU ... */
+ return false;
}
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
@@ -385,8 +390,8 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT,
- &ring->ring_obj);
+ RADEON_GEM_DOMAIN_GTT,
+ NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
@@ -411,6 +416,9 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->ring_free_dw = ring->ring_size / 4;
+ if (radeon_debugfs_ring_init(rdev, ring)) {
+ DRM_ERROR("Failed to register debugfs file for rings !\n");
+ }
return 0;
}
@@ -419,11 +427,12 @@ void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
int r;
struct radeon_bo *ring_obj;
- mutex_lock(&ring->mutex);
+ mutex_lock(&rdev->ring_lock);
ring_obj = ring->ring_obj;
+ ring->ready = false;
ring->ring = NULL;
ring->ring_obj = NULL;
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->ring_lock);
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
@@ -476,59 +485,48 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
- unsigned i;
- if (ib == NULL) {
- return 0;
- }
- seq_printf(m, "IB %04u\n", ib->idx);
- seq_printf(m, "IB fence %p\n", ib->fence);
- seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
- for (i = 0; i < ib->length_dw; i++) {
- seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
- }
+ radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
return 0;
+
}
-static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
-static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
-static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+ {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
#endif
-int radeon_debugfs_ring_init(struct radeon_device *rdev)
+int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
- if (rdev->family >= CHIP_CAYMAN)
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
- ARRAY_SIZE(radeon_debugfs_ring_info_list));
- else
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 1);
-#else
- return 0;
+ unsigned i;
+ for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
+ struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
+ int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
+ unsigned r;
+
+ if (&rdev->ring[ridx] != ring)
+ continue;
+
+ r = radeon_debugfs_add_files(rdev, info, 1);
+ if (r)
+ return r;
+ }
#endif
+ return 0;
}
-int radeon_debugfs_ib_init(struct radeon_device *rdev)
+int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned i;
-
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
- radeon_debugfs_ib_idx[i] = i;
- radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
- radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
- radeon_debugfs_ib_list[i].driver_features = 0;
- radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
- }
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
- RADEON_IB_POOL_SIZE);
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
return 0;
#endif
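
radeon_ring_test_lockup() above only declares a lockup when the read
pointer hasn't moved between two calls and the configured timeout has
elapsed; radeon_ring_force_activity() pairs with it by writing a NOP
when the ring is idle, so the CP always has something to consume. A
compact model of the detection logic, using milliseconds instead of
jiffies:

    /* Model of rptr-based lockup detection: no rptr movement plus an
     * expired timeout means lockup. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LOCKUP_TIMEOUT_MS 10000

    struct ring_state {
        uint32_t last_rptr;
        uint64_t last_activity_ms;
    };

    static void lockup_update(struct ring_state *s, uint32_t rptr,
                              uint64_t now)
    {
        s->last_rptr = rptr;
        s->last_activity_ms = now;
    }

    static bool test_lockup(struct ring_state *s, uint32_t rptr,
                            uint64_t now)
    {
        if (rptr != s->last_rptr) {
            lockup_update(s, rptr, now); /* CP still consuming the ring */
            return false;
        }
        return now - s->last_activity_ms >= LOCKUP_TIMEOUT_MS;
    }

    int main(void)
    {
        struct ring_state s = { 0, 0 };

        printf("%d\n", test_lockup(&s, 4, 5000));  /* rptr moved: 0 */
        printf("%d\n", test_lockup(&s, 4, 9000));  /* stalled 4s: 0 */
        printf("%d\n", test_lockup(&s, 4, 16000)); /* stalled 11s: 1 */
        return 0;
    }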
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 4cce47e7dc0d..32059b745728 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -27,23 +27,45 @@
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole"; we always try to allocate
+ * after the last allocated bo. The principle is that in a linear GPU ring
+ * progression, what is after the last bo is the oldest bo we allocated and
+ * thus the first one that should no longer be in use by the GPU.
+ *
+ * If that's not the case, we skip over the bo after last to the closest
+ * done bo if such a one exists. If none exists and we are not asked to
+ * block, we report failure to allocate.
+ *
+ * If we are asked to block, we wait on the oldest fences of all
+ * rings; we just wait for any of those fences to complete.
+ */
#include "drmP.h"
#include "drm.h"
#include "radeon.h"
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
+
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 domain)
{
- int r;
+ int i, r;
+ spin_lock_init(&sa_manager->lock);
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
- INIT_LIST_HEAD(&sa_manager->sa_bo);
+ sa_manager->hole = &sa_manager->olist;
+ INIT_LIST_HEAD(&sa_manager->olist);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ INIT_LIST_HEAD(&sa_manager->flist[i]);
+ }
r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
+ RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
@@ -57,11 +79,15 @@ void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
{
struct radeon_sa_bo *sa_bo, *tmp;
- if (!list_empty(&sa_manager->sa_bo)) {
- dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+ if (!list_empty(&sa_manager->olist)) {
+ sa_manager->hole = &sa_manager->olist;
+ radeon_sa_bo_try_free(sa_manager);
+ if (!list_empty(&sa_manager->olist)) {
+ dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+ }
}
- list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
- list_del_init(&sa_bo->list);
+ list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
+ radeon_sa_bo_remove_locked(sa_bo);
}
radeon_bo_unref(&sa_manager->bo);
sa_manager->size = 0;
@@ -113,77 +139,248 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
return r;
}
-/*
- * Principe is simple, we keep a list of sub allocation in offset
- * order (first entry has offset == 0, last entry has the highest
- * offset).
- *
- * When allocating new object we first check if there is room at
- * the end total_size - (last_object_offset + last_object_size) >=
- * alloc_size. If so we allocate new object there.
- *
- * When there is not enough room at the end, we start waiting for
- * each sub object until we reach object_offset+object_size >=
- * alloc_size, this object then become the sub object we return.
- *
- * Alignment can't be bigger than page size
- */
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+{
+ struct radeon_sa_manager *sa_manager = sa_bo->manager;
+ if (sa_manager->hole == &sa_bo->olist) {
+ sa_manager->hole = sa_bo->olist.prev;
+ }
+ list_del_init(&sa_bo->olist);
+ list_del_init(&sa_bo->flist);
+ radeon_fence_unref(&sa_bo->fence);
+ kfree(sa_bo);
+}
+
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
+{
+ struct radeon_sa_bo *sa_bo, *tmp;
+
+ if (sa_manager->hole->next == &sa_manager->olist)
+ return;
+
+ sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
+ list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
+ if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
+ return;
+ }
+ radeon_sa_bo_remove_locked(sa_bo);
+ }
+}
+
+static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole != &sa_manager->olist) {
+ return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
+ }
+ return 0;
+}
+
+static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole->next != &sa_manager->olist) {
+ return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
+ }
+ return sa_manager->size;
+}
+
+static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo *sa_bo,
+ unsigned size, unsigned align)
+{
+ unsigned soffset, eoffset, wasted;
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+ wasted = (align - (soffset % align)) % align;
+
+ if ((eoffset - soffset) >= (size + wasted)) {
+ soffset += wasted;
+
+ sa_bo->manager = sa_manager;
+ sa_bo->soffset = soffset;
+ sa_bo->eoffset = soffset + size;
+ list_add(&sa_bo->olist, sa_manager->hole);
+ INIT_LIST_HEAD(&sa_bo->flist);
+ sa_manager->hole = &sa_bo->olist;
+ return true;
+ }
+ return false;
+}
+
+static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
+ struct radeon_fence **fences,
+ unsigned *tries)
+{
+ struct radeon_sa_bo *best_bo = NULL;
+ unsigned i, soffset, best, tmp;
+
+ /* if hole points to the end of the buffer */
+ if (sa_manager->hole->next == &sa_manager->olist) {
+ /* try again with its beginning */
+ sa_manager->hole = &sa_manager->olist;
+ return true;
+ }
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ /* to handle wrap around we add sa_manager->size */
+ best = sa_manager->size * 2;
+ /* go over all the fence lists and try to find the sa_bo
+ * closest to the current last
+ */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_sa_bo *sa_bo;
+
+ if (list_empty(&sa_manager->flist[i])) {
+ continue;
+ }
+
+ sa_bo = list_first_entry(&sa_manager->flist[i],
+ struct radeon_sa_bo, flist);
+
+ if (!radeon_fence_signaled(sa_bo->fence)) {
+ fences[i] = sa_bo->fence;
+ continue;
+ }
+
+ /* limit the number of tries each ring gets */
+ if (tries[i] > 2) {
+ continue;
+ }
+
+ tmp = sa_bo->soffset;
+ if (tmp < soffset) {
+ /* wrap around, pretend it's after */
+ tmp += sa_manager->size;
+ }
+ tmp -= soffset;
+ if (tmp < best) {
+ /* this sa bo is the closest one */
+ best = tmp;
+ best_bo = sa_bo;
+ }
+ }
+
+ if (best_bo) {
+ ++tries[best_bo->fence->ring];
+ sa_manager->hole = best_bo->olist.prev;
+
+ /* we knew that this one is signaled,
+ so it's safe to remove it */
+ radeon_sa_bo_remove_locked(best_bo);
+ return true;
+ }
+ return false;
+}
+
int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo *sa_bo,
- unsigned size, unsigned align)
+ struct radeon_sa_bo **sa_bo,
+ unsigned size, unsigned align, bool block)
{
- struct radeon_sa_bo *tmp;
- struct list_head *head;
- unsigned offset = 0, wasted = 0;
+ struct radeon_fence *fences[RADEON_NUM_RINGS];
+ unsigned tries[RADEON_NUM_RINGS];
+ int i, r = -ENOMEM;
BUG_ON(align > RADEON_GPU_PAGE_SIZE);
BUG_ON(size > sa_manager->size);
- /* no one ? */
- head = sa_manager->sa_bo.prev;
- if (list_empty(&sa_manager->sa_bo)) {
- goto out;
+ *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
+ if ((*sa_bo) == NULL) {
+ return -ENOMEM;
}
+ (*sa_bo)->manager = sa_manager;
+ (*sa_bo)->fence = NULL;
+ INIT_LIST_HEAD(&(*sa_bo)->olist);
+ INIT_LIST_HEAD(&(*sa_bo)->flist);
- /* look for a hole big enough */
- offset = 0;
- list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
- /* room before this object ? */
- if ((tmp->offset - offset) >= size) {
- head = tmp->list.prev;
- goto out;
+ spin_lock(&sa_manager->lock);
+ do {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ fences[i] = NULL;
+ tries[i] = 0;
}
- offset = tmp->offset + tmp->size;
- wasted = offset % align;
- if (wasted) {
- wasted = align - wasted;
+
+ do {
+ radeon_sa_bo_try_free(sa_manager);
+
+ if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
+ size, align)) {
+ spin_unlock(&sa_manager->lock);
+ return 0;
+ }
+
+ /* see if we can skip over some allocations */
+ } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+
+ if (block) {
+ spin_unlock(&sa_manager->lock);
+ r = radeon_fence_wait_any(rdev, fences, false);
+ spin_lock(&sa_manager->lock);
+ if (r) {
+ /* if we have nothing to wait for we
+ are practically out of memory */
+ if (r == -ENOENT) {
+ r = -ENOMEM;
+ }
+ goto out_err;
+ }
}
- offset += wasted;
- }
- /* room at the end ? */
- head = sa_manager->sa_bo.prev;
- tmp = list_entry(head, struct radeon_sa_bo, list);
- offset = tmp->offset + tmp->size;
- wasted = offset % align;
- if (wasted) {
- wasted = align - wasted;
- }
- offset += wasted;
- if ((sa_manager->size - offset) < size) {
- /* failed to find somethings big enough */
- return -ENOMEM;
+ } while (block);
+
+out_err:
+ spin_unlock(&sa_manager->lock);
+ kfree(*sa_bo);
+ *sa_bo = NULL;
+ return r;
+}
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence)
+{
+ struct radeon_sa_manager *sa_manager;
+
+ if (sa_bo == NULL || *sa_bo == NULL) {
+ return;
}
-out:
- sa_bo->manager = sa_manager;
- sa_bo->offset = offset;
- sa_bo->size = size;
- list_add(&sa_bo->list, head);
- return 0;
+ sa_manager = (*sa_bo)->manager;
+ spin_lock(&sa_manager->lock);
+ if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+ (*sa_bo)->fence = radeon_fence_ref(fence);
+ list_add_tail(&(*sa_bo)->flist,
+ &sa_manager->flist[fence->ring]);
+ } else {
+ radeon_sa_bo_remove_locked(*sa_bo);
+ }
+ spin_unlock(&sa_manager->lock);
+ *sa_bo = NULL;
}
-void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
+#if defined(CONFIG_DEBUG_FS)
+void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+ struct seq_file *m)
{
- list_del_init(&sa_bo->list);
+ struct radeon_sa_bo *i;
+
+ spin_lock(&sa_manager->lock);
+ list_for_each_entry(i, &sa_manager->olist, olist) {
+ if (&i->olist == sa_manager->hole) {
+ seq_printf(m, ">");
+ } else {
+ seq_printf(m, " ");
+ }
+ seq_printf(m, "[0x%08x 0x%08x] size %8d",
+ i->soffset, i->eoffset, i->eoffset - i->soffset);
+ if (i->fence) {
+ seq_printf(m, " protected by 0x%016llx on ring %d",
+ i->fence->seq, i->fence->ring);
+ }
+ seq_printf(m, "\n");
+ }
+ spin_unlock(&sa_manager->lock);
}
+#endif
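
The heart of the new allocator is radeon_sa_bo_try_alloc() above:
compute the free range between the hole and the next allocation, pay
the alignment cost at the front, and succeed only if the remainder
still fits. The offset arithmetic in isolation, as a standalone sketch:

    /* The hole-fitting arithmetic: align the start of the hole, then
     * check that the request still fits before the next allocation. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool try_fit(unsigned soffset, unsigned eoffset,
                        unsigned size, unsigned align, unsigned *start)
    {
        unsigned wasted = (align - (soffset % align)) % align;

        if (eoffset - soffset >= size + wasted) {
            *start = soffset + wasted;
            return true;
        }
        return false;
    }

    int main(void)
    {
        unsigned start;

        /* hole [100, 400), 256 bytes aligned to 128: wasted = 28,
         * 300 >= 284, so it fits at offset 128 */
        if (try_fit(100, 400, 256, 128, &start))
            printf("fits at %u\n", start);

        /* hole [100, 380): 280 < 284, does not fit */
        if (!try_fit(100, 380, 256, 128, &start))
            puts("does not fit");
        return 0;
    }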
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 61dd4e3c9209..e2ace5dce117 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -31,148 +31,107 @@
#include "drm.h"
#include "radeon.h"
-static int radeon_semaphore_add_bo(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo;
- unsigned long irq_flags;
- uint64_t gpu_addr;
- uint32_t *cpu_ptr;
- int r, i;
-
-
- bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
- if (bo == NULL) {
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&bo->free);
- INIT_LIST_HEAD(&bo->list);
- bo->nused = 0;
-
- r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
- if (r) {
- dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
- kfree(bo);
- return r;
- }
- gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
- gpu_addr += bo->ib->sa_bo.offset;
- cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
- cpu_ptr += (bo->ib->sa_bo.offset >> 2);
- for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
- bo->semaphores[i].gpu_addr = gpu_addr;
- bo->semaphores[i].cpu_ptr = cpu_ptr;
- bo->semaphores[i].bo = bo;
- list_add_tail(&bo->semaphores[i].list, &bo->free);
- gpu_addr += 8;
- cpu_ptr += 2;
- }
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
- return 0;
-}
-
-static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
- struct radeon_semaphore_bo *bo)
-{
- radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
- radeon_fence_unref(&bo->ib->fence);
- list_del(&bo->list);
- kfree(bo);
-}
-
-void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo, *n;
-
- if (list_empty(&rdev->semaphore_drv.bo)) {
- return;
- }
- /* only shrink if first bo has free semaphore */
- bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
- if (list_empty(&bo->free)) {
- return;
- }
- list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
- if (bo->nused)
- continue;
- radeon_semaphore_del_bo_locked(rdev, bo);
- }
-}
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- struct radeon_semaphore_bo *bo;
- unsigned long irq_flags;
- bool do_retry = true;
int r;
-retry:
- *semaphore = NULL;
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
- if (list_empty(&bo->free))
- continue;
- *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
- (*semaphore)->cpu_ptr[0] = 0;
- (*semaphore)->cpu_ptr[1] = 0;
- list_del(&(*semaphore)->list);
- bo->nused++;
- break;
- }
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
-
+ *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
- if (do_retry) {
- do_retry = false;
- r = radeon_semaphore_add_bo(rdev);
- if (r)
- return r;
- goto retry;
- }
return -ENOMEM;
}
-
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+ &(*semaphore)->sa_bo, 8, 8, true);
+ if (r) {
+ kfree(*semaphore);
+ *semaphore = NULL;
+ return r;
+ }
+ (*semaphore)->waiters = 0;
+ (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
+ *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
return 0;
}
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
+ --semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
}
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
+ ++semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
}
-void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore)
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ bool sync_to[RADEON_NUM_RINGS],
+ int dst_ring)
{
- unsigned long irq_flags;
+ int i = 0, r;
+
+ mutex_lock(&rdev->ring_lock);
+ r = radeon_ring_alloc(rdev, &rdev->ring[dst_ring], RADEON_NUM_RINGS * 8);
+ if (r) {
+ goto error;
+ }
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ /* no need to sync to our own or unused rings */
+ if (!sync_to[i] || i == dst_ring)
+ continue;
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- semaphore->bo->nused--;
- list_add_tail(&semaphore->list, &semaphore->bo->free);
- radeon_semaphore_shrink_locked(rdev);
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[i].ready) {
+ dev_err(rdev->dev, "Trying to sync to a disabled ring!");
+ r = -EINVAL;
+ goto error;
+ }
+
+ r = radeon_ring_alloc(rdev, &rdev->ring[i], 8);
+ if (r) {
+ goto error;
+ }
+
+ radeon_semaphore_emit_signal(rdev, i, semaphore);
+ radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
+
+ radeon_ring_commit(rdev, &rdev->ring[i]);
+ }
+
+ radeon_ring_commit(rdev, &rdev->ring[dst_ring]);
+ mutex_unlock(&rdev->ring_lock);
+
+ return 0;
+
+error:
+ /* unlock all locks taken so far */
+ for (--i; i >= 0; --i) {
+ if (sync_to[i] || i == dst_ring) {
+ radeon_ring_undo(&rdev->ring[i]);
+ }
+ }
+ radeon_ring_undo(&rdev->ring[dst_ring]);
+ mutex_unlock(&rdev->ring_lock);
+ return r;
}
-void radeon_semaphore_driver_fini(struct radeon_device *rdev)
+void radeon_semaphore_free(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence)
{
- struct radeon_semaphore_bo *bo, *n;
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- /* we force to free everything */
- list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
- if (!list_empty(&bo->free)) {
- dev_err(rdev->dev, "still in use semaphore\n");
- }
- radeon_semaphore_del_bo_locked(rdev, bo);
+ if (semaphore == NULL) {
+ return;
+ }
+ if (semaphore->waiters > 0) {
+ dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
+ " hardware lockup imminent!\n", semaphore);
}
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+ radeon_sa_bo_free(rdev, &semaphore->sa_bo, fence);
+ kfree(semaphore);
}
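
For context, a minimal sketch (not part of the patch) of the new semaphore lifecycle from a caller's point of view; src_ring, dst_ring and fence are illustrative names, and error paths are trimmed:

	struct radeon_semaphore *sem;
	bool sync_to[RADEON_NUM_RINGS] = { };
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;
	sync_to[src_ring] = true;	/* ring whose work we must wait for */
	r = radeon_semaphore_sync_rings(rdev, sem, sync_to, dst_ring);
	if (r) {
		/* nothing was emitted, so the slot can be freed immediately */
		radeon_semaphore_free(rdev, sem, NULL);
		return r;
	}
	/* pass a fence so the 8-byte slot is not recycled before the GPU is done */
	radeon_semaphore_free(rdev, sem, fence);
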
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index dc5dcf483aa3..efff929ea49d 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -59,7 +59,7 @@ void radeon_test_moves(struct radeon_device *rdev)
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &vram_obj);
+ NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -78,7 +78,7 @@ void radeon_test_moves(struct radeon_device *rdev)
void **vram_start, **vram_end;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
+ RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_cleanup;
@@ -317,7 +317,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
out_cleanup:
if (semaphore)
- radeon_semaphore_free(rdev, semaphore);
+ radeon_semaphore_free(rdev, semaphore, NULL);
if (fence1)
radeon_fence_unref(&fence1);
@@ -437,7 +437,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
out_cleanup:
if (semaphore)
- radeon_semaphore_free(rdev, semaphore);
+ radeon_semaphore_free(rdev, semaphore, NULL);
if (fenceA)
radeon_fence_unref(&fenceA);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f493c6403af5..c94a2257761f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -222,8 +222,9 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
- struct radeon_fence *fence;
- int r, i;
+ struct radeon_fence *fence, *old_fence;
+ struct radeon_semaphore *sem = NULL;
+ int r;
rdev = radeon_get_rdev(bo->bdev);
r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
@@ -242,6 +243,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ radeon_fence_unref(&fence);
return -EINVAL;
}
switch (new_mem->mem_type) {
@@ -253,42 +255,36 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ radeon_fence_unref(&fence);
return -EINVAL;
}
if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
+ radeon_fence_unref(&fence);
return -EINVAL;
}
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
/* sync other rings */
- if (rdev->family >= CHIP_R600) {
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- /* no need to sync to our own or unused rings */
- if (i == radeon_copy_ring_index(rdev) || !rdev->ring[i].ready)
- continue;
-
- if (!fence->semaphore) {
- r = radeon_semaphore_create(rdev, &fence->semaphore);
- /* FIXME: handle semaphore error */
- if (r)
- continue;
- }
+ old_fence = bo->sync_obj;
+ if (old_fence && old_fence->ring != fence->ring
+ && !radeon_fence_signaled(old_fence)) {
+ bool sync_to_ring[RADEON_NUM_RINGS] = { };
+ sync_to_ring[old_fence->ring] = true;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ radeon_fence_unref(&fence);
+ return r;
+ }
- r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
- /* FIXME: handle ring lock error */
- if (r)
- continue;
- radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
- radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
-
- r = radeon_ring_lock(rdev, &rdev->ring[radeon_copy_ring_index(rdev)], 3);
- /* FIXME: handle ring lock error */
- if (r)
- continue;
- radeon_semaphore_emit_wait(rdev, radeon_copy_ring_index(rdev), fence->semaphore);
- radeon_ring_unlock_commit(rdev, &rdev->ring[radeon_copy_ring_index(rdev)]);
+ r = radeon_semaphore_sync_rings(rdev, sem,
+ sync_to_ring, fence->ring);
+ if (r) {
+ radeon_semaphore_free(rdev, sem, NULL);
+ radeon_fence_unref(&fence);
+ return r;
}
}
@@ -298,6 +294,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
evict, no_wait_reserve, no_wait_gpu, new_mem);
+ radeon_semaphore_free(rdev, sem, fence);
radeon_fence_unref(&fence);
return r;
}
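
For context (not part of the patch), the new sync decision in radeon_move_blit() reduces to a single test: a cross-ring semaphore is emitted only when the buffer's last fence lives on a different ring and has not signaled yet.

	/* old_fence is bo->sync_obj, fence is the fence for the blit itself */
	if (old_fence && old_fence->ring != fence->ring &&
	    !radeon_fence_signaled(old_fence)) {
		/* one semaphore: signal on old_fence->ring, wait on fence->ring */
	}
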
@@ -614,10 +611,18 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
return 0;
+ if (slave && ttm->sg) {
+ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+ gtt->ttm.dma_address, ttm->num_pages);
+ ttm->state = tt_unbound;
+ return 0;
+ }
+
rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
@@ -658,6 +663,10 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+ if (slave)
+ return;
rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
@@ -729,8 +738,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
return r;
}
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
- &rdev->stollen_vga_memory);
+ RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->stollen_vga_memory);
if (r) {
return r;
}
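
For context (not part of the patch): a tt marked TTM_PAGE_FLAG_SG is backed by pages owned by a dma-buf exporter, so populate only expands the imported sg table into the page and dma-address arrays, and unpopulate must not release pages it does not own. The populate short-circuit, isolated:

	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave && ttm->sg) {
		/* fill ttm->pages[] and the dma address array from the exporter's sgt */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;	/* nothing was allocated here */
		return 0;
	}
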
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 4cf381b3a6d8..a464eb5e2df2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -430,12 +430,9 @@ static int rs400_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
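
For context: the same startup simplification recurs in rs600, rs690, rv515 and rv770 below. The body of radeon_ib_ring_tests() is not part of this excerpt; a hedged sketch of what it is assumed to do, namely run the IB test on every ready ring and keep the failure bookkeeping in one place:

	/* assumed shape of radeon_ib_ring_tests(); its body is not in this diff */
	int radeon_ib_ring_tests(struct radeon_device *rdev)
	{
		unsigned i;
		int r;

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			struct radeon_ring *ring = &rdev->ring[i];

			if (!ring->ready)
				continue;
			r = radeon_ib_test(rdev, i, ring);
			if (r) {
				ring->ready = false;	/* never schedule on a broken ring */
				return r;
			}
		}
		return 0;
	}
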
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d25cf869d08d..25f9eef12c42 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -396,7 +396,6 @@ int rs600_asic_reset(struct radeon_device *rdev)
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
- rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
@@ -553,6 +552,12 @@ int rs600_irq_set(struct radeon_device *rdev)
~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+ u32 hdmi0;
+ if (ASIC_IS_DCE2(rdev))
+ hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+ ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ else
+ hdmi0 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -579,10 +584,15 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.hpd[1]) {
hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
}
+ if (rdev->irq.afmt[0]) {
+ hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ }
WREG32(R_000040_GEN_INT_CNTL, tmp);
WREG32(R_006540_DxMODE_INT_MASK, mode_int);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+ if (ASIC_IS_DCE2(rdev))
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
return 0;
}
@@ -622,6 +632,17 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.r500.disp_int = 0;
}
+ if (ASIC_IS_DCE2(rdev)) {
+ rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
+ S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
+ if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+ tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
+ tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ }
+ } else
+ rdev->irq.stat_regs.r500.hdmi0_status = 0;
+
if (irqs) {
WREG32(R_000044_GEN_INT_STATUS, irqs);
}
@@ -630,6 +651,9 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
void rs600_irq_disable(struct radeon_device *rdev)
{
+ u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+ ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(R_000040_GEN_INT_CNTL, 0);
WREG32(R_006540_DxMODE_INT_MASK, 0);
/* Wait and acknowledge irq */
@@ -641,15 +665,20 @@ int rs600_irq_process(struct radeon_device *rdev)
{
u32 status, msi_rearm;
bool queue_hotplug = false;
+ bool queue_hdmi = false;
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
status = rs600_irq_ack(rdev);
- if (!status && !rdev->irq.stat_regs.r500.disp_int) {
+ if (!status &&
+ !rdev->irq.stat_regs.r500.disp_int &&
+ !rdev->irq.stat_regs.r500.hdmi0_status) {
return IRQ_NONE;
}
- while (status || rdev->irq.stat_regs.r500.disp_int) {
+ while (status ||
+ rdev->irq.stat_regs.r500.disp_int ||
+ rdev->irq.stat_regs.r500.hdmi0_status) {
/* SW interrupt */
if (G_000044_SW_INT(status)) {
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
@@ -687,12 +716,18 @@ int rs600_irq_process(struct radeon_device *rdev)
queue_hotplug = true;
DRM_DEBUG("HPD2\n");
}
+ if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+ queue_hdmi = true;
+ DRM_DEBUG("HDMI0\n");
+ }
status = rs600_irq_ack(rdev);
}
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
@@ -883,12 +918,9 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index a27c13ac47c3..f1f89414dc63 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -485,6 +485,20 @@
#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
+#define R_007404_HDMI0_STATUS 0x007404
+#define S_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) & 0x1) << 28)
+#define G_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) >> 28) & 0x1)
+#define C_007404_HDMI0_AZ_FORMAT_WTRIG 0xEFFFFFFF
+#define S_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) & 0x1) << 29)
+#define G_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) >> 29) & 0x1)
+#define C_007404_HDMI0_AZ_FORMAT_WTRIG_INT 0xDFFFFFFF
+#define R_007408_HDMI0_AUDIO_PACKET_CONTROL 0x007408
+#define S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) & 0x1) << 28)
+#define G_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) >> 28) & 0x1)
+#define C_007408_HDMI0_AZ_FORMAT_WTRIG_MASK 0xEFFFFFFF
+#define S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) & 0x1) << 29)
+#define G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) >> 29) & 0x1)
+#define C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK 0xDFFFFFFF
/* MC registers */
#define R_000000_MC_STATUS 0x000000
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index f2c3b9d75f18..3277ddecfe9f 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -647,12 +647,9 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index d8d78fe17946..7f08cedb5333 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -412,12 +412,10 @@ static int rv515_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index cdab1aeaed6e..c2f473bc13b8 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1114,12 +1114,9 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "IB test failed (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
@@ -1178,10 +1175,6 @@ int rv770_init(struct radeon_device *rdev)
{
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -1281,7 +1274,6 @@ void rv770_fini(struct radeon_device *rdev)
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 79fa588e9ed5..9c549f702f2f 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -353,6 +353,197 @@
#define SRBM_STATUS 0x0E50
+/* DCE 3.2 HDMI */
+#define HDMI_CONTROL 0x7400
+# define HDMI_KEEPOUT_MODE (1 << 0)
+# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
+# define HDMI_ERROR_ACK (1 << 8)
+# define HDMI_ERROR_MASK (1 << 9)
+#define HDMI_STATUS 0x7404
+# define HDMI_ACTIVE_AVMUTE (1 << 0)
+# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
+# define HDMI_VBI_PACKET_ERROR (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL 0x7408
+# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL 0x740c
+# define HDMI_ACR_SEND (1 << 0)
+# define HDMI_ACR_CONT (1 << 1)
+# define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
+# define HDMI_ACR_HW 0
+# define HDMI_ACR_32 1
+# define HDMI_ACR_44 2
+# define HDMI_ACR_48 3
+# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI_ACR_AUTO_SEND (1 << 12)
+#define HDMI_VBI_PACKET_CONTROL 0x7410
+# define HDMI_NULL_SEND (1 << 0)
+# define HDMI_GC_SEND (1 << 4)
+# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0 0x7414
+# define HDMI_AVI_INFO_SEND (1 << 0)
+# define HDMI_AVI_INFO_CONT (1 << 1)
+# define HDMI_AUDIO_INFO_SEND (1 << 4)
+# define HDMI_AUDIO_INFO_CONT (1 << 5)
+# define HDMI_MPEG_INFO_SEND (1 << 8)
+# define HDMI_MPEG_INFO_CONT (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1 0x7418
+# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL 0x741c
+# define HDMI_GENERIC0_SEND (1 << 0)
+# define HDMI_GENERIC0_CONT (1 << 1)
+# define HDMI_GENERIC1_SEND (1 << 4)
+# define HDMI_GENERIC1_CONT (1 << 5)
+# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI_GC 0x7428
+# define HDMI_GC_AVMUTE (1 << 0)
+#define AFMT_AUDIO_PACKET_CONTROL2 0x742c
+# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
+# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
+# define AFMT_60958_CS_SOURCE (1 << 4)
+# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
+# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0 0x7454
+# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
+# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
+# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
+# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
+# define AFMT_AVI_INFO_Y_RGB 0
+# define AFMT_AVI_INFO_Y_YCBCR422 1
+# define AFMT_AVI_INFO_Y_YCBCR444 2
+# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
+# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
+# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
+# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
+# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
+# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
+# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1 0x7458
+# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2 0x745c
+# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3 0x7460
+# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0 0x7464
+# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1 0x7468
+# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
+# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR 0x746c
+#define AFMT_GENERIC0_0 0x7470
+#define AFMT_GENERIC0_1 0x7474
+#define AFMT_GENERIC0_2 0x7478
+#define AFMT_GENERIC0_3 0x747c
+#define AFMT_GENERIC0_4 0x7480
+#define AFMT_GENERIC0_5 0x7484
+#define AFMT_GENERIC0_6 0x7488
+#define AFMT_GENERIC1_HDR 0x748c
+#define AFMT_GENERIC1_0 0x7490
+#define AFMT_GENERIC1_1 0x7494
+#define AFMT_GENERIC1_2 0x7498
+#define AFMT_GENERIC1_3 0x749c
+#define AFMT_GENERIC1_4 0x74a0
+#define AFMT_GENERIC1_5 0x74a4
+#define AFMT_GENERIC1_6 0x74a8
+#define HDMI_ACR_32_0 0x74ac
+# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1 0x74b0
+# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0 0x74b4
+# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1 0x74b8
+# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0 0x74bc
+# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1 0x74c0
+# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0 0x74c4
+#define HDMI_ACR_STATUS_1 0x74c8
+#define AFMT_AUDIO_INFO0 0x74cc
+# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
+#define AFMT_AUDIO_INFO1 0x74d0
+# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#define AFMT_60958_0 0x74d4
+# define AFMT_60958_CS_A(x) (((x) & 1) << 0)
+# define AFMT_60958_CS_B(x) (((x) & 1) << 1)
+# define AFMT_60958_CS_C(x) (((x) & 1) << 2)
+# define AFMT_60958_CS_D(x) (((x) & 3) << 3)
+# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
+# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define AFMT_60958_1 0x74d8
+# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
+# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
+# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL 0x74dc
+# define AFMT_AUDIO_CRC_EN (1 << 0)
+#define AFMT_RAMP_CONTROL0 0x74e0
+# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_RAMP_DATA_SIGN (1 << 31)
+#define AFMT_RAMP_CONTROL1 0x74e4
+# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2 0x74e8
+# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3 0x74ec
+# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_60958_2 0x74f0
+# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+#define AFMT_STATUS 0x7600
+# define AFMT_AUDIO_ENABLE (1 << 4)
+# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x7604
+# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+# define AFMT_AUDIO_TEST_EN (1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+# define AFMT_60958_CS_UPDATE (1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL 0x7608
+# define AFMT_GENERIC0_UPDATE (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0 0x760c
+# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
+# define AFMT_MPEG_INFO_UPDATE (1 << 10)
+#define AFMT_GENERIC0_7 0x7610
+/* second instance starts at 0x7800 */
+#define HDMI_OFFSET0 (0x7400 - 0x7400)
+#define HDMI_OFFSET1 (0x7800 - 0x7400)
+
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 27bda986fc2b..549732e56ca9 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2217,8 +2217,6 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
u32 srbm_status;
u32 grbm_status, grbm_status2;
u32 grbm_status_se0, grbm_status_se1;
- struct r100_gpu_lockup *lockup = &rdev->config.si.lockup;
- int r;
srbm_status = RREG32(SRBM_STATUS);
grbm_status = RREG32(GRBM_STATUS);
@@ -2226,20 +2224,12 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- /* XXX deal with CP0,1,2 */
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
static int si_gpu_soft_reset(struct radeon_device *rdev)
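
For context (not part of the patch): si_gpu_is_lockup() swaps the r100-specific lockup bookkeeping for two shared ring-level helpers, so the check now has the same shape on every ASIC:

	if (!(grbm_status & GUI_ACTIVE)) {
		radeon_ring_lockup_update(ring);	/* engine idle: record progress, no lockup */
		return false;
	}
	radeon_ring_force_activity(rdev, ring);		/* emit NOPs to nudge the CP */
	return radeon_ring_test_lockup(rdev, ring);	/* locked up if rptr made no progress */
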
@@ -2275,6 +2265,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
SOFT_RESET_GDS |
SOFT_RESET_PA |
SOFT_RESET_SC |
+ SOFT_RESET_BCI |
SOFT_RESET_SPI |
SOFT_RESET_SX |
SOFT_RESET_TC |
@@ -2985,7 +2976,8 @@ int si_rlc_init(struct radeon_device *rdev)
/* save restore block */
if (rdev->rlc.save_restore_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.save_restore_obj);
+ RADEON_GEM_DOMAIN_VRAM, NULL,
+ &rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
@@ -3009,7 +3001,8 @@ int si_rlc_init(struct radeon_device *rdev)
/* clear state block */
if (rdev->rlc.clear_state_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.clear_state_obj);
+ RADEON_GEM_DOMAIN_VRAM, NULL,
+ &rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
si_rlc_fini(rdev);
@@ -3216,6 +3209,8 @@ static int si_irq_init(struct radeon_device *rdev)
/* force the active interrupt state to all disabled */
si_disable_interrupt_state(rdev);
+ pci_set_master(rdev->pdev);
+
/* enable irqs */
si_enable_interrupts(rdev);
@@ -3994,10 +3989,6 @@ int si_init(struct radeon_device *rdev)
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -4117,7 +4108,6 @@ void si_fini(struct radeon_device *rdev)
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index cb1ee4e0050a..6eb507a5d130 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -735,7 +735,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
return -EINVAL;
}
drm_core_ioremap(dev->agp_buffer_map, dev);
- if (!dev->agp_buffer_map) {
+ if (!dev->agp_buffer_map->handle) {
DRM_ERROR("failed to ioremap DMA buffer region!\n");
savage_do_cleanup_bci(dev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1f5c67c579cf..36792bd4da77 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -343,6 +343,16 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
if (unlikely(bo->ttm == NULL))
ret = -ENOMEM;
break;
+ case ttm_bo_type_sg:
+ bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags | TTM_PAGE_FLAG_SG,
+ glob->dummy_read_page);
+ if (unlikely(bo->ttm == NULL)) {
+ ret = -ENOMEM;
+ break;
+ }
+ bo->ttm->sg = bo->sg;
+ break;
default:
pr_err("Illegal buffer object type\n");
ret = -EINVAL;
@@ -1169,6 +1179,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bool interruptible,
struct file *persistent_swap_storage,
size_t acc_size,
+ struct sg_table *sg,
void (*destroy) (struct ttm_buffer_object *))
{
int ret = 0;
@@ -1223,6 +1234,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->seq_valid = false;
bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
+ bo->sg = sg;
atomic_inc(&bo->glob->bo_count);
ret = ttm_bo_check_placement(bo, placement);
@@ -1233,7 +1245,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
- if (bo->type == ttm_bo_type_device) {
+ if (bo->type == ttm_bo_type_device ||
+ bo->type == ttm_bo_type_sg) {
ret = ttm_bo_setup_vm(bo);
if (ret)
goto out_err;
@@ -1312,7 +1325,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
buffer_start, interruptible,
- persistent_swap_storage, acc_size, NULL);
+ persistent_swap_storage, acc_size, NULL, NULL);
if (likely(ret == 0))
*p_bo = bo;
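
For context, a hedged sketch (not part of the patch) of how a prime-importing driver is expected to use the new sg parameter; bo, placement, acc_size and destroy_fn are illustrative, and sg is assumed to come from dma_buf_map_attachment():

	/* importer path: hand the mapped sg table to ttm_bo_init() */
	ret = ttm_bo_init(bdev, bo, size, ttm_bo_type_sg,
			  placement, 0, 0, interruptible,
			  NULL /* persistent swap */, acc_size,
			  sg, destroy_fn);
	/* ordinary callers keep passing NULL for sg, as in the hunks above */
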
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 53673907a6a0..4d02c46a9420 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -38,7 +38,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
drm_unplug_dev(dev);
}
-static struct vm_operations_struct udl_gem_vm_ops = {
+static const struct vm_operations_struct udl_gem_vm_ops = {
.fault = udl_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -57,7 +57,7 @@ static const struct file_operations udl_driver_fops = {
};
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = udl_driver_load,
.unload = udl_driver_unload,
@@ -70,6 +70,10 @@ static struct drm_driver driver = {
.dumb_map_offset = udl_gem_mmap,
.dumb_destroy = udl_dumb_destroy,
.fops = &udl_driver_fops,
+
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = udl_gem_prime_import,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 96820d03a303..fccd361f7b50 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -66,6 +66,7 @@ struct udl_gem_object {
struct drm_gem_object base;
struct page **pages;
void *vmapping;
+ struct sg_table *sg;
};
#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
@@ -118,6 +119,8 @@ int udl_gem_init_object(struct drm_gem_object *obj);
void udl_gem_free_object(struct drm_gem_object *gem_obj);
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size);
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 4d9c3a5d8a45..a029ee39b0c5 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -593,11 +593,20 @@ udl_fb_user_fb_create(struct drm_device *dev,
struct drm_gem_object *obj;
struct udl_framebuffer *ufb;
int ret;
+ uint32_t size;
obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ if (size > obj->size) {
+ DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
+ return ERR_PTR(-ENOMEM);
+ }
+
ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
if (ufb == NULL)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 92f19ef329b0..40efd32f7dce 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -9,6 +9,7 @@
#include "drmP.h"
#include "udl_drv.h"
#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size)
@@ -161,6 +162,12 @@ static void udl_gem_put_pages(struct udl_gem_object *obj)
int page_count = obj->base.size / PAGE_SIZE;
int i;
+ if (obj->base.import_attach) {
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+ return;
+ }
+
for (i = 0; i < page_count; i++)
page_cache_release(obj->pages[i]);
@@ -195,6 +202,9 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
struct udl_gem_object *obj = to_udl_bo(gem_obj);
+ if (gem_obj->import_attach)
+ drm_prime_gem_destroy(gem_obj, obj->sg);
+
if (obj->vmapping)
udl_gem_vunmap(obj);
@@ -239,3 +249,68 @@ unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
+
+static int udl_prime_create(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg,
+ struct udl_gem_object **obj_p)
+{
+ struct udl_gem_object *obj;
+ int npages;
+ int i;
+ struct scatterlist *iter;
+
+ npages = size / PAGE_SIZE;
+
+ *obj_p = NULL;
+ obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
+ if (!obj)
+ return -ENOMEM;
+
+ obj->sg = sg;
+ obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (obj->pages == NULL) {
+ DRM_ERROR("obj pages is NULL %d\n", npages);
+ return -ENOMEM;
+ }
+
+ drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+
+ *obj_p = obj;
+ return 0;
+}
+
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct udl_gem_object *uobj;
+ int ret;
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
+ if (ret) {
+ goto fail_unmap;
+ }
+
+ uobj->base.import_attach = attach;
+
+ return &uobj->base;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
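
For context (not part of the patch): udl_gem_prime_import() above follows the standard dma-buf consumer sequence, unwinding in reverse order on failure:

	/* 1. attach the buffer to this device         */
	attach = dma_buf_attach(dma_buf, dev->dev);
	/* 2. map the attachment to obtain an sg table */
	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	/* 3. wrap the sg table in a driver gem object */
	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	/* 4. remember the attachment so free_object can detach */
	uobj->base.import_attach = attach;
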
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index b3ecb3d12a1d..0d7816789da1 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -395,7 +395,7 @@ int udl_modeset_init(struct drm_device *dev)
dev->mode_config.prefer_shadow = 0;
dev->mode_config.preferred_depth = 24;
- dev->mode_config.funcs = (void *)&udl_mode_funcs;
+ dev->mode_config.funcs = &udl_mode_funcs;
drm_mode_create_dirty_info_property(dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 2286d47e5022..6b0078ffa763 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1178,7 +1178,7 @@ err_out:
return &vfb->base;
}
-static struct drm_mode_config_funcs vmw_kms_funcs = {
+static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a37abb581cbb..22bf9a21ec71 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1567,7 +1567,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
0, 0, interruptible,
- NULL, acc_size, bo_free);
+ NULL, acc_size, NULL, bo_free);
return ret;
}
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 96c83a9a76bb..f34838839b08 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -21,6 +21,7 @@ config VGA_SWITCHEROO
bool "Laptop Hybrid Graphics - GPU switching support"
depends on X86
depends on ACPI
+ select VGA_ARB
help
Many laptops released in 2008/9/10 have two GPUs with a multiplexer
to switch between them. This adds support for dynamic switching when
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 58434e804d91..38f9534ac513 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -28,15 +28,16 @@
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>
+#include <linux/vgaarb.h>
+
struct vga_switcheroo_client {
struct pci_dev *pdev;
struct fb_info *fb_info;
int pwr_state;
- void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
- void (*reprobe)(struct pci_dev *pdev);
- bool (*can_switch)(struct pci_dev *pdev);
+ const struct vga_switcheroo_client_ops *ops;
int id;
bool active;
+ struct list_head list;
};
static DEFINE_MUTEX(vgasr_mutex);
@@ -51,16 +52,23 @@ struct vgasr_priv {
struct dentry *switch_file;
int registered_clients;
- struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS];
+ struct list_head clients;
struct vga_switcheroo_handler *handler;
};
+#define ID_BIT_AUDIO 0x100
+#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
+#define client_is_vga(c) ((c)->id == -1 || !client_is_audio(c))
+#define client_id(c) ((c)->id & ~ID_BIT_AUDIO)
+
static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
/* only one switcheroo per system */
-static struct vgasr_priv vgasr_priv;
+static struct vgasr_priv vgasr_priv = {
+ .clients = LIST_HEAD_INIT(vgasr_priv.clients),
+};
int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
{
@@ -86,72 +94,119 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
static void vga_switcheroo_enable(void)
{
- int i;
int ret;
+ struct vga_switcheroo_client *client;
+
/* call the handler to init */
vgasr_priv.handler->init();
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- ret = vgasr_priv.handler->get_client_id(vgasr_priv.clients[i].pdev);
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (client->id != -1)
+ continue;
+ ret = vgasr_priv.handler->get_client_id(client->pdev);
if (ret < 0)
return;
- vgasr_priv.clients[i].id = ret;
+ client->id = ret;
}
vga_switcheroo_debugfs_init(&vgasr_priv);
vgasr_priv.active = true;
}
-int vga_switcheroo_register_client(struct pci_dev *pdev,
- void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
- void (*reprobe)(struct pci_dev *pdev),
- bool (*can_switch)(struct pci_dev *pdev))
+static int register_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active)
{
- int index;
+ struct vga_switcheroo_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->pwr_state = VGA_SWITCHEROO_ON;
+ client->pdev = pdev;
+ client->ops = ops;
+ client->id = id;
+ client->active = active;
mutex_lock(&vgasr_mutex);
- /* don't do IGD vs DIS here */
- if (vgasr_priv.registered_clients & 1)
- index = 1;
- else
- index = 0;
-
- vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
- vgasr_priv.clients[index].pdev = pdev;
- vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
- vgasr_priv.clients[index].reprobe = reprobe;
- vgasr_priv.clients[index].can_switch = can_switch;
- vgasr_priv.clients[index].id = -1;
- if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
- vgasr_priv.clients[index].active = true;
-
- vgasr_priv.registered_clients |= (1 << index);
+ list_add_tail(&client->list, &vgasr_priv.clients);
+ if (client_is_vga(client))
+ vgasr_priv.registered_clients++;
/* if we get two clients + handler */
- if (vgasr_priv.registered_clients == 0x3 && vgasr_priv.handler) {
+ if (!vgasr_priv.active &&
+ vgasr_priv.registered_clients == 2 && vgasr_priv.handler) {
printk(KERN_INFO "vga_switcheroo: enabled\n");
vga_switcheroo_enable();
}
mutex_unlock(&vgasr_mutex);
return 0;
}
+
+int vga_switcheroo_register_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops)
+{
+ return register_client(pdev, ops, -1,
+ pdev == vga_default_device());
+}
EXPORT_SYMBOL(vga_switcheroo_register_client);
+int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active)
+{
+ return register_client(pdev, ops, id | ID_BIT_AUDIO, active);
+}
+EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
+
+static struct vga_switcheroo_client *
+find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
+{
+ struct vga_switcheroo_client *client;
+ list_for_each_entry(client, head, list)
+ if (client->pdev == pdev)
+ return client;
+ return NULL;
+}
+
+static struct vga_switcheroo_client *
+find_client_from_id(struct list_head *head, int client_id)
+{
+ struct vga_switcheroo_client *client;
+ list_for_each_entry(client, head, list)
+ if (client->id == client_id)
+ return client;
+ return NULL;
+}
+
+static struct vga_switcheroo_client *
+find_active_client(struct list_head *head)
+{
+ struct vga_switcheroo_client *client;
+ list_for_each_entry(client, head, list)
+ if (client->active && client_is_vga(client))
+ return client;
+ return NULL;
+}
+
void vga_switcheroo_unregister_client(struct pci_dev *pdev)
{
- int i;
+ struct vga_switcheroo_client *client;
mutex_lock(&vgasr_mutex);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].pdev == pdev) {
- vgasr_priv.registered_clients &= ~(1 << i);
- break;
- }
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (client) {
+ if (client_is_vga(client))
+ vgasr_priv.registered_clients--;
+ list_del(&client->list);
+ kfree(client);
+ }
+ if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
+ printk(KERN_INFO "vga_switcheroo: disabled\n");
+ vga_switcheroo_debugfs_fini(&vgasr_priv);
+ vgasr_priv.active = false;
}
-
- printk(KERN_INFO "vga_switcheroo: disabled\n");
- vga_switcheroo_debugfs_fini(&vgasr_priv);
- vgasr_priv.active = false;
mutex_unlock(&vgasr_mutex);
}
EXPORT_SYMBOL(vga_switcheroo_unregister_client);
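
For context, a hedged sketch (not part of the patch) of driver-side registration against the reworked API; the my_* callbacks, my_audio_ops and audio_pdev are illustrative:

	static const struct vga_switcheroo_client_ops my_gpu_ops = {
		.set_gpu_state = my_set_gpu_state,
		.reprobe       = my_reprobe,
		.can_switch    = my_can_switch,
	};

	/* VGA client: id starts at -1 and is assigned by the handler later */
	vga_switcheroo_register_client(pdev, &my_gpu_ops);
	/* HDMI audio function of the same GPU: id gets ID_BIT_AUDIO or'ed in */
	vga_switcheroo_register_audio_client(audio_pdev, &my_audio_ops,
					     VGA_SWITCHEROO_DIS, true);
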
@@ -159,29 +214,29 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_client);
void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
struct fb_info *info)
{
- int i;
+ struct vga_switcheroo_client *client;
mutex_lock(&vgasr_mutex);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].pdev == pdev) {
- vgasr_priv.clients[i].fb_info = info;
- break;
- }
- }
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (client)
+ client->fb_info = info;
mutex_unlock(&vgasr_mutex);
}
EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
static int vga_switcheroo_show(struct seq_file *m, void *v)
{
- int i;
+ struct vga_switcheroo_client *client;
+ int i = 0;
mutex_lock(&vgasr_mutex);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- seq_printf(m, "%d:%s:%c:%s:%s\n", i,
- vgasr_priv.clients[i].id == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
- vgasr_priv.clients[i].active ? '+' : ' ',
- vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
- pci_name(vgasr_priv.clients[i].pdev));
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ seq_printf(m, "%d:%s%s:%c:%s:%s\n", i,
+ client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
+ client_is_vga(client) ? "" : "-Audio",
+ client->active ? '+' : ' ',
+ client->pwr_state ? "Pwr" : "Off",
+ pci_name(client->pdev));
+ i++;
}
mutex_unlock(&vgasr_mutex);
return 0;
@@ -197,7 +252,7 @@ static int vga_switchon(struct vga_switcheroo_client *client)
if (vgasr_priv.handler->power_state)
vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
/* call the driver callback to turn on device */
- client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
+ client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
client->pwr_state = VGA_SWITCHEROO_ON;
return 0;
}
@@ -205,34 +260,39 @@ static int vga_switchon(struct vga_switcheroo_client *client)
static int vga_switchoff(struct vga_switcheroo_client *client)
{
/* call the driver callback to turn off device */
- client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
+ client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
if (vgasr_priv.handler->power_state)
vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
client->pwr_state = VGA_SWITCHEROO_OFF;
return 0;
}
+static void set_audio_state(int id, int state)
+{
+ struct vga_switcheroo_client *client;
+
+ client = find_client_from_id(&vgasr_priv.clients, id | ID_BIT_AUDIO);
+ if (client && client->pwr_state != state) {
+ client->ops->set_gpu_state(client->pdev, state);
+ client->pwr_state = state;
+ }
+}
+
/* stage one happens before delay */
static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
{
- int i;
- struct vga_switcheroo_client *active = NULL;
+ struct vga_switcheroo_client *active;
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active == true) {
- active = &vgasr_priv.clients[i];
- break;
- }
- }
+ active = find_active_client(&vgasr_priv.clients);
if (!active)
return 0;
if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
vga_switchon(new_client);
- /* swap shadow resource to denote boot VGA device has changed so X starts on new device */
- active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
- new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
+ vga_set_default_device(new_client->pdev);
+ set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
+
return 0;
}
@@ -240,15 +300,9 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
{
int ret;
- int i;
- struct vga_switcheroo_client *active = NULL;
+ struct vga_switcheroo_client *active;
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active == true) {
- active = &vgasr_priv.clients[i];
- break;
- }
- }
+ active = find_active_client(&vgasr_priv.clients);
if (!active)
return 0;
@@ -264,8 +318,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (ret)
return ret;
- if (new_client->reprobe)
- new_client->reprobe(new_client->pdev);
+ if (new_client->ops->reprobe)
+ new_client->ops->reprobe(new_client->pdev);
+
+ set_audio_state(active->id, VGA_SWITCHEROO_OFF);
if (active->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(active);
@@ -274,13 +330,26 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
return 0;
}
+static bool check_can_switch(void)
+{
+ struct vga_switcheroo_client *client;
+
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (!client->ops->can_switch(client->pdev)) {
+ printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id);
+ return false;
+ }
+ }
+ return true;
+}
+
static ssize_t
vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char usercmd[64];
const char *pdev_name;
- int i, ret;
+ int ret;
bool delay = false, can_switch;
bool just_mux = false;
int client_id = -1;
@@ -301,21 +370,21 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
/* pwr off the device not in use */
if (strncmp(usercmd, "OFF", 3) == 0) {
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active)
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (client->active)
continue;
- if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_ON)
- vga_switchoff(&vgasr_priv.clients[i]);
+ if (client->pwr_state == VGA_SWITCHEROO_ON)
+ vga_switchoff(client);
}
goto out;
}
/* pwr on the device not in use */
if (strncmp(usercmd, "ON", 2) == 0) {
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active)
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (client->active)
continue;
- if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_OFF)
- vga_switchon(&vgasr_priv.clients[i]);
+ if (client->pwr_state == VGA_SWITCHEROO_OFF)
+ vga_switchon(client);
}
goto out;
}
@@ -348,13 +417,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
if (client_id == -1)
goto out;
-
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].id == client_id) {
- client = &vgasr_priv.clients[i];
- break;
- }
- }
+ client = find_client_from_id(&vgasr_priv.clients, client_id);
+ if (!client)
+ goto out;
vgasr_priv.delayed_switch_active = false;
@@ -363,23 +428,16 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
goto out;
}
- if (client->active == true)
+ if (client->active)
goto out;
/* okay we want a switch - test if devices are willing to switch */
- can_switch = true;
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
- if (can_switch == false) {
- printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
- break;
- }
- }
+ can_switch = check_can_switch();
if (can_switch == false && delay == false)
goto out;
- if (can_switch == true) {
+ if (can_switch) {
pdev_name = pci_name(client->pdev);
ret = vga_switchto_stage1(client);
if (ret)
@@ -451,10 +509,8 @@ fail:
int vga_switcheroo_process_delayed_switch(void)
{
- struct vga_switcheroo_client *client = NULL;
+ struct vga_switcheroo_client *client;
const char *pdev_name;
- bool can_switch = true;
- int i;
int ret;
int err = -EINVAL;
@@ -464,17 +520,9 @@ int vga_switcheroo_process_delayed_switch(void)
printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].id == vgasr_priv.delayed_client_id)
- client = &vgasr_priv.clients[i];
- can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
- if (can_switch == false) {
- printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
- break;
- }
- }
-
- if (can_switch == false || client == NULL)
+ client = find_client_from_id(&vgasr_priv.clients,
+ vgasr_priv.delayed_client_id);
+ if (!client || !check_can_switch())
goto err;
pdev_name = pci_name(client->pdev);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 111d956d8e7d..3df8fc0ec01a 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -136,6 +136,13 @@ struct pci_dev *vga_default_device(void)
{
return vga_default;
}
+
+EXPORT_SYMBOL_GPL(vga_default_device);
+
+void vga_set_default_device(struct pci_dev *pdev)
+{
+ vga_default = pdev;
+}
#endif
static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
@@ -605,10 +612,12 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
goto bail;
}
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
if (vga_default == pdev) {
pci_dev_put(vga_default);
vga_default = NULL;
}
+#endif
if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
vga_decode_count--;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index a55e248618cd..86c63fe45d11 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -27,6 +27,7 @@
#include <linux/security.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
+#include <linux/vgaarb.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
@@ -417,6 +418,10 @@ static ssize_t
boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *vga_dev = vga_default_device();
+
+ if (vga_dev)
+ return sprintf(buf, "%u\n", (pdev == vga_dev));
return sprintf(buf, "%u\n",
!!(pdev->resource[PCI_ROM_RESOURCE].flags &
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
index 490a7f15604b..8b864afb40b6 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -36,12 +36,6 @@ struct omap_crtc {
struct drm_framebuffer *old_fb;
};
-static void omap_crtc_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size)
-{
- /* not supported.. at least not yet */
-}
-
static void omap_crtc_destroy(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -198,7 +192,6 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
}
static const struct drm_crtc_funcs omap_crtc_funcs = {
- .gamma_set = omap_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = omap_crtc_destroy,
.page_flip = omap_crtc_page_flip_locked,
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 0d2acca376ca..4beab9447ceb 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -58,7 +58,7 @@ static void omap_fb_output_poll_changed(struct drm_device *dev)
}
}
-static struct drm_mode_config_funcs omap_mode_config_funcs = {
+static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.fb_create = omap_framebuffer_create,
.output_poll_changed = omap_fb_output_poll_changed,
};
@@ -726,7 +726,7 @@ static void dev_irq_uninstall(struct drm_device *dev)
DBG("irq_uninstall: dev=%p", dev);
}
-static struct vm_operations_struct omap_gem_vm_ops = {
+static const struct vm_operations_struct omap_gem_vm_ops = {
.fault = omap_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 784139aed079..b4a632ada401 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -18,6 +18,8 @@
static bool request_mem_succeeded = false;
+static struct pci_dev *default_vga;
+
static struct fb_var_screeninfo efifb_defined __devinitdata = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
@@ -298,35 +300,72 @@ static struct fb_ops efifb_ops = {
.fb_imageblit = cfb_imageblit,
};
+struct pci_dev *vga_default_device(void)
+{
+ return default_vga;
+}
+
+EXPORT_SYMBOL_GPL(vga_default_device);
+
+void vga_set_default_device(struct pci_dev *pdev)
+{
+ default_vga = pdev;
+}
+
static int __init efifb_setup(char *options)
{
char *this_opt;
int i;
+ struct pci_dev *dev = NULL;
+
+ if (options && *options) {
+ while ((this_opt = strsep(&options, ",")) != NULL) {
+ if (!*this_opt) continue;
+
+ for (i = 0; i < M_UNKNOWN; i++) {
+ if (!strcmp(this_opt, dmi_list[i].optname) &&
+ dmi_list[i].base != 0) {
+ screen_info.lfb_base = dmi_list[i].base;
+ screen_info.lfb_linelength = dmi_list[i].stride;
+ screen_info.lfb_width = dmi_list[i].width;
+ screen_info.lfb_height = dmi_list[i].height;
+ }
+ }
+ if (!strncmp(this_opt, "base:", 5))
+ screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
+ else if (!strncmp(this_opt, "stride:", 7))
+ screen_info.lfb_linelength = simple_strtoul(this_opt+7, NULL, 0) * 4;
+ else if (!strncmp(this_opt, "height:", 7))
+ screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
+ else if (!strncmp(this_opt, "width:", 6))
+ screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+ }
+ }
- if (!options || !*options)
- return 0;
+ for_each_pci_dev(dev) {
+ int i;
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!*this_opt) continue;
+ if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ continue;
- for (i = 0; i < M_UNKNOWN; i++) {
- if (!strcmp(this_opt, dmi_list[i].optname) &&
- dmi_list[i].base != 0) {
- screen_info.lfb_base = dmi_list[i].base;
- screen_info.lfb_linelength = dmi_list[i].stride;
- screen_info.lfb_width = dmi_list[i].width;
- screen_info.lfb_height = dmi_list[i].height;
- }
+ for (i=0; i < DEVICE_COUNT_RESOURCE; i++) {
+ resource_size_t start, end;
+
+ if (!(pci_resource_flags(dev, i) & IORESOURCE_MEM))
+ continue;
+
+ start = pci_resource_start(dev, i);
+ end = pci_resource_end(dev, i);
+
+ if (!start || !end)
+ continue;
+
+ if (screen_info.lfb_base >= start &&
+ (screen_info.lfb_base + screen_info.lfb_size) < end)
+ default_vga = dev;
}
- if (!strncmp(this_opt, "base:", 5))
- screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
- else if (!strncmp(this_opt, "stride:", 7))
- screen_info.lfb_linelength = simple_strtoul(this_opt+7, NULL, 0) * 4;
- else if (!strncmp(this_opt, "height:", 7))
- screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
- else if (!strncmp(this_opt, "width:", 6))
- screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
}
+
return 0;
}
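
For context (not part of the patch): the restructuring makes the PCI scan run even when no options are given, picking as default VGA device whichever display-class device has a memory BAR that contains the EFI framebuffer. In outline, with helper names that are purely illustrative:

	/* illustrative outline of the new efifb_setup() */
	parse_efifb_options(options);			/* may be empty */
	for_each_pci_dev(dev) {
		if (!is_display_vga_class(dev))
			continue;
		if (bar_contains_lfb(dev, screen_info.lfb_base,
				     screen_info.lfb_size))
			default_vga = dev;	/* later returned by vga_default_device() */
	}
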